Merge "Fix decoder instantiation during playback"
diff --git a/camera/cameraserver/Android.mk b/camera/cameraserver/Android.mk
index f0a0db0..d32e252 100644
--- a/camera/cameraserver/Android.mk
+++ b/camera/cameraserver/Android.mk
@@ -24,6 +24,7 @@
liblog \
libutils \
libui \
+ libgui \
libbinder \
libhidltransport \
android.hardware.camera.common@1.0 \
diff --git a/cmds/screenrecord/Android.mk b/cmds/screenrecord/Android.mk
index 6747e60..a3b1a34 100644
--- a/cmds/screenrecord/Android.mk
+++ b/cmds/screenrecord/Android.mk
@@ -34,6 +34,7 @@
$(TOP)/frameworks/native/include/media/openmax \
external/jpeg
+LOCAL_CFLAGS := -Werror -Wall
LOCAL_CFLAGS += -Wno-multichar
#LOCAL_CFLAGS += -UNDEBUG
diff --git a/cmds/screenrecord/Overlay.cpp b/cmds/screenrecord/Overlay.cpp
index aa800d8..17d7046 100644
--- a/cmds/screenrecord/Overlay.cpp
+++ b/cmds/screenrecord/Overlay.cpp
@@ -202,7 +202,6 @@
mGlConsumer->getTransformMatrix(texMatrix);
nsecs_t monotonicNsec = mGlConsumer->getTimestamp();
nsecs_t frameNumber = mGlConsumer->getFrameNumber();
- int64_t droppedFrames = 0;
if (mLastFrameNumber > 0) {
mTotalDroppedFrames += size_t(frameNumber - mLastFrameNumber) - 1;
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 6097f01..de0167a 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -215,7 +215,6 @@
*/
static status_t setDisplayProjection(const sp<IBinder>& dpy,
const DisplayInfo& mainDpyInfo) {
- status_t err;
// Set the region of the layer stack we're interested in, which in our
// case is "all of it". If the app is rotated (so that the width of the
diff --git a/drm/libmediadrm/ICrypto.cpp b/drm/libmediadrm/ICrypto.cpp
index 6b42ff0..7b70205 100644
--- a/drm/libmediadrm/ICrypto.cpp
+++ b/drm/libmediadrm/ICrypto.cpp
@@ -257,17 +257,21 @@
size_t opaqueSize = data.readInt32();
void *opaqueData = NULL;
- if (opaqueSize > 0) {
- opaqueData = malloc(opaqueSize);
- data.read(opaqueData, opaqueSize);
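+ // Bound the size read from the Parcel before allocating; the value arrives
+ // over Binder and is not trusted.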
+ const size_t kMaxOpaqueSize = 100 * 1024;
+ if (opaqueSize > kMaxOpaqueSize) {
+ return BAD_VALUE;
}
+ opaqueData = malloc(opaqueSize);
+ if (NULL == opaqueData) {
+ return NO_MEMORY;
+ }
+
+ data.read(opaqueData, opaqueSize);
reply->writeInt32(createPlugin(uuid, opaqueData, opaqueSize));
- if (opaqueData != NULL) {
- free(opaqueData);
- opaqueData = NULL;
- }
+ free(opaqueData);
+ opaqueData = NULL;
return OK;
}
diff --git a/drm/mediacas/plugins/clearkey/Android.mk b/drm/mediacas/plugins/clearkey/Android.mk
new file mode 100644
index 0000000..0c2b357
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/Android.mk
@@ -0,0 +1,68 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ ClearKeyCasPlugin.cpp \
+ ClearKeyFetcher.cpp \
+ ClearKeyLicenseFetcher.cpp \
+ ClearKeySessionLibrary.cpp \
+ ecm.cpp \
+ ecm_generator.cpp \
+ JsonAssetLoader.cpp \
+ protos/license_protos.proto \
+
+LOCAL_MODULE := libclearkeycasplugin
+
+LOCAL_PROPRIETARY_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := mediacas
+
+LOCAL_SHARED_LIBRARIES := \
+ libutils \
+ liblog \
+ libcrypto \
+ libstagefright_foundation \
+ libprotobuf-cpp-lite \
+
+LOCAL_STATIC_LIBRARIES := \
+ libjsmn \
+
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+
+LOCAL_PROTOC_OPTIMIZE_TYPE := full
+
+define proto_includes
+$(call local-generated-sources-dir)/proto/$(LOCAL_PATH)
+endef
+
+LOCAL_C_INCLUDES += \
+ external/jsmn \
+ frameworks/av/include \
+ frameworks/native/include/media \
+ $(call proto_includes)
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+ $(call proto_includes)
+
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
+
+#########################################################################
+# Build unit tests
+
+include $(LOCAL_PATH)/tests/Android.mk
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
new file mode 100644
index 0000000..221b74b
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
@@ -0,0 +1,494 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeyCasPlugin"
+
+#include "ClearKeyFetcher.h"
+#include "ecm.h"
+#include "ClearKeyLicenseFetcher.h"
+#include "ClearKeyCasPlugin.h"
+#include "ClearKeySessionLibrary.h"
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/Log.h>
+
+android::CasFactory* createCasFactory() {
+ return new android::clearkeycas::ClearKeyCasFactory();
+}
+
+android::DescramblerFactory *createDescramblerFactory()
+{
+ return new android::clearkeycas::ClearKeyDescramblerFactory();
+}
+
+namespace android {
+namespace clearkeycas {
+
+static const int32_t sClearKeySystemId = 0xF6D8;
+
+bool ClearKeyCasFactory::isSystemIdSupported(int32_t CA_system_id) const {
+ return CA_system_id == sClearKeySystemId;
+}
+
+status_t ClearKeyCasFactory::queryPlugins(
+ std::vector<CasPluginDescriptor> *descriptors) const {
+ descriptors->clear();
+ descriptors->push_back({sClearKeySystemId, String8("Clear Key CAS")});
+ return OK;
+}
+
+status_t ClearKeyCasFactory::createPlugin(
+ int32_t CA_system_id,
+ uint64_t appData,
+ CasPluginCallback callback,
+ CasPlugin **plugin) {
+ if (!isSystemIdSupported(CA_system_id)) {
+ return BAD_VALUE;
+ }
+
+ *plugin = new ClearKeyCasPlugin(appData, callback);
+ return OK;
+}
+///////////////////////////////////////////////////////////////////////////////
+bool ClearKeyDescramblerFactory::isSystemIdSupported(
+ int32_t CA_system_id) const {
+ return CA_system_id == sClearKeySystemId;
+}
+
+status_t ClearKeyDescramblerFactory::createPlugin(
+ int32_t CA_system_id, DescramblerPlugin** plugin) {
+ if (!isSystemIdSupported(CA_system_id)) {
+ return BAD_VALUE;
+ }
+
+ *plugin = new ClearKeyDescramblerPlugin();
+ return OK;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+ClearKeyCasPlugin::ClearKeyCasPlugin(
+ uint64_t appData, CasPluginCallback callback)
+ : mAppData(appData), mCallback(callback) {
+ ALOGV("CTOR");
+}
+
+ClearKeyCasPlugin::~ClearKeyCasPlugin() {
+ ALOGV("DTOR");
+ ClearKeySessionLibrary::get()->destroyPlugin(this);
+}
+
+status_t ClearKeyCasPlugin::setPrivateData(const CasData &data) {
+ ALOGV("setPrivateData");
+
+ return OK;
+}
+
+static String8 sessionIdToString(const std::vector<uint8_t> &array) {
+ String8 result;
+ for (size_t i = 0; i < array.size(); i++) {
+ result.appendFormat("%02x ", array[i]);
+ }
+ if (result.isEmpty()) {
+ result.append("(null)");
+ }
+ return result;
+}
+
+status_t ClearKeyCasPlugin::openSession(
+ uint16_t program_number, CasSessionId* sessionId) {
+ ALOGV("openSession: program_number=%u", program_number);
+
+ return ClearKeySessionLibrary::get()->addSession(
+ this, program_number, 0, sessionId);
+}
+
+status_t ClearKeyCasPlugin::openSession(
+ uint16_t program_number,
+ uint16_t elementary_PID,
+ CasSessionId *sessionId) {
+ ALOGV("openSession: program_number=%u, elementary_PID=%u",
+ program_number, elementary_PID);
+
+ return ClearKeySessionLibrary::get()->addSession(
+ this, program_number, elementary_PID, sessionId);
+}
+
+status_t ClearKeyCasPlugin::closeSession(const CasSessionId &sessionId) {
+ ALOGV("closeSession: sessionId=%s", sessionIdToString(sessionId).string());
+ sp<ClearKeyCasSession> session =
+ ClearKeySessionLibrary::get()->findSession(sessionId);
+ if (session == NULL) {
+ return ERROR_DRM_SESSION_NOT_OPENED;
+ }
+
+ ClearKeySessionLibrary::get()->destroySession(sessionId);
+ return OK;
+}
+
+status_t ClearKeyCasPlugin::setSessionPrivateData(
+ const CasSessionId &sessionId, const CasData &data) {
+ ALOGV("setSessionPrivateData: sessionId=%s",
+ sessionIdToString(sessionId).string());
+ sp<ClearKeyCasSession> session =
+ ClearKeySessionLibrary::get()->findSession(sessionId);
+ if (session == NULL) {
+ return ERROR_DRM_SESSION_NOT_OPENED;
+ }
+ return OK;
+}
+
+status_t ClearKeyCasPlugin::processEcm(
+ const CasSessionId &sessionId, const CasEcm& ecm) {
+ ALOGV("processEcm: sessionId=%s", sessionIdToString(sessionId).string());
+ sp<ClearKeyCasSession> session =
+ ClearKeySessionLibrary::get()->findSession(sessionId);
+ if (session == NULL) {
+ return ERROR_DRM_SESSION_NOT_OPENED;
+ }
+
+ Mutex::Autolock lock(mKeyFetcherLock);
+
+ return session->updateECM(mKeyFetcher.get(), (void*)ecm.data(), ecm.size());
+}
+
+status_t ClearKeyCasPlugin::processEmm(const CasEmm& emm) {
+ ALOGV("processEmm");
+ Mutex::Autolock lock(mKeyFetcherLock);
+
+ return OK;
+}
+
+status_t ClearKeyCasPlugin::sendEvent(
+ int32_t event, int32_t arg, const CasData &eventData) {
+ ALOGV("sendEvent: event=%d, arg=%d", event, arg);
+ // Echo the received event to the callback.
+ // Clear key plugin doesn't use any event; echoing it for testing only.
+ if (mCallback != NULL) {
+ mCallback((void*)mAppData, event, arg, (uint8_t*)eventData.data(), eventData.size());
+ }
+ return OK;
+}
+
+status_t ClearKeyCasPlugin::provision(const String8 &str) {
+ ALOGV("provision: provisionString=%s", str.string());
+ Mutex::Autolock lock(mKeyFetcherLock);
+
+ std::unique_ptr<ClearKeyLicenseFetcher> license_fetcher;
+ license_fetcher.reset(new ClearKeyLicenseFetcher());
+ status_t err = license_fetcher->Init(str.string());
+ if (err != OK) {
+ ALOGE("provision: failed to init ClearKeyLicenseFetcher (err=%d)", err);
+ return err;
+ }
+
+ std::unique_ptr<ClearKeyFetcher> key_fetcher;
+ key_fetcher.reset(new ClearKeyFetcher(std::move(license_fetcher)));
+ err = key_fetcher->Init();
+ if (err != OK) {
+ ALOGE("provision: failed to init ClearKeyFetcher (err=%d)", err);
+ return err;
+ }
+
+ ALOGV("provision: using ClearKeyFetcher");
+ mKeyFetcher = std::move(key_fetcher);
+
+ return OK;
+}
+
+status_t ClearKeyCasPlugin::refreshEntitlements(
+ int32_t refreshType, const CasData &refreshData) {
+ ALOGV("refreshEntitlements");
+ Mutex::Autolock lock(mKeyFetcherLock);
+
+ return OK;
+}
+
+///////////////////////////////////////////////////////////////////////
+
+// AES-128 CBC-CTS decrypt optimized for Transport Packets. |key| is the AES
+// key (odd key or even key), |length| is the data size, and |buffer| is the
+// ciphertext to be decrypted in place.
+status_t TpBlockCtsDecrypt(const AES_KEY& key, size_t length, char* buffer) {
+ CHECK(buffer);
+
+ // Invariant: Packet must be at least 16 bytes.
+ CHECK(length >= AES_BLOCK_SIZE);
+
+ // OpenSSL uses unsigned char.
+ unsigned char* data = reinterpret_cast<unsigned char*>(buffer);
+
+ // Start with zero-filled initialization vector.
+ unsigned char iv[AES_BLOCK_SIZE];
+ memset(iv, 0, AES_BLOCK_SIZE);
+
+ // Size of partial last block handled via CTS.
+ int cts_byte_count = length % AES_BLOCK_SIZE;
+
+ // If there is no partial last block, then process using normal CBC.
+ if (cts_byte_count == 0) {
+ AES_cbc_encrypt(data, data, length, &key, iv, 0);
+ return OK;
+ }
+
+ // Cipher text stealing (CTS) - Schneier Figure 9.5 p 196.
+ // In CTS mode, the last two blocks have been swapped. Block[n-1] is really
+ // the original block[n] combined with the low-order bytes of the original
+ // block[n-1], while block[n] is the high-order bytes of the original
+ // block[n-1] padded with zeros.
+
+ // Block[0] - block[n-2] are handled with normal CBC.
+ int cbc_byte_count = length - cts_byte_count - AES_BLOCK_SIZE;
+ if (cbc_byte_count > 0) {
+ AES_cbc_encrypt(data, data, cbc_byte_count, &key, iv, 0);
+ // |data| points to block[n-1].
+ data += cbc_byte_count;
+ }
+
+ // Save block[n] to use as IV when decrypting block[n-1].
+ unsigned char block_n[AES_BLOCK_SIZE];
+ memset(block_n, 0, AES_BLOCK_SIZE);
+ memcpy(block_n, data + AES_BLOCK_SIZE, cts_byte_count);
+
+ // Decrypt block[n-1] using block[n] as IV, consistent with the original
+ // block order.
+ AES_cbc_encrypt(data, data, AES_BLOCK_SIZE, &key, block_n, 0);
+
+ // Return the stolen ciphertext: swap the high-order bytes of block[n]
+ // and block[n-1].
+ for (int i = 0; i < cts_byte_count; i++) {
+ unsigned char temp = *(data + i);
+ *(data + i) = *(data + AES_BLOCK_SIZE + i);
+ *(data + AES_BLOCK_SIZE + i) = temp;
+ }
+
+ // Decrypt block[n-1] using previous IV.
+ AES_cbc_encrypt(data, data, AES_BLOCK_SIZE, &key, iv, 0);
+ return OK;
+}
+
+// PES header and ECM stream header layout
+//
+// processEcm() receives the data_byte portion from the transport packet.
+// Below is the layout of the first 16 bytes of the ECM PES packet. Here
+// we don't parse them; we skip them and go directly to the ECM container.
+// The layout is included here only for reference.
+//
+// 0-2: 0x00 00 01 = start code prefix.
+// 3: 0xf0 = stream type (90 = ECM).
+// 4-5: 0x00 00 = PES length (filled in later, this is the length of the
+// PES header (16) plus the length of the ECM container).
+// 6-7: 0x00 00 = ECM major version.
+// 8-9: 0x00 01 = ECM minor version.
+// 10-11: 0x00 00 = Crypto period ID (filled in later).
+// 12-13: 0x00 00 = ECM container length (filled in later, either 84 or
+// 166).
+// 14-15: 0x00 00 = offset = 0.
+
+const static size_t kEcmHeaderLength = 16;
+const static size_t kUserKeyLength = 16;
+
+status_t ClearKeyCasSession::updateECM(
+ KeyFetcher *keyFetcher, void *ecm, size_t size) {
+ if (keyFetcher == nullptr) {
+ return ERROR_DRM_NOT_PROVISIONED;
+ }
+
+ if (size < kEcmHeaderLength) {
+ ALOGE("updateECM: invalid ecm size %zu", size);
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _lock(mKeyLock);
+
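+ // If this ECM is identical to the one already processed, the current
+ // keys remain valid; skip re-deriving them.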
+ if (mEcmBuffer != NULL && mEcmBuffer->capacity() == size
+ && !memcmp(mEcmBuffer->base(), ecm, size)) {
+ return OK;
+ }
+
+ mEcmBuffer = ABuffer::CreateAsCopy(ecm, size);
+ mEcmBuffer->setRange(kEcmHeaderLength, size - kEcmHeaderLength);
+
+ uint64_t asset_id;
+ std::vector<KeyFetcher::KeyInfo> keys;
+ status_t err = keyFetcher->ObtainKey(mEcmBuffer, &asset_id, &keys);
+ if (err != OK) {
+ ALOGE("updateECM: failed to obtain key (err=%d)", err);
+ return err;
+ }
+
+ ALOGV("updateECM: %zu key(s) found", keys.size());
+ for (size_t keyIndex = 0; keyIndex < keys.size(); keyIndex++) {
+ String8 str;
+
+ const sp<ABuffer>& keyBytes = keys[keyIndex].key_bytes;
+ CHECK(keyBytes->size() == kUserKeyLength);
+
+ int result = AES_set_decrypt_key(
+ reinterpret_cast<const uint8_t*>(keyBytes->data()),
+ AES_BLOCK_SIZE * 8, &mKeyInfo[keyIndex].contentKey);
+ mKeyInfo[keyIndex].valid = (result == 0);
+ if (!mKeyInfo[keyIndex].valid) {
+ ALOGE("updateECM: failed to set key %d, key_id=%d",
+ keyIndex, keys[keyIndex].key_id);
+ }
+ }
+ return OK;
+}
+
+// Decryption of a set of sub-samples
+ssize_t ClearKeyCasSession::decrypt(
+ bool secure, DescramblerPlugin::ScramblingControl scramblingControl,
+ size_t numSubSamples, const DescramblerPlugin::SubSample *subSamples,
+ const void *srcPtr, void *dstPtr, AString * /* errorDetailMsg */) {
+ AES_KEY contentKey;
+
+ if (scramblingControl != DescramblerPlugin::kScrambling_Unscrambled) {
+ // Hold lock to get the key only to avoid contention for decryption
+ Mutex::Autolock _lock(mKeyLock);
+
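+ // The low bit of the scrambling control selects the key slot:
+ // even key -> 0, odd key -> 1.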
+ int32_t keyIndex = (scramblingControl & 1);
+ if (!mKeyInfo[keyIndex].valid) {
+ ALOGE("decrypt: key %d is invalid", keyIndex);
+ return ERROR_DRM_DECRYPT;
+ }
+ contentKey = mKeyInfo[keyIndex].contentKey;
+ }
+
+ uint8_t *src = (uint8_t*)srcPtr;
+ uint8_t *dst = (uint8_t*)dstPtr;
+
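+ // For each subsample, copy it to the destination (if needed) and then
+ // decrypt its encrypted portion in place.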
+ for (size_t i = 0; i < numSubSamples; i++) {
+ size_t numBytesinSubSample = subSamples[i].mNumBytesOfClearData
+ + subSamples[i].mNumBytesOfEncryptedData;
+ if (src != dst) {
+ memcpy(dst, src, numBytesinSubSample);
+ }
+ status_t err = OK;
+ // Don't decrypt if len < AES_BLOCK_SIZE.
+ // The last chunk shorter than AES_BLOCK_SIZE is not encrypted.
+ if (scramblingControl != DescramblerPlugin::kScrambling_Unscrambled
+ && subSamples[i].mNumBytesOfEncryptedData >= AES_BLOCK_SIZE) {
+ err = decryptPayload(
+ contentKey,
+ numBytesinSubSample,
+ subSamples[i].mNumBytesOfClearData,
+ (char *)dst);
+ }
+
+ dst += numBytesinSubSample;
+ src += numBytesinSubSample;
+ }
+ return dst - (uint8_t *)dstPtr;
+}
+
+// Decryption of a TS payload
+status_t ClearKeyCasSession::decryptPayload(
+ const AES_KEY& key, size_t length, size_t offset, char* buffer) const {
+ CHECK(buffer);
+
+ // Invariant: only call decryptPayload with TS packets with at least 16
+ // bytes of payload (AES_BLOCK_SIZE).
+
+ CHECK(length >= offset + AES_BLOCK_SIZE);
+
+ return TpBlockCtsDecrypt(key, length - offset, buffer + offset);
+}
+
+///////////////////////////////////////////////////////////////////////////
+#undef LOG_TAG
+#define LOG_TAG "ClearKeyDescramblerPlugin"
+
+bool ClearKeyDescramblerPlugin::requiresSecureDecoderComponent(
+ const char *mime) const {
+ ALOGV("requiresSecureDecoderComponent: mime=%s", mime);
+ return false;
+}
+
+status_t ClearKeyDescramblerPlugin::setMediaCasSession(
+ const CasSessionId &sessionId) {
+ ALOGV("setMediaCasSession: sessionId=%s", sessionIdToString(sessionId).string());
+
+ sp<ClearKeyCasSession> session =
+ ClearKeySessionLibrary::get()->findSession(sessionId);
+
+ if (session == NULL) {
+ ALOGE("ClearKeyDescramblerPlugin: session not found");
+ return ERROR_DRM_SESSION_NOT_OPENED;
+ }
+
+ mCASSession = session;
+ return OK;
+}
+
+ssize_t ClearKeyDescramblerPlugin::descramble(
+ bool secure,
+ ScramblingControl scramblingControl,
+ size_t numSubSamples,
+ const SubSample *subSamples,
+ const void *srcPtr,
+ int32_t srcOffset,
+ void *dstPtr,
+ int32_t dstOffset,
+ AString *errorDetailMsg) {
+
+ ALOGV("descramble: secure=%d, sctrl=%d, subSamples=%s, "
+ "srcPtr=%p, dstPtr=%p, srcOffset=%d, dstOffset=%d",
+ (int)secure, (int)scramblingControl,
+ subSamplesToString(subSamples, numSubSamples).string(),
+ srcPtr, dstPtr, srcOffset, dstOffset);
+
+ if (mCASSession == NULL) {
+ ALOGE("Uninitialized CAS session!");
+ return ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED;
+ }
+
+ return mCASSession->decrypt(
+ secure, scramblingControl,
+ numSubSamples, subSamples,
+ (uint8_t*)srcPtr + srcOffset,
+ dstPtr == NULL ? NULL : ((uint8_t*)dstPtr + dstOffset),
+ errorDetailMsg);
+}
+
+// Conversion utilities
+String8 ClearKeyDescramblerPlugin::arrayToString(
+ uint8_t const *array, size_t len) const
+{
+ String8 result("{ ");
+ for (size_t i = 0; i < len; i++) {
+ result.appendFormat("0x%02x ", array[i]);
+ }
+ result += "}";
+ return result;
+}
+
+String8 ClearKeyDescramblerPlugin::subSamplesToString(
+ SubSample const *subSamples, size_t numSubSamples) const
+{
+ String8 result;
+ for (size_t i = 0; i < numSubSamples; i++) {
+ result.appendFormat("[%zu] {clear:%u, encrypted:%u} ", i,
+ subSamples[i].mNumBytesOfClearData,
+ subSamples[i].mNumBytesOfEncryptedData);
+ }
+ return result;
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
new file mode 100644
index 0000000..8cf9b90
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_CAS_PLUGIN_H_
+#define CLEARKEY_CAS_PLUGIN_H_
+
+#include <media/cas/CasAPI.h>
+#include <media/cas/DescramblerAPI.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+
+extern "C" {
+ android::CasFactory *createCasFactory();
+ android::DescramblerFactory *createDescramblerFactory();
+}
+
+namespace android {
+namespace clearkeycas {
+
+class KeyFetcher;
+class ClearKeyCasSession;
+
+class ClearKeyCasFactory : public CasFactory {
+public:
+ ClearKeyCasFactory() {}
+ virtual ~ClearKeyCasFactory() {}
+
+ virtual bool isSystemIdSupported(
+ int32_t CA_system_id) const override;
+ virtual status_t queryPlugins(
+ std::vector<CasPluginDescriptor> *descriptors) const override;
+ virtual status_t createPlugin(
+ int32_t CA_system_id,
+ uint64_t appData,
+ CasPluginCallback callback,
+ CasPlugin **plugin) override;
+};
+
+class ClearKeyDescramblerFactory : public DescramblerFactory {
+public:
+ ClearKeyDescramblerFactory() {}
+ virtual ~ClearKeyDescramblerFactory() {}
+
+ virtual bool isSystemIdSupported(
+ int32_t CA_system_id) const override;
+ virtual status_t createPlugin(
+ int32_t CA_system_id, DescramblerPlugin **plugin) override;
+};
+
+class ClearKeyCasPlugin : public CasPlugin {
+public:
+ ClearKeyCasPlugin(uint64_t appData, CasPluginCallback callback);
+ virtual ~ClearKeyCasPlugin();
+
+ virtual status_t setPrivateData(
+ const CasData &data) override;
+
+ virtual status_t openSession(
+ uint16_t program_number, CasSessionId *sessionId) override;
+
+ virtual status_t openSession(
+ uint16_t program_number,
+ uint16_t elementary_PID,
+ CasSessionId *sessionId) override;
+
+ virtual status_t closeSession(
+ const CasSessionId &sessionId) override;
+
+ virtual status_t setSessionPrivateData(
+ const CasSessionId &sessionId,
+ const CasData &data) override;
+
+ virtual status_t processEcm(
+ const CasSessionId &sessionId, const CasEcm &ecm) override;
+
+ virtual status_t processEmm(const CasEmm &emm) override;
+
+ virtual status_t sendEvent(
+ int32_t event, int32_t arg, const CasData &eventData) override;
+
+ virtual status_t provision(const String8 &str) override;
+
+ virtual status_t refreshEntitlements(
+ int32_t refreshType, const CasData &refreshData) override;
+
+private:
+ Mutex mKeyFetcherLock;
+ std::unique_ptr<KeyFetcher> mKeyFetcher;
+ CasPluginCallback mCallback;
+ uint64_t mAppData;
+};
+
+class ClearKeyDescramblerPlugin : public DescramblerPlugin {
+public:
+ ClearKeyDescramblerPlugin() {}
+ virtual ~ClearKeyDescramblerPlugin() {};
+
+ virtual bool requiresSecureDecoderComponent(
+ const char *mime) const override;
+
+ virtual status_t setMediaCasSession(
+ const CasSessionId &sessionId) override;
+
+ virtual ssize_t descramble(
+ bool secure,
+ ScramblingControl scramblingControl,
+ size_t numSubSamples,
+ const SubSample *subSamples,
+ const void *srcPtr,
+ int32_t srcOffset,
+ void *dstPtr,
+ int32_t dstOffset,
+ AString *errorDetailMsg) override;
+
+private:
+ sp<ClearKeyCasSession> mCASSession;
+
+ String8 subSamplesToString(
+ SubSample const *subSamples,
+ size_t numSubSamples) const;
+ String8 arrayToString(uint8_t const *array, size_t len) const;
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // CLEARKEY_CAS_PLUGIN_H_
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
new file mode 100644
index 0000000..210bab3
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeyFetcher"
+
+#include <algorithm>
+#include <string>
+
+#include "ClearKeyFetcher.h"
+#include "ecm.h"
+#include "LicenseFetcher.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <utils/Log.h>
+
+namespace android {
+namespace clearkeycas {
+
+ClearKeyFetcher::ClearKeyFetcher(
+ std::unique_ptr<LicenseFetcher> license_fetcher) :
+ initialized_(false),
+ license_fetcher_(std::move(license_fetcher)) {
+ CHECK(license_fetcher_);
+}
+
+ClearKeyFetcher::~ClearKeyFetcher() {}
+
+// This is a no-op but other KeyFetcher subclasses require initialization
+// so this is necessary to preserve the contract.
+status_t ClearKeyFetcher::Init() {
+ initialized_ = true;
+ return OK;
+}
+
+status_t ClearKeyFetcher::ObtainKey(const sp<ABuffer>& buffer,
+ uint64_t* asset_id, std::vector<KeyInfo>* keys) {
+ CHECK(asset_id);
+ CHECK(keys);
+ CHECK(initialized_);
+ *asset_id = 0;
+ keys->clear();
+
+ EcmContainer container;
+ status_t status = container.Parse(buffer);
+ if (status != OK) {
+ return status;
+ }
+ ALOGV("descriptor_size=%zu", container.descriptor_size());
+
+ // Sanity check to verify that the BroadcastEncryptor is sending a properly
+ // formed EcmContainer. If it contains two Ecms, the ids should have different
+ // parity (one odd, one even). This does not necessarily affect decryption
+ // but indicates a problem with Ecm generation.
+ if (container.descriptor_size() == 2) {
+ // XOR the least significant bits to verify different parity.
+ bool same_parity = (((container.descriptor(0).id() & 0x01) ^
+ (container.descriptor(1).id() & 0x01)) == 0);
+ if (same_parity) {
+ ALOGW("asset_id=%llu: malformed Ecm, "
+ "content keys have same parity, id0=%d, id1=%d",
+ container.descriptor(0).ecm().asset_id(),
+ container.descriptor(0).id(),
+ container.descriptor(1).id());
+ }
+ }
+
+ *asset_id = container.descriptor(0).ecm().asset_id();
+
+ // Detect asset_id change. This could be caused by a configuration change
+ // in the BroadcastEncryptor. This is unusual so log it in case it is an
+ // operational mistake. This invalidates the current asset_key causing a
+ // new license to be fetched.
+ // TODO(rkint): test against BroadcastEncryptor to verify what BE sends on
+ // asset_id change. If it sends an EcmContainer with 2 Ecms with different
+ // asset_ids (old and new) then it might be best to prefetch the Emm.
+ if ((asset_.id() != 0) && (*asset_id != asset_.id())) {
+ ALOGW("Asset_id change from %llu to %llu", asset_.id(), *asset_id);
+ asset_.Clear();
+ }
+
+ // Fetch the license for this asset if we don't already have it
+ if (!asset_.has_id()) {
+ status = license_fetcher_->FetchLicense(*asset_id, &asset_);
+ if (status != OK) {
+ *asset_id = 0;
+ return status;
+ }
+ ALOGV("FetchLicense succeeded, has_id=%d", asset_.has_id());
+ }
+ keys->resize(container.descriptor_size());
+
+ for (size_t i = 0; i < container.descriptor_size(); ++i) {
+ status = container.mutable_descriptor(i)->mutable_ecm()->Decrypt(
+ container.descriptor(i).ecm().buffer(), asset_);
+ if (status != OK) {
+ *asset_id = 0;
+ keys->clear();
+ return status;
+ }
+ // TODO: if 2 Ecms have same parity, key from Ecm with higher id
+ // should be keys[1].
+ KeyInfo key;
+ key.key_id = container.descriptor(i).id();
+ key.key_bytes = container.descriptor(i).ecm().content_key();
+
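+ // Place each key in the slot matching its id parity: even ids at [0],
+ // odd ids at [1].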
+ keys->at(key.key_id & 1) = key;
+ }
+ return OK;
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyFetcher.h b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.h
new file mode 100644
index 0000000..d58b9df
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEAR_KEY_FETCHER_H_
+#define CLEAR_KEY_FETCHER_H_
+
+#include <vector>
+
+#include "protos/license_protos.pb.h"
+
+#include <media/stagefright/foundation/ABase.h>
+#include "KeyFetcher.h"
+
+namespace android {
+namespace clearkeycas {
+
+class LicenseFetcher;
+
+class ClearKeyFetcher : public KeyFetcher {
+public:
+ // ClearKeyFetcher takes ownership of |license_fetcher|.
+ explicit ClearKeyFetcher(
+ std::unique_ptr<LicenseFetcher> license_fetcher);
+
+ virtual ~ClearKeyFetcher();
+
+ // Initializes the fetcher. Must be called before ObtainKey.
+ status_t Init() override;
+
+ // Obtains the |asset_id| and |keys| from the Ecm contained in |ecm|.
+ // Returns
+ // - errors returned by EcmContainer::Parse.
+ // - errors returned by ClassicLicenseFetcher::FetchLicense.
+ // - errors returned by Ecm::Decrypt.
+ // |asset_id| and |keys| are owned by the caller and cannot be null.
+ // Init() must have been called.
+ status_t ObtainKey(const sp<ABuffer>& ecm, uint64_t* asset_id,
+ std::vector<KeyInfo>* keys) override;
+
+private:
+ clearkeycas::Asset asset_;
+ bool initialized_;
+ std::unique_ptr<LicenseFetcher> license_fetcher_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ClearKeyFetcher);
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // CLEAR_KEY_FETCHER_H_
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyLicenseFetcher.cpp b/drm/mediacas/plugins/clearkey/ClearKeyLicenseFetcher.cpp
new file mode 100644
index 0000000..603337d
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeyLicenseFetcher.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeyLicenseFetcher"
+
+#include "ClearKeyLicenseFetcher.h"
+#include "protos/license_protos.pb.h"
+
+#include <utils/Log.h>
+#include <utils/String8.h>
+#include "JsonAssetLoader.h"
+
+namespace android {
+namespace clearkeycas {
+
+status_t ClearKeyLicenseFetcher::Init(const char *input) {
+ JsonAssetLoader *extractor = new JsonAssetLoader();
+ return extractor->extractAssetFromString(String8(input), &asset_);
+}
+
+status_t ClearKeyLicenseFetcher::FetchLicense(
+ uint64_t /* asset_id */, Asset* asset) {
+ *asset = asset_;
+ return OK;
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyLicenseFetcher.h b/drm/mediacas/plugins/clearkey/ClearKeyLicenseFetcher.h
new file mode 100644
index 0000000..ebbcbeb
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeyLicenseFetcher.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEAR_KEY_LICENSE_FETCHER_H_
+#define CLEAR_KEY_LICENSE_FETCHER_H_
+
+#include "KeyFetcher.h"
+#include "LicenseFetcher.h"
+
+namespace android {
+namespace clearkeycas {
+
+class ClearKeyLicenseFetcher : public LicenseFetcher {
+public:
+ ClearKeyLicenseFetcher() {}
+ virtual ~ClearKeyLicenseFetcher() {}
+
+ virtual status_t Init(const char *input);
+
+ virtual status_t FetchLicense(uint64_t asset_id, Asset* asset);
+
+private:
+ Asset asset_;
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // CLEAR_KEY_LICENSE_FETCHER_H_
diff --git a/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.cpp b/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.cpp
new file mode 100644
index 0000000..4e5f479
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeySessionLibrary"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+#include "ClearKeySessionLibrary.h"
+
+namespace android {
+namespace clearkeycas {
+
+Mutex ClearKeySessionLibrary::sSingletonLock;
+ClearKeySessionLibrary* ClearKeySessionLibrary::sSingleton = NULL;
+
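+// Strict ordering for SessionInfo, required for its use as a KeyedVector key.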
+inline bool operator < (
+ const SessionInfo& lhs, const SessionInfo& rhs) {
+ if (lhs.plugin < rhs.plugin) return true;
+ else if (lhs.plugin > rhs.plugin) return false;
+
+ if (lhs.program_number < rhs.program_number) return true;
+ else if (lhs.program_number > rhs.program_number) return false;
+
+ return lhs.elementary_PID < rhs.elementary_PID;
+}
+
+ClearKeyCasSession::ClearKeyCasSession(const SessionInfo &info)
+ : mSessionInfo(info) {
+ mKeyInfo[0].valid = mKeyInfo[1].valid = false;
+}
+
+ClearKeyCasSession::~ClearKeyCasSession() {
+}
+
+const SessionInfo& ClearKeyCasSession::getSessionInfo() const {
+ return mSessionInfo;
+}
+
+ClearKeySessionLibrary* ClearKeySessionLibrary::get() {
+ Mutex::Autolock lock(sSingletonLock);
+
+ if (sSingleton == NULL) {
+ ALOGV("Instantiating Session Library Singleton.");
+ sSingleton = new ClearKeySessionLibrary();
+ }
+
+ return sSingleton;
+}
+
+ClearKeySessionLibrary::ClearKeySessionLibrary() : mNextSessionId(1) {}
+
+status_t ClearKeySessionLibrary::addSession(
+ CasPlugin *plugin,
+ uint16_t program_number,
+ uint16_t elementary_PID,
+ CasSessionId *sessionId) {
+ CHECK(sessionId);
+
+ Mutex::Autolock lock(mSessionsLock);
+
+ SessionInfo info = {plugin, program_number, elementary_PID};
+ ssize_t index = mSessionInfoToIDMap.indexOfKey(info);
+ if (index >= 0) {
+ ALOGW("Session already exists: program_number=%u, elementary_PID=%u",
+ program_number, elementary_PID);
+ *sessionId = mSessionInfoToIDMap[index];
+ return OK;
+ }
+
+ sp<ClearKeyCasSession> session = new ClearKeyCasSession(info);
+
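+ // Pack the 32-bit session id counter into a 4-byte CasSessionId,
+ // most significant byte first (on little-endian hosts).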
+ uint8_t *byteArray = (uint8_t *) &mNextSessionId;
+ sessionId->push_back(byteArray[3]);
+ sessionId->push_back(byteArray[2]);
+ sessionId->push_back(byteArray[1]);
+ sessionId->push_back(byteArray[0]);
+ mNextSessionId++;
+
+ mSessionInfoToIDMap.add(info, *sessionId);
+ mIDToSessionMap.add(*sessionId, session);
+ return OK;
+}
+
+sp<ClearKeyCasSession> ClearKeySessionLibrary::findSession(
+ const CasSessionId& sessionId) {
+ Mutex::Autolock lock(mSessionsLock);
+
+ ssize_t index = mIDToSessionMap.indexOfKey(sessionId);
+ if (index < 0) {
+ return NULL;
+ }
+ return mIDToSessionMap.valueFor(sessionId);
+}
+
+void ClearKeySessionLibrary::destroySession(const CasSessionId& sessionId) {
+ Mutex::Autolock lock(mSessionsLock);
+
+ ssize_t index = mIDToSessionMap.indexOfKey(sessionId);
+ if (index < 0) {
+ return;
+ }
+
+ sp<ClearKeyCasSession> session = mIDToSessionMap.valueAt(index);
+ mSessionInfoToIDMap.removeItem(session->getSessionInfo());
+ mIDToSessionMap.removeItemsAt(index);
+}
+
+void ClearKeySessionLibrary::destroyPlugin(CasPlugin *plugin) {
+ Mutex::Autolock lock(mSessionsLock);
+
+ for (ssize_t index = mSessionInfoToIDMap.size() - 1; index >= 0; index--) {
+ const SessionInfo &info = mSessionInfoToIDMap.keyAt(index);
+ if (info.plugin == plugin) {
+ const CasSessionId &id = mSessionInfoToIDMap.valueAt(index);
+ mIDToSessionMap.removeItem(id);
+ mSessionInfoToIDMap.removeItemsAt(index);
+ }
+ }
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h b/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h
new file mode 100644
index 0000000..ebefa72
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_SESSION_LIBRARY_H_
+#define CLEARKEY_SESSION_LIBRARY_H_
+
+#include <media/cas/CasAPI.h>
+#include <media/cas/DescramblerAPI.h>
+#include <openssl/aes.h>
+#include <utils/KeyedVector.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+
+namespace android {
+struct ABuffer;
+
+namespace clearkeycas {
+class KeyFetcher;
+
+struct SessionInfo {
+ CasPlugin *plugin;
+ uint16_t program_number;
+ uint16_t elementary_PID;
+};
+
+class ClearKeyCasSession : public RefBase {
+public:
+ ssize_t decrypt(
+ bool secure,
+ DescramblerPlugin::ScramblingControl scramblingControl,
+ size_t numSubSamples,
+ const DescramblerPlugin::SubSample *subSamples,
+ const void *srcPtr,
+ void *dstPtr,
+ AString * /* errorDetailMsg */);
+
+ status_t updateECM(KeyFetcher *keyFetcher, void *ecm, size_t size);
+
+private:
+ enum {
+ kNumKeys = 2,
+ };
+ struct KeyInfo {
+ bool valid;
+ AES_KEY contentKey;
+ };
+ sp<ABuffer> mEcmBuffer;
+ Mutex mKeyLock;
+ SessionInfo mSessionInfo;
+ KeyInfo mKeyInfo[kNumKeys];
+
+ friend class ClearKeySessionLibrary;
+
+ explicit ClearKeyCasSession(const SessionInfo &info);
+ virtual ~ClearKeyCasSession();
+ const SessionInfo& getSessionInfo() const;
+ status_t decryptPayload(
+ const AES_KEY& key, size_t length, size_t offset, char* buffer) const;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ClearKeyCasSession);
+};
+
+class ClearKeySessionLibrary {
+public:
+ static ClearKeySessionLibrary* get();
+
+ status_t addSession(
+ CasPlugin *plugin,
+ uint16_t program_number,
+ uint16_t elementary_PID,
+ CasSessionId *sessionId);
+
+ sp<ClearKeyCasSession> findSession(const CasSessionId& sessionId);
+
+ void destroySession(const CasSessionId& sessionId);
+
+ void destroyPlugin(CasPlugin *plugin);
+
+private:
+ static Mutex sSingletonLock;
+ static ClearKeySessionLibrary* sSingleton;
+
+ Mutex mSessionsLock;
+ uint32_t mNextSessionId;
+ KeyedVector<CasSessionId, sp<ClearKeyCasSession>> mIDToSessionMap;
+ KeyedVector<SessionInfo, CasSessionId> mSessionInfoToIDMap;
+
+ ClearKeySessionLibrary();
+ DISALLOW_EVIL_CONSTRUCTORS(ClearKeySessionLibrary);
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // CLEARKEY_SESSION_LIBRARY_H_
diff --git a/drm/mediacas/plugins/clearkey/JsonAssetLoader.cpp b/drm/mediacas/plugins/clearkey/JsonAssetLoader.cpp
new file mode 100644
index 0000000..9cd77e9
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/JsonAssetLoader.cpp
@@ -0,0 +1,234 @@
+
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "JsonAssetLoader"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/base64.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/Log.h>
+
+#include "JsonAssetLoader.h"
+#include "protos/license_protos.pb.h"
+
+namespace android {
+namespace clearkeycas {
+
+const String8 kIdTag("id");
+const String8 kNameTag("name");
+const String8 kLowerCaseOrganizationNameTag("lowercase_organization_name");
+const String8 kEncryptionKeyTag("encryption_key");
+const String8 kCasTypeTag("cas_type");
+const String8 kBase64Padding("=");
+
+const uint32_t kKeyLength = 16;
+
+JsonAssetLoader::JsonAssetLoader() {
+}
+
+JsonAssetLoader::~JsonAssetLoader() {
+}
+
+/*
+ * Extract a clear key asset from a JSON string.
+ *
+ * Returns OK if a clear key asset is extracted successfully,
+ * or ERROR_DRM_NO_LICENSE if the string doesn't contain a valid
+ * clear key asset.
+ */
+status_t JsonAssetLoader::extractAssetFromString(
+ const String8& jsonAssetString, Asset *asset) {
+ if (!parseJsonAssetString(jsonAssetString, &mJsonObjects)) {
+ return ERROR_DRM_NO_LICENSE;
+ }
+
+ if (mJsonObjects.size() < 1) {
+ return ERROR_DRM_NO_LICENSE;
+ }
+
+ if (!parseJsonObject(mJsonObjects[0], &mTokens))
+ return ERROR_DRM_NO_LICENSE;
+
+ if (!findKey(mJsonObjects[0], asset)) {
+ return ERROR_DRM_NO_LICENSE;
+ }
+ return OK;
+}
+
+//static
+sp<ABuffer> JsonAssetLoader::decodeBase64String(const String8& encodedText) {
+ // Since android::decodeBase64() requires padding characters,
+ // add them so the length of encodedText is exactly a multiple of 4.
+ int remainder = encodedText.length() % 4;
+ String8 paddedText(encodedText);
+ if (remainder > 0) {
+ for (int i = 0; i < 4 - remainder; ++i) {
+ paddedText.append(kBase64Padding);
+ }
+ }
+
+ return decodeBase64(AString(paddedText.string()));
+}
+
+bool JsonAssetLoader::findKey(const String8& jsonObject, Asset *asset) {
+
+ String8 value;
+
+ if (jsonObject.find(kIdTag) < 0) {
+ return false;
+ }
+ findValue(kIdTag, &value);
+ ALOGV("found %s=%s", kIdTag.string(), value.string());
+ asset->set_id(atoi(value.string()));
+
+ if (jsonObject.find(kNameTag) < 0) {
+ return false;
+ }
+ findValue(kNameTag, &value);
+ ALOGV("found %s=%s", kNameTag.string(), value.string());
+ asset->set_name(value.string());
+
+ if (jsonObject.find(kLowerCaseOgranizationNameTag) < 0) {
+ return false;
+ }
+ findValue(kLowerCaseOgranizationNameTag, &value);
+ ALOGV("found %s=%s", kLowerCaseOgranizationNameTag.string(), value.string());
+ asset->set_lowercase_organization_name(value.string());
+
+ if (jsonObject.find(kCasTypeTag) < 0) {
+ return false;
+ }
+ findValue(kCasTypeTag, &value);
+ ALOGV("found %s=%s", kCasTypeTag.string(), value.string());
+ // Asset_CasType_CLEARKEY_CAS = 1
+ asset->set_cas_type((Asset_CasType)atoi(value.string()));
+
+ return true;
+}
+
+void JsonAssetLoader::findValue(const String8 &key, String8* value) {
+ value->clear();
+ const char* valueToken;
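+ // The token list is flat: a key token is immediately followed by its
+ // value token.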
+ for (Vector<String8>::const_iterator nextToken = mTokens.begin();
+ nextToken != mTokens.end(); ++nextToken) {
+ if (0 == (*nextToken).compare(key)) {
+ if (nextToken + 1 == mTokens.end())
+ break;
+ valueToken = (*(nextToken + 1)).string();
+ value->setTo(valueToken);
+ nextToken++;
+ break;
+ }
+ }
+}
+
+/*
+ * Parses a JSON objects string and initializes a vector of tokens.
+ *
+ * @return Returns false for errors, true for success.
+ */
+bool JsonAssetLoader::parseJsonObject(const String8& jsonObject,
+ Vector<String8>* tokens) {
+ jsmn_parser parser;
+
+ jsmn_init(&parser);
+ int numTokens = jsmn_parse(&parser,
+ jsonObject.string(), jsonObject.size(), NULL, 0);
+ if (numTokens < 0) {
+ ALOGE("Parser returns error code=%d", numTokens);
+ return false;
+ }
+
+ unsigned int jsmnTokensSize = numTokens * sizeof(jsmntok_t);
+ mJsmnTokens.clear();
+ mJsmnTokens.setCapacity(jsmnTokensSize);
+
+ jsmn_init(&parser);
+ int status = jsmn_parse(&parser, jsonObject.string(),
+ jsonObject.size(), mJsmnTokens.editArray(), numTokens);
+ if (status < 0) {
+ ALOGE("Parser returns error code=%d", status);
+ return false;
+ }
+
+ tokens->clear();
+ String8 token;
+ const char *pjs;
+ ALOGV("numTokens: %d", numTokens);
+ for (int j = 0; j < numTokens; ++j) {
+ pjs = jsonObject.string() + mJsmnTokens[j].start;
+ if (mJsmnTokens[j].type == JSMN_STRING ||
+ mJsmnTokens[j].type == JSMN_PRIMITIVE) {
+ token.setTo(pjs, mJsmnTokens[j].end - mJsmnTokens[j].start);
+ tokens->add(token);
+ ALOGV("add token: %s", token.string());
+ }
+ }
+ return true;
+}
+
+/*
+ * Parses JSON asset string and initializes a vector of JSON objects.
+ *
+ * @return Returns false for errors, true for success.
+ */
+bool JsonAssetLoader::parseJsonAssetString(const String8& jsonAsset,
+ Vector<String8>* jsonObjects) {
+ if (jsonAsset.isEmpty()) {
+ ALOGE("Empty JSON Web Key");
+ return false;
+ }
+
+ // The jsmn parser only supports unicode encoding.
+ jsmn_parser parser;
+
+ // Compute the number of tokens. A token records the type and the
+ // offset of a value in the original string.
+ jsmn_init(&parser);
+ int numTokens = jsmn_parse(&parser,
+ jsonAsset.string(), jsonAsset.size(), NULL, 0);
+ if (numTokens < 0) {
+ ALOGE("Parser returns error code=%d", numTokens);
+ return false;
+ }
+
+ unsigned int jsmnTokensSize = numTokens * sizeof(jsmntok_t);
+ mJsmnTokens.setCapacity(jsmnTokensSize);
+
+ jsmn_init(&parser);
+ int status = jsmn_parse(&parser, jsonAsset.string(),
+ jsonAsset.size(), mJsmnTokens.editArray(), numTokens);
+ if (status < 0) {
+ ALOGE("Parser returns error code=%d", status);
+ return false;
+ }
+
+ String8 token;
+ const char *pjs;
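+ // Collect each JSON object token as its own string; each object is
+ // parsed into key/value tokens separately later.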
+ for (int i = 0; i < numTokens; ++i) {
+ pjs = jsonAsset.string() + mJsmnTokens[i].start;
+ if (mJsmnTokens[i].type == JSMN_OBJECT) {
+ token.setTo(pjs, mJsmnTokens[i].end - mJsmnTokens[i].start);
+ jsonObjects->add(token);
+ }
+ }
+ return true;
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/JsonAssetLoader.h b/drm/mediacas/plugins/clearkey/JsonAssetLoader.h
new file mode 100644
index 0000000..06f9389
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/JsonAssetLoader.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef JSON_ASSET_LOADER_H_
+#define JSON_ASSET_LOADER_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/StrongPointer.h>
+#include "protos/license_protos.pb.h"
+
+#include "jsmn.h"
+
+namespace android {
+struct ABuffer;
+
+namespace clearkeycas {
+
+class JsonAssetLoader {
+ public:
+ JsonAssetLoader();
+ virtual ~JsonAssetLoader();
+
+ status_t extractAssetFromString(
+ const String8& jsonAssetString, Asset *asset);
+
+ private:
+ Vector<jsmntok_t> mJsmnTokens;
+ Vector<String8> mJsonObjects;
+ Vector<String8> mTokens;
+
+ static sp<ABuffer> decodeBase64String(
+ const String8& encodedText);
+ bool findKey(const String8& jsonObject, Asset *asset);
+ void findValue(
+ const String8 &key, String8* value);
+ bool parseJsonObject(
+ const String8& jsonObject, Vector<String8>* tokens);
+ bool parseJsonAssetString(
+ const String8& jsonString, Vector<String8>* jsonObjects);
+
+ DISALLOW_EVIL_CONSTRUCTORS(JsonAssetLoader);
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // JSON_ASSET_LOADER_H_
diff --git a/drm/mediacas/plugins/clearkey/KeyFetcher.h b/drm/mediacas/plugins/clearkey/KeyFetcher.h
new file mode 100644
index 0000000..83fe50a
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/KeyFetcher.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef KEY_FETCHER_H_
+#define KEY_FETCHER_H_
+
+#include <vector>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <utils/Errors.h>
+
+namespace android {
+namespace clearkeycas {
+
+// Interface for classes which extract the content key from an Ecm.
+class KeyFetcher {
+public:
+ struct KeyInfo {
+ sp<ABuffer> key_bytes;
+ int key_id;
+ };
+
+ KeyFetcher() {}
+ virtual ~KeyFetcher() {}
+
+ // Initializes resources set in subclass-specific calls. This must be called
+ // before threads are started.
+ virtual status_t Init() = 0;
+
+ // Obtains content key(s) based on contents of |ecm|. |asset_id| is the
+ // internal id of the asset, |keys| is a vector containing instances of a
+ // class containing a content key and an id. |asset_id| and |keys| are
+ // owned by the caller and must be non-null.
+ virtual status_t ObtainKey(const sp<ABuffer>& ecm,
+ uint64_t* asset_id, std::vector<KeyInfo>* keys) = 0;
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // KEY_FETCHER_H_
diff --git a/drm/mediacas/plugins/clearkey/LicenseFetcher.h b/drm/mediacas/plugins/clearkey/LicenseFetcher.h
new file mode 100644
index 0000000..2a33dd8
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/LicenseFetcher.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LICENSE_FETCHER_H_
+#define LICENSE_FETCHER_H_
+
+#include "protos/license_protos.pb.h"
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+namespace clearkeycas {
+
+// Interface for classes which request a license.
+class LicenseFetcher {
+public:
+ LicenseFetcher() {}
+ virtual ~LicenseFetcher() {}
+
+ // Initializes resources set in subclass-specific calls. This must be called
+ // before threads are started.
+ virtual status_t Init(const char *input) = 0;
+
+ // Fetches license based on |asset_id|. On return, |asset| contains the
+ // decrypted asset_key needed to decrypt content keys.
+ // |asset| must be non-null.
+ virtual status_t FetchLicense(
+ uint64_t asset_id, clearkeycas::Asset* asset) = 0;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(LicenseFetcher);
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // LICENSE_FETCHER_H_
diff --git a/drm/mediacas/plugins/clearkey/ecm.cpp b/drm/mediacas/plugins/clearkey/ecm.cpp
new file mode 100644
index 0000000..9fde13a
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ecm.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ecm"
+
+#include "ecm.h"
+#include "ecm_generator.h"
+#include "protos/license_protos.pb.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/Log.h>
+
+namespace android {
+namespace clearkeycas {
+
+Ecm::Ecm()
+ : asset_id_(0),
+ asset_id_set_(false),
+ system_id_(0),
+ system_id_set_(false) {}
+
+Ecm::~Ecm() {}
+
+status_t Ecm::Parse(const sp<ABuffer>& buffer_as_binary) {
+ if (buffer_as_binary->size() < kSizeBytes) {
+ ALOGE("Short Ecm buffer: expected %zu, received %zu.",
+ kSizeBytes, buffer_as_binary->size());
+ return BAD_VALUE;
+ }
+
+ Asset asset;
+ ecm_generator::DefaultEcmFields default_fields;
+ status_t status = ecm_generator::DecodeECMClearFields(
+ buffer_as_binary, &asset, &default_fields);
+ if (status != OK) {
+ ALOGE("DecodeECMClearFields failed with status %d", status);
+ return status;
+ }
+ set_asset_id(asset.id());
+ set_system_id(default_fields.system_id);
+
+ // Save a copy of the buffer_as_binary for a future DecryptEcm call.
+ set_buffer(buffer_as_binary);
+ return OK;
+}
+
+status_t Ecm::Decrypt(
+ const sp<ABuffer>& buffer_as_binary,
+ const Asset& asset_from_emm) {
+ // Invariant: asset has id. These are postconditions for Emm::Decrypt().
+ CHECK(asset_from_emm.has_id());
+
+ // DecodeEcm fills in |asset|.id() with the asset_id from the encoded Ecm.
+ Asset asset(asset_from_emm);
+ ecm_generator::DefaultEcmFields default_fields;
+ sp<ABuffer> content_key;
+ status_t status = ecm_generator::DecodeECM(
+ buffer_as_binary, &asset, &content_key, &default_fields);
+ if (status != OK) {
+ ALOGE("DecodeECM failed with status %d", status);
+ return status;
+ }
+ if (asset.id() != asset_from_emm.id()) {
+ ALOGE("Asset_id from Emm (%llu) does not match asset_id from Ecm (%llu).",
+ asset_from_emm.id(), asset.id());
+ return CLEARKEY_STATUS_INVALID_PARAMETER;
+ }
+ set_asset_id(asset.id());
+ set_system_id(default_fields.system_id);
+ set_content_key(content_key);
+ return status;
+}
+
+EcmDescriptor::EcmDescriptor() : ecm_set_(false), id_(0), id_set_(false) {}
+
+EcmDescriptor::EcmDescriptor(uint16_t id, const Ecm& ecm)
+: ecm_(ecm), ecm_set_(true), id_(id), id_set_(true) {}
+
+EcmDescriptor::~EcmDescriptor() {}
+
+status_t EcmDescriptor::Parse(const sp<ABuffer>& buffer_as_binary) {
+ if (buffer_as_binary->size() < kSizeBytes) {
+ ALOGE("Short EcmDescriptor buffer: expected %zu, received %zu.",
+ kSizeBytes, buffer_as_binary->size());
+ return BAD_VALUE;
+ }
+ sp<ABuffer> id_buffer = new ABuffer(buffer_as_binary->data(), kIdSizeBytes);
+ const uint8_t *id_bytes = id_buffer->data();
+ uint16_t id = (id_bytes[0] << 8) | id_bytes[1];
+ set_id(id);
+
+ // Unmarshall the contained Ecm.
+ sp<ABuffer> ecm_buffer = new ABuffer(
+ buffer_as_binary->data() + kIdSizeBytes, Ecm::kSizeBytes);
+ status_t status = mutable_ecm()->Parse(ecm_buffer);
+ if (status != OK) {
+ return status;
+ }
+ return OK;
+}
+
+EcmContainer::EcmContainer() : count_(0), count_set_(false) {}
+
+EcmContainer::~EcmContainer() {}
+
+status_t EcmContainer::Add(const EcmDescriptor& descriptor) {
+ switch (count_) {
+ case 0:
+ descriptor_[0] = descriptor;
+ count_ = 1;
+ break;
+ case 1:
+ descriptor_[1] = descriptor;
+ count_ = 2;
+ break;
+ case 2:
+ descriptor_[0] = descriptor_[1];
+ descriptor_[1] = descriptor;
+ break;
+ default:
+ ALOGE("Bad state.");
+ return INVALID_OPERATION;
+ }
+ count_set_ = true;
+ return OK;
+}
+
+status_t EcmContainer::Parse(const sp<ABuffer>& buffer_as_binary) {
+ // EcmContainer can contain 1 or 2 EcmDescriptors so this is a check for
+ // minimum size.
+ if (buffer_as_binary->size() < kMinimumSizeBytes) {
+ ALOGE("Short EcmContainer buffer: expected >= %zu, received %zu.",
+ kMinimumSizeBytes, buffer_as_binary->size());
+ return BAD_VALUE;
+ }
+
+ sp<ABuffer> count_buffer = new ABuffer(
+ buffer_as_binary->data(), kCountSizeBytes);
+ const uint8_t *count_bytes = count_buffer->data();
+ size_t count = (count_bytes[0] << 8) | count_bytes[1];
+ // Check that count is a legal value.
+ if (!CountLegal(count)) {
+ ALOGE("Invalid descriptor count: expected %zu <= count <= %zu, received %zu.",
+ kMinDescriptorCount, kMaxDescriptorCount, count);
+ return ERROR_OUT_OF_RANGE;
+ }
+ // If needed, check that buffer_as_binary can hold 2 EcmDescriptors.
+ if (count > kMinDescriptorCount) {
+ size_t expected_bytes =
+ kCountSizeBytes + (count * EcmDescriptor::kSizeBytes);
+ if (buffer_as_binary->size() < expected_bytes) {
+ ALOGE("Short EcmContainer buffer: expected %zu, received %zu.",
+ expected_bytes, buffer_as_binary->size());
+ return BAD_VALUE;
+ }
+ }
+ set_count(count);
+ // Unmarshall the contained EcmDescriptors.
+ size_t offset = kCountSizeBytes;
+ for (size_t i = 0; i < count_; ++i) {
+ sp<ABuffer> descriptor_buffer = new ABuffer(
+ buffer_as_binary->data() + offset, EcmDescriptor::kSizeBytes);
+ status_t status = mutable_descriptor(i)->Parse(descriptor_buffer);
+ if (status != OK) {
+ return status;
+ }
+ offset += EcmDescriptor::kSizeBytes;
+ }
+ return OK;
+}
+
+} // namespace clearkeycas
+} // namespace android
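The rotation behavior implemented by EcmContainer::Add() above is easiest to see in a short usage sketch (illustrative only, not part of the patch; ecm0, ecm1 and ecm2 stand for Ecm objects already initialized via Ecm::Parse()):

    android::clearkeycas::EcmContainer container;
    container.Add(android::clearkeycas::EcmDescriptor(0, ecm0));  // count() == 1, ids {0}
    container.Add(android::clearkeycas::EcmDescriptor(1, ecm1));  // count() == 2, ids {0, 1}
    container.Add(android::clearkeycas::EcmDescriptor(2, ecm2));  // oldest dropped, ids {1, 2}

Every Add() past the second keeps the container at two descriptors, matching the odd/even scrambling-key handover described in ecm.h.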
diff --git a/drm/mediacas/plugins/clearkey/ecm.h b/drm/mediacas/plugins/clearkey/ecm.h
new file mode 100644
index 0000000..aef8afb
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ecm.h
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Data objects encapsulating the clear key Ecm (Entitlement Control
+// Message) and related container messages. Deserialization and decryption
+// are handled externally to reduce build-time dependencies.
+//
+// Simplified typical client-side use:
+// Asset asset; // from the AssetRegistry.
+//   sp<ABuffer> ecm_buffer;  // received over the network, contains an EcmContainer.
+//   EcmContainer ecm_container;
+//   status_t status = ecm_container.Parse(ecm_buffer);
+//   // Check ecm_container.descriptor_size() before indexing, then:
+//   status = ecm_container.mutable_descriptor(1)->mutable_ecm()->Decrypt(
+//       ecm_container.descriptor(1).ecm().buffer(), asset);
+//   sp<ABuffer> content_key;
+//   if (ecm_container.descriptor(1).ecm().has_content_key()) {
+//     content_key = ecm_container.descriptor(1).ecm().content_key();
+// }
+// // use |content_key| to decrypt content.
+//
+// Simplified typical server-side use:
+// EcmContainer container;
+//   sp<ABuffer> encoded_ecm;
+//   // Use the ecm_generator API to encode and encrypt an ECM from data fields.
+//   status_t status = ecm_generator::EncodeECM(..., &encoded_ecm);
+//   // Use |encoded_ecm| to initialize the Ecm from this library.
+//   Ecm ecm;
+//   status = ecm.Parse(encoded_ecm);
+// EcmDescriptor descriptor(crypto_period_id, ecm);
+// status = container.Add(descriptor);
+// string serialized_container;
+// status = container.Marshall(&serialized_container);
+// // now |serialized_container| can be sent to the STB.
+//
+// Due to past overloading of the term "ECM" this library introduces some
+// new terminology.
+//
+// Ecm: the 32-byte message sent from the head end to a packager that contains
+// the asset_id, system_id, and content_key (clear).
+//
+// EcmDescriptor: contains an Ecm and an id (the crypto period id in the case
+// of the BroadcastEncryptor). It contains no encrypted fields.
+//
+// EcmContainer: sent by the server in the video stream using the ECM pid.
+// This contains 1 or 2 EcmDescriptors and a count. It contains no
+// encrypted fields.
+//
+// The first EcmContainer sent by the server has only one EcmDescriptor. After
+// the first crypto period change, an EcmContainer contains 2 EcmDescriptors.
+// One has an odd id and one has an even id. The decrypted content keys from the
+// Ecms in the EcmDescriptors are used by the Mpeg2 parser as odd and even
+// scrambling keys. As the crypto period changes, the oldest EcmDescriptor is
+// dropped from the EcmContainer and the new EcmDescriptor is added.
+//
+// These classes use a simplified protobuf model. For non-repeating fields,
+// - has_foo() indicates whether the field is populated.
+// - the accessor foo() returns either a value or a const reference.
+// - a mutator sets the value. Primitive types and strings use
+// set_foo(value) while for objects mutable_foo() returns a pointer.
+//
+// To prevent null references, objects (like the Asset contained in an Emm)
+// are allocated as members and can be accessed via foo() even if they have
+// not been populated. The caller must call has_foo() to make sure that the
+// object is valid. Calling mutable_foo() to obtain a pointer causes has_foo()
+// to return true.
+//
+// Repeated fields (like the EcmDescriptors contained in an EcmContainer) are
+// handled differently.
+// - foo_size() returns the number of instances.
+// - the accessor foo(index) returns either a value or a const reference to
+// the instance at index. It is illegal to call with |index| >= the value
+// returned by foo_size(). |index| is checked with CHECK.
+// - a mutator to change the value of the instance. Primitive types and
+// strings use set_foo(index, value) while for objects mutable_foo(index)
+// returns a pointer. It is illegal to call with |index| >= the value
+// returned by foo_size(). |index| is checked with CHECK.
+//
+// Accessing a repeated field with an invalid index causes CHECK to fail.
+// Be sure to call EcmContainer::descriptor_size() before calling descriptor()
+// or mutable_descriptor()!
+//
+#ifndef CLEAR_KEY_ECM_H_
+#define CLEAR_KEY_ECM_H_
+
+#include <stddef.h>
+#include <string>
+
+#include "protos/license_protos.pb.h"
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <utils/Errors.h>
+
+using namespace std;
+
+namespace android {
+namespace clearkeycas {
+
+// Entitlement Control Message. It contains clear fields. The asset_id
+// and system_id as well as the content_key are clear.
+//
+// This class is not thread-safe.
+class Ecm {
+public:
+ // Wire size of ECM.
+ static constexpr size_t kSizeBytes = 16 + 16; // clear fields + clear key
+
+ // Creates an empty ECM which must be initialized via Parse().
+ Ecm();
+
+ ~Ecm();
+
+ // Parses clear fields of Ecm serialized in |buffer_as_binary| and saves
+ // a copy of |buffer_as_binary| for a future DecryptEcm call.
+ // Returns:
+ // - BAD_VALUE if |buffer_as_binary| is too small.
+//  - CLEARKEY_STATUS_INVALIDASSETID via ecm_generator::DecodeECMClearFields if
+//    asset_id is 0.
+//  - CLEARKEY_STATUS_INVALIDSYSTEMID via ecm_generator::DecodeECMClearFields if
+//    system_id is 0.
+ // Postconditions:
+ // - |asset_id_| and |system_id_| are populated with non-zero values.
+ // - |buffer_| contains a copy of the serialized Ecm.
+ status_t Parse(const sp<ABuffer>& buffer_as_binary);
+
+ // Parses and decrypts Ecm serialized in |buffer_as_binary| using
+ // |asset_from_emm|.asset_key().encryption_key(). It is not necessary to call
+ // Parse() first.
+ // Returns BAD_VALUE if |buffer_as_binary| is too small.
+  // Returns CLEARKEY_STATUS_INVALIDASSETID via
+  //     ecm_generator::DecodeECMClearFields if asset_id is 0.
+  // Returns CLEARKEY_STATUS_INVALIDSYSTEMID via
+  //     ecm_generator::DecodeECMClearFields if system_id is 0.
+ // Returns CLEARKEY_STATUS_INVALID_PARAMETER if
+ // - asset_id in |asset_from_emm| does not match asset_id in serialized Ecm.
+ // Preconditions: |asset_from_emm| must contain asset_id and asset_key fields.
+ // Postconditions: asset_id() and system_id() are populated with non-zero
+ // values, content_key() is populated with the clear content key.
+ status_t Decrypt(const sp<ABuffer>& buffer_as_binary,
+ const Asset& asset_from_emm);
+
+ // |buffer_| is a serialized copy of the Ecm used for later decryption or
+ // for marshalling.
+ inline bool has_buffer() const { return buffer_ != NULL; }
+ const sp<ABuffer> buffer() const { return buffer_; }
+ inline void set_buffer(const sp<ABuffer>& buffer) {
+ buffer_ = ABuffer::CreateAsCopy(buffer->data(), buffer->size());
+ }
+
+  // |content_key| is the clear encryption/decryption key generated by the server.
+ inline bool has_content_key() const { return content_key_ != NULL; }
+ inline void set_content_key(const sp<ABuffer>& value) {
+ content_key_ = ABuffer::CreateAsCopy(value->data(), value->size());
+ }
+ inline const sp<ABuffer> content_key() const { return content_key_; }
+
+ // |asset_id| from the server.
+ inline bool has_asset_id() const { return asset_id_set_; }
+ inline uint64_t asset_id() const { return asset_id_; }
+ inline void set_asset_id(uint64_t value) {
+ asset_id_ = value;
+ asset_id_set_ = true;
+ }
+
+ // |system_id| from the server.
+ inline bool has_system_id() const { return system_id_set_; }
+ inline uint32_t system_id() const { return system_id_; }
+ inline void set_system_id(uint32_t value) {
+ system_id_ = value;
+ system_id_set_ = true;
+ }
+
+private:
+ uint64_t asset_id_;
+ bool asset_id_set_;
+ sp<ABuffer> buffer_;
+ sp<ABuffer> content_key_;
+ uint32_t system_id_;
+ bool system_id_set_;
+};
+
+// Contains an Ecm and an id.
+// This class is not thread-safe.
+class EcmDescriptor {
+public:
+ // Wire size of Id field.
+ static constexpr size_t kIdSizeBytes = sizeof(uint16_t);
+ // Wire size of EcmDescriptor.
+ static constexpr size_t kSizeBytes = Ecm::kSizeBytes + kIdSizeBytes;
+
+ // Client-side ctor. Populate from a buffer with Parse().
+ EcmDescriptor();
+
+ // Server-side ctor.
+ // Args:
+ // - |id| is the crypto period ID.
+  //  - |ecm| is an ECM which must have been initialized with Ecm::Parse().
+ EcmDescriptor(uint16_t id, const Ecm& ecm);
+
+ ~EcmDescriptor();
+
+ // Parses EcmDescriptor and its contained Ecm which are serialized in the
+ // binary string |buffer_as_binary|.
+ // Returns
+ // - BAD_VALUE if |buffer_as_binary| is too short to contain a
+ // serialized EcmDescriptor.
+ // - Errors returned by Ecm::Parse.
+ // Postconditions:
+ // - id() is populated. Note that 0 is a legal value.
+ // - the clear fields of the contained Ecm have been populated.
+ status_t Parse(const sp<ABuffer>& buffer_as_binary);
+
+ // |id| of the contained Ecm. Typically the crypto period id.
+ inline bool has_id() const { return id_set_; }
+ inline void set_id(uint16_t value) {
+ id_ = value;
+ id_set_ = true;
+ }
+ inline uint16_t id() const { return id_; }
+
+ // The contained |ecm|.
+ inline bool has_ecm() const { return ecm_set_; }
+ inline Ecm* mutable_ecm() {
+ ecm_set_ = true;
+ return &ecm_;
+ }
+ inline const Ecm& ecm() const { return ecm_; }
+
+private:
+ Ecm ecm_;
+ bool ecm_set_;
+ uint16_t id_;
+ bool id_set_;
+};
+
+// Contains a count and 1 or 2 EcmDescriptors. This is included in the video
+// stream by the sender in the ECM pid.
+// This class is not thread-safe.
+class EcmContainer {
+public:
+ // Wire size of the count field.
+ static constexpr size_t kCountSizeBytes = sizeof(uint16_t);
+ // Minimum wire size assuming one EcmDescriptor.
+ static constexpr size_t kMinimumSizeBytes =
+ EcmDescriptor::kSizeBytes + kCountSizeBytes;
+ static constexpr size_t kMinDescriptorCount = 1;
+ static constexpr size_t kMaxDescriptorCount = 2;
+
+ // Creates an empty EcmContainer which must be populated via Parse()
+ // (client-side) or Add() (server-side).
+ EcmContainer();
+
+ ~EcmContainer();
+
+ // Adds an EcmDescriptor for server-side applications.
+ // If |count_| is 2, |descriptor| replaces the oldest EcmDescriptor.
+ //
+ // Returns:
+  //  - INVALID_OPERATION if the EcmContainer is in a bad state (count != 0, 1, or 2).
+ // Postconditions:
+ // - count() is within bounds (1 or 2).
+ status_t Add(const EcmDescriptor& descriptor);
+
+ // Parses EcmContainer and its contained EcmDescriptors which are serialized
+ // in |buffer_as_binary|.
+ // Returns
+ // - BAD_VALUE if |buffer_as_binary| is too short to contain a
+ // serialized EcmDescriptor.
+ // - ERROR_OUT_OF_RANGE if the count contained in the serialized EcmContainer
+ // is not 1 or 2.
+ // - Errors returned by EcmDescriptor::Parse.
+ // Postconditions:
+  //  - count() is within bounds (1 or 2).
+ // - contained EcmDescriptor(s) parsed and populated.
+ status_t Parse(const sp<ABuffer>& buffer_as_binary);
+
+ inline bool has_count() const { return count_set_; }
+  // Sets the |count| of contained EcmDescriptors. Illegal values are silently
+ // ignored.
+ inline void set_count(size_t count) {
+ if (!CountLegal(count)) return;
+ count_ = count;
+ count_set_ = true;
+ }
+  // Number of contained EcmDescriptors. Only 1 and 2 are legal values.
+ inline size_t count() const { return count_; }
+
+  // Returns the number of contained descriptors. This duplicates count() but
+  // is provided for protobuf-style compatibility.
+ inline size_t descriptor_size() const { return count_; }
+
+  // Returns a pointer to the EcmDescriptor at |index|. |index| is not
+  // range-checked here, so always call descriptor_size() first!
+ inline EcmDescriptor* mutable_descriptor(size_t index) {
+ //CHECK(IndexValid(index));
+ return &descriptor_[index];
+ }
+
+  // Returns a reference to the EcmDescriptor at |index|. |index| is not
+  // range-checked here, so call descriptor_size() first!
+ inline const EcmDescriptor& descriptor(size_t index) const {
+ //CHECK(IndexValid(index));
+ return descriptor_[index];
+ }
+
+private:
+ // Count value must be 1 or 2.
+ inline bool CountLegal(size_t count) const {
+ return count <= kMaxDescriptorCount && count >= kMinDescriptorCount;
+ }
+ // Index must be 0 or 1.
+ inline bool IndexLegal(size_t index) const {
+ return index < kMaxDescriptorCount;
+ }
+ // |index| is valid for this object: it is legal and < count_.
+ inline bool IndexValid(size_t index) const {
+ if (!IndexLegal(index)) return false;
+ return index < count_;
+ }
+ size_t count_;
+ bool count_set_;
+ EcmDescriptor descriptor_[kMaxDescriptorCount];
+
+ DISALLOW_EVIL_CONSTRUCTORS(EcmContainer);
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // CLEAR_KEY_ECM_H_
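To make the parse-then-decrypt flow and the descriptor_size() convention described in the comment block above concrete, here is a minimal client-side sketch. The helper name DecryptNewestEcm and the surrounding glue are illustrative assumptions, not part of this patch; the Asset is assumed to come from the license/EMM path as in ClearKeyFetcher:

    #include <media/stagefright/foundation/ABuffer.h>
    #include "ecm.h"
    #include "protos/license_protos.pb.h"

    // Parses an EcmContainer from |container_buffer| and decrypts its newest
    // Ecm with the asset key carried by |asset_from_emm|.
    android::status_t DecryptNewestEcm(
            const android::sp<android::ABuffer>& container_buffer,
            const android::clearkeycas::Asset& asset_from_emm,
            android::sp<android::ABuffer>* content_key) {
        android::clearkeycas::EcmContainer container;
        android::status_t status = container.Parse(container_buffer);
        if (status != android::OK) {
            return status;
        }
        // A successful Parse() guarantees descriptor_size() is 1 or 2.
        size_t newest = container.descriptor_size() - 1;
        android::clearkeycas::Ecm* ecm =
                container.mutable_descriptor(newest)->mutable_ecm();
        status = ecm->Decrypt(ecm->buffer(), asset_from_emm);
        if (status != android::OK) {
            return status;
        }
        *content_key = ecm->content_key();
        return android::OK;
    }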
diff --git a/drm/mediacas/plugins/clearkey/ecm_generator.cpp b/drm/mediacas/plugins/clearkey/ecm_generator.cpp
new file mode 100644
index 0000000..f1aa973
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ecm_generator.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ecm_generator"
+#include "ecm_generator.h"
+
+#include <string.h>
+#include <algorithm>
+#include <endian.h>
+
+#include "protos/license_protos.pb.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaErrors.h>
+#include <openssl/aes.h>
+#include <utils/Log.h>
+
+namespace android {
+namespace clearkeycas {
+
+// These constants are internal to this module.
+const uint16_t kEcmClearFieldsSize = 16;
+const uint32_t kContentKeyByteSize = 16;
+const uint16_t kTotalEcmSize =
+ kEcmClearFieldsSize + kContentKeyByteSize; // clear fields + clear key
+
+const uint32_t kKeyLength = 16;
+
+#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32_t *>(_p))
+
+static uint32_t Load32(const void *p) {
+ return ntohl(UNALIGNED_LOAD32(p));
+}
+
+static uint32_t LoadNext32(const uint8_t** pptr) {
+ CHECK(pptr);
+ CHECK(*pptr);
+ const uint8_t* p = *pptr;
+ *pptr += sizeof(uint32_t);
+ return Load32(p);
+}
+
+namespace ecm_generator {
+
+status_t DecodeECM(const sp<ABuffer>& ecm, Asset* asset,
+ sp<ABuffer> *content_key, DefaultEcmFields* default_fields) {
+ CHECK(asset);
+ CHECK(content_key);
+ CHECK(default_fields);
+
+ status_t status = DecodeECMClearFields(ecm, asset, default_fields);
+ if (status != OK) {
+ return status;
+ }
+
+ const uint8_t* ptr = ecm->data() + kEcmClearFieldsSize;
+ *content_key = new ABuffer(kContentKeyByteSize);
+ memcpy((*content_key)->data(), ptr, kContentKeyByteSize);
+
+ return OK;
+}
+
+status_t DecodeECMClearFields(const sp<ABuffer>& ecm, Asset* asset,
+ DefaultEcmFields* default_fields) {
+ CHECK(asset);
+ CHECK(default_fields);
+
+ if (ecm->size() < kTotalEcmSize) {
+ ALOGE("Short ECM: expected_length=%zu, actual_length=%zu",
+ kTotalEcmSize, ecm->size());
+ return BAD_VALUE;
+ }
+ const uint8_t* ptr = ecm->data();
+ default_fields->old_version = LoadNext32(&ptr);
+ default_fields->clear_lead = LoadNext32(&ptr);
+ default_fields->system_id = LoadNext32(&ptr);
+ // The real ecm version is hidden in the system id.
+ default_fields->ecm_version = (default_fields->system_id >> 24) & 0xFF;
+ default_fields->system_id &= 0x00FFFFFF;
+ if (default_fields->system_id == 0) {
+ ALOGE("Ecm has invalid system_id 0");
+ return CLEARKEY_STATUS_INVALIDSYSTEMID;
+ }
+ asset->set_id(LoadNext32(&ptr));
+ if (asset->id() == 0) {
+ ALOGE("Ecm has invalid asset_id 0");
+ return CLEARKEY_STATUS_INVALIDASSETID;
+ }
+ return OK;
+}
+
+} // namespace ecm_generator
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/ecm_generator.h b/drm/mediacas/plugins/clearkey/ecm_generator.h
new file mode 100644
index 0000000..2ef06c4
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ecm_generator.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEAR_KEY_ECM_GENERATOR_H_
+#define CLEAR_KEY_ECM_GENERATOR_H_
+
+#include <string>
+
+#include "protos/license_protos.pb.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/MediaErrors.h>
+
+using namespace std;
+
+namespace android {
+namespace clearkeycas {
+enum {
+ CLEARKEY_STATUS_BASE = ERROR_DRM_VENDOR_MAX,
+ CLEARKEY_STATUS_INVALIDASSETID = CLEARKEY_STATUS_BASE - 1,
+ CLEARKEY_STATUS_INVALIDSYSTEMID = CLEARKEY_STATUS_BASE - 2,
+ CLEARKEY_STATUS_INVALID_PARAMETER = CLEARKEY_STATUS_BASE - 3,
+};
+class Organization;
+
+namespace ecm_generator {
+
+// Layout of the ECM
+// ECM
+// 0 - 3 : Old ECM version (deprecated)
+// 4 - 7 : Clear lead (milliseconds)
+// 8 : ECM Version
+// 9 - 11 : System ID
+// 12 - 15 : Asset ID
+// 16 - 31 : Content Key (clear)
+//
+// The asset ID decoded from the ECM (bytes 12-15) is compared against the
+// asset ID carried by the EMM as a consistency check (see Ecm::Decrypt).
+
+struct DefaultEcmFields {
+ uint32_t old_version;
+ uint32_t clear_lead;
+ uint32_t ecm_version;
+ uint32_t system_id;
+};
+
+// Decodes a clear key ecm.
+// The following fields are decoded from the clear fields portion of the ecm:
+// asset->id
+// default_fields->old_version
+// default_fields->clear_lead
+// default_fields->system_id
+// default_fields->ecm_version
+//
+// The following fields are decoded from the content key portion of the ecm:
+// content_key
+//
+// |asset|, |content_key|, |default_fields| are owned by caller and must not
+// be NULL.
+// Returns failure via ecm_generator::DecodeECMClearFields.
+//
+// Example usage:
+// Asset asset;
+// string content_key;
+// DefaultEcmFields default_fields;
+// // Get a clear key |ecm|.
+// status_t status = ecm_generator::DecodeECM(ecm, &asset, &content_key, &default_fields);
+status_t DecodeECM(const sp<ABuffer>& ecm, Asset* asset,
+ sp<ABuffer> *content_key, DefaultEcmFields* default_fields);
+
+// Decodes the following fields from the clear fields portion of the ecm:
+// asset->id
+// default_fields->old_version
+// default_fields->clear_lead
+// default_fields->system_id
+// default_fields->ecm_version
+//
+// |asset| and |default_fields| are owned by the caller and must not be NULL.
+// Returns:
+//  - BAD_VALUE on short ECM, or
+//  - CLEARKEY_STATUS_INVALIDASSETID if asset_id is 0, or
+//  - CLEARKEY_STATUS_INVALIDSYSTEMID if system_id is 0.
+//
+// Example usage:
+// Asset asset;
+// DefaultEcmFields default_fields;
+// // Get a clear key ecm.
+// status_t status = ecm_generator::DecodeECMClearFields(ecm, &asset, &default_fields);
+status_t DecodeECMClearFields(const sp<ABuffer>& ecm, Asset* asset,
+ DefaultEcmFields* default_fields);
+
+} // namespace ecm_generator
+} // namespace clearkeycas
+} // namespace android
+#endif // CLEAR_KEY_ECM_GENERATOR_H_
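The byte layout documented above can be decoded with plain big-endian loads; the standalone sketch below mirrors what DecodeECMClearFields() does, with the ClearFields struct and the function names being illustrative assumptions rather than part of the patch:

    #include <stddef.h>
    #include <stdint.h>

    // Big-endian 32-bit load, equivalent to the ntohl-based Load32() in
    // ecm_generator.cpp on a little-endian device.
    static uint32_t LoadBE32(const uint8_t* p) {
        return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
               (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
    }

    struct ClearFields {
        uint32_t old_version;  // bytes 0-3 (deprecated)
        uint32_t clear_lead;   // bytes 4-7, milliseconds
        uint32_t ecm_version;  // byte 8
        uint32_t system_id;    // bytes 9-11
        uint32_t asset_id;     // bytes 12-15
    };

    // Returns false on a short buffer or a zero system_id/asset_id. Bytes
    // 16-31 of the full 32-byte ECM carry the clear content key.
    bool ParseClearFields(const uint8_t* ecm, size_t size, ClearFields* out) {
        if (size < 32 || out == nullptr) {
            return false;
        }
        out->old_version = LoadBE32(ecm + 0);
        out->clear_lead  = LoadBE32(ecm + 4);
        const uint32_t word2 = LoadBE32(ecm + 8);  // version byte + 24-bit system id
        out->ecm_version = word2 >> 24;
        out->system_id   = word2 & 0x00FFFFFF;
        out->asset_id    = LoadBE32(ecm + 12);
        return out->system_id != 0 && out->asset_id != 0;
    }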
diff --git a/drm/mediacas/plugins/clearkey/protos/license_protos.proto b/drm/mediacas/plugins/clearkey/protos/license_protos.proto
new file mode 100644
index 0000000..397145d
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/protos/license_protos.proto
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto2";
+
+package android.clearkeycas;
+
+option java_package = "com.google.video.clearkey.protos";
+
+// The Asset is the data describing licensing requirements and policy for a
+// customer's video asset.
+//
+// The asset_id must not be set on creation. It is only used for assets of
+// CasType: CLEARKEY_CAS.
+//
+message Asset {
+ // Indicates the type of digital rights management scheme used.
+ // CLEARKEY_CAS: Clearkey Media CAS.
+ enum CasType {
+ UNKNOWN = 0;
+ CLEARKEY_CAS = 1;
+ }
+
+ // Must be unset on creation. Required for mutate operations on CLEARKEY_CAS assets.
+ optional uint64 id = 1;
+
+ // Organization-specified name of the asset. Required. Must not be empty.
+ // 'bytes' instead of 'string' due to UTF-8 validation in the latter.
+ optional bytes name = 2;
+
+ // The lowercase_organization_name is required. It's a foreign key to the
+ // Organization table and part of the primary key for the Asset table.
+ optional string lowercase_organization_name = 3;
+
+ // The policy_name is required. It's a foreign key to the policy table.
+ optional string policy_name = 4; // Name of the Policy to apply to this asset.
+
+ // Key information for decrypting content. Not used for CLEARKEY_CAS.
+ optional AssetKey asset_key = 5;
+
+ optional CasType cas_type = 6 [default = UNKNOWN];
+}
+
+// AssetKey defines a key that can be used to decrypt the license.
+// Note: In the previous implementation, the schema accommodated multiple
+// asset keys per asset. This is not supported in this implementation.
+message AssetKey {
+  optional bytes encryption_key = 1; // Encryption key for the asset (16 bytes in this clear key implementation).
+}
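For reference, the generated proto2 C++ API for this schema follows the usual set_foo()/mutable_foo() pattern; the sketch below builds an Asset roughly matching the JSON test asset in ClearKeyFetcherTest.cpp, with the policy name and the all-zero 16-byte key being placeholder assumptions rather than values from the patch:

    #include <string>
    #include "protos/license_protos.pb.h"

    android::clearkeycas::Asset MakeExampleAsset() {
        android::clearkeycas::Asset asset;
        asset.set_id(21140844);
        asset.set_name("Test Title");
        asset.set_lowercase_organization_name("Android");
        asset.set_policy_name("example_policy");  // placeholder
        asset.set_cas_type(android::clearkeycas::Asset::CLEARKEY_CAS);
        asset.mutable_asset_key()->set_encryption_key(std::string(16, '\0'));
        return asset;
    }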
diff --git a/drm/mediacas/plugins/clearkey/tests/Android.mk b/drm/mediacas/plugins/clearkey/tests/Android.mk
new file mode 100644
index 0000000..5418c1d
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/tests/Android.mk
@@ -0,0 +1,44 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ ClearKeyFetcherTest.cpp
+
+LOCAL_MODULE := ClearKeyFetcherTest
+
+# LOCAL_LDFLAGS is needed here for the test to use the plugin, because
+# the plugin is not in standard library search path. Without this .so
+# loading fails at run-time (linking is okay).
+LOCAL_LDFLAGS := \
+ -Wl,--rpath,\$${ORIGIN}/../../../system/vendor/lib/mediacas -Wl,--enable-new-dtags
+
+LOCAL_SHARED_LIBRARIES := \
+ libutils libclearkeycasplugin libstagefright_foundation libprotobuf-cpp-lite liblog
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/frameworks/av/drm/mediacas/plugins/clearkey \
+ $(TOP)/frameworks/av/include \
+ $(TOP)/frameworks/native/include/media \
+
+LOCAL_MODULE_TAGS := tests
+
+include $(BUILD_NATIVE_TEST)
+
+
+
diff --git a/drm/mediacas/plugins/clearkey/tests/ClearKeyFetcherTest.cpp b/drm/mediacas/plugins/clearkey/tests/ClearKeyFetcherTest.cpp
new file mode 100644
index 0000000..ace086a
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/tests/ClearKeyFetcherTest.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeyFetcherTest"
+#include <utils/Log.h>
+#include <gtest/gtest.h>
+#include <stddef.h>
+#include <algorithm>
+#include <string>
+
+#include "ClearKeyFetcher.h"
+#include "ClearKeyLicenseFetcher.h"
+#include "protos/license_protos.pb.h"
+
+namespace android {
+namespace clearkeycas {
+
+const char *kTestAssetInJson =
+ "{ "
+ " \"id\": 21140844, "
+ " \"name\": \"Test Title\", "
+ " \"lowercase_organization_name\": \"Android\", "
+ " \"asset_key\": { "
+ " \"encryption_key\": \"nezAr3CHFrmBR9R8Tedotw==\" "
+ " }, "
+ " \"cas_type\": 1, "
+ " \"track_types\": [ ] "
+ "} " ;
+
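+// kTestEcmContainer is a serialized EcmContainer: a 2-byte descriptor count
+// (0x0002) followed by two 34-byte EcmDescriptors. Each descriptor is a
+// 2-byte id (0, then 1) followed by a 32-byte Ecm whose clear fields decode
+// to old_version=1, clear_lead=0x2710 (10000 ms), ecm_version=2,
+// system_id=0x000177 and asset_id=0x0142956c (21140844, matching the asset
+// id in kTestAssetInJson); the final 16 bytes of each Ecm are
+// kTestContentKey0 and kTestContentKey1 respectively.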
+const uint8_t kTestEcmContainer[] = {
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x27, 0x10, 0x02, 0x00, 0x01, 0x77,
+ 0x01, 0x42, 0x95, 0x6c, 0x0e, 0xe3, 0x91, 0xbc,
+ 0xfd, 0x05, 0xb1, 0x60, 0x4f, 0x17, 0x82, 0xa4,
+ 0x86, 0x9b, 0x23, 0x56, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x27, 0x10, 0x02, 0x00,
+ 0x01, 0x77, 0x01, 0x42, 0x95, 0x6c, 0xd7, 0x43,
+ 0x62, 0xf8, 0x1c, 0x62, 0x19, 0x05, 0xc7, 0x3a,
+ 0x42, 0xcd, 0xfd, 0xd9, 0x13, 0x48,
+};
+
+const uint8_t kTestContentKey0[] = {
+ 0x0e, 0xe3, 0x91, 0xbc, 0xfd, 0x05, 0xb1, 0x60,
+ 0x4f, 0x17, 0x82, 0xa4, 0x86, 0x9b, 0x23, 0x56};
+
+const uint8_t kTestContentKey1[] = {
+ 0xd7, 0x43, 0x62, 0xf8, 0x1c, 0x62, 0x19, 0x05,
+ 0xc7, 0x3a, 0x42, 0xcd, 0xfd, 0xd9, 0x13, 0x48};
+
+constexpr uint32_t kTestEcmCount = 2;
+
+class ClearKeyFetcherTest : public testing::Test {
+protected:
+ virtual void SetUp();
+
+protected:
+ std::unique_ptr<ClearKeyLicenseFetcher> license_fetcher_;
+ sp<ABuffer> ecm_;
+ sp<ABuffer> content_key_[kTestEcmCount];
+};
+
+void ClearKeyFetcherTest::SetUp() {
+ license_fetcher_.reset(new ClearKeyLicenseFetcher());
+ EXPECT_EQ(OK, license_fetcher_->Init(kTestAssetInJson));
+ ecm_ = new ABuffer((void*) (kTestEcmContainer), sizeof(kTestEcmContainer));
+ content_key_[0] = new ABuffer(
+ (void*)kTestContentKey0, sizeof(kTestContentKey0));
+ content_key_[1] = new ABuffer(
+ (void*)kTestContentKey1, sizeof(kTestContentKey1));
+}
+
+TEST_F(ClearKeyFetcherTest, Ctor) {
+ ClearKeyFetcher fetcher(std::move(license_fetcher_));
+}
+
+TEST_F(ClearKeyFetcherTest, Success) {
+ ClearKeyFetcher fetcher(std::move(license_fetcher_));
+ EXPECT_EQ(OK, fetcher.Init());
+ uint64_t asset_id;
+ std::vector<KeyFetcher::KeyInfo> keys;
+ EXPECT_EQ(OK, fetcher.ObtainKey(ecm_, &asset_id, &keys));
+ EXPECT_EQ(2, keys.size());
+ EXPECT_EQ(0, keys[0].key_id);
+ EXPECT_EQ(content_key_[0]->size(), keys[0].key_bytes->size());
+ EXPECT_EQ(0, memcmp(content_key_[0]->data(),
+ keys[0].key_bytes->data(), content_key_[0]->size()));
+ EXPECT_EQ(1, keys[1].key_id);
+ EXPECT_EQ(content_key_[1]->size(), keys[1].key_bytes->size());
+ EXPECT_EQ(0, memcmp(content_key_[1]->data(),
+ keys[1].key_bytes->data(), content_key_[1]->size()));
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/mock/Android.mk b/drm/mediacas/plugins/mock/Android.mk
new file mode 100644
index 0000000..a97fac6
--- /dev/null
+++ b/drm/mediacas/plugins/mock/Android.mk
@@ -0,0 +1,37 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ MockCasPlugin.cpp \
+ MockSessionLibrary.cpp \
+
+LOCAL_MODULE := libmockcasplugin
+
+LOCAL_PROPRIETARY_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := mediacas
+
+LOCAL_SHARED_LIBRARIES := \
+ libutils liblog
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/frameworks/av/include \
+ $(TOP)/frameworks/native/include/media \
+
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.cpp b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
new file mode 100644
index 0000000..12cac60
--- /dev/null
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "MockCasPlugin"
+
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/Log.h>
+
+#include "MockCasPlugin.h"
+#include "MockSessionLibrary.h"
+
+android::CasFactory* createCasFactory() {
+ return new android::MockCasFactory();
+}
+
+android::DescramblerFactory* createDescramblerFactory() {
+ return new android::MockDescramblerFactory();
+}
+
+namespace android {
+
+static const int32_t sMockId = 0xFFFF;
+
+bool MockCasFactory::isSystemIdSupported(int32_t CA_system_id) const {
+ return CA_system_id == sMockId;
+}
+
+status_t MockCasFactory::queryPlugins(
+ std::vector<CasPluginDescriptor> *descriptors) const {
+ descriptors->clear();
+ descriptors->push_back({sMockId, String8("MockCAS")});
+ return OK;
+}
+
+status_t MockCasFactory::createPlugin(
+ int32_t CA_system_id,
+ uint64_t appData,
+ CasPluginCallback callback,
+ CasPlugin **plugin) {
+ if (!isSystemIdSupported(CA_system_id)) {
+ return BAD_VALUE;
+ }
+
+ *plugin = new MockCasPlugin();
+ return OK;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool MockDescramblerFactory::isSystemIdSupported(int32_t CA_system_id) const {
+ return CA_system_id == sMockId;
+}
+
+status_t MockDescramblerFactory::createPlugin(
+ int32_t CA_system_id, DescramblerPlugin** plugin) {
+ if (!isSystemIdSupported(CA_system_id)) {
+ return BAD_VALUE;
+ }
+
+ *plugin = new MockDescramblerPlugin();
+ return OK;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static String8 arrayToString(const std::vector<uint8_t> &array) {
+ String8 result;
+ for (size_t i = 0; i < array.size(); i++) {
+ result.appendFormat("%02x ", array[i]);
+ }
+ if (result.isEmpty()) {
+ result.append("(null)");
+ }
+ return result;
+}
+
+MockCasPlugin::MockCasPlugin() {
+ ALOGV("CTOR");
+}
+
+MockCasPlugin::~MockCasPlugin() {
+ ALOGV("DTOR");
+ MockSessionLibrary::get()->destroyPlugin(this);
+}
+
+status_t MockCasPlugin::setPrivateData(const CasData &data) {
+ ALOGV("setPrivateData");
+ return OK;
+}
+
+status_t MockCasPlugin::openSession(
+ uint16_t program_number, CasSessionId* sessionId) {
+ ALOGV("openSession: program_number=%u", program_number);
+ return MockSessionLibrary::get()->addSession(
+ this, program_number, 0, sessionId);
+}
+
+status_t MockCasPlugin::openSession(
+ uint16_t program_number,
+ uint16_t elementary_PID,
+ CasSessionId *sessionId) {
+ ALOGV("openSession: program_number=%u, elementary_PID=%u",
+ program_number, elementary_PID);
+
+ return MockSessionLibrary::get()->addSession(
+ this, program_number, elementary_PID, sessionId);
+}
+
+status_t MockCasPlugin::closeSession(const CasSessionId &sessionId) {
+ ALOGV("closeSession: sessionId=%s", arrayToString(sessionId).string());
+ Mutex::Autolock lock(mLock);
+
+ sp<MockCasSession> session =
+ MockSessionLibrary::get()->findSession(sessionId);
+ if (session == NULL) {
+ return BAD_VALUE;
+ }
+
+ MockSessionLibrary::get()->destroySession(sessionId);
+ return OK;
+}
+
+status_t MockCasPlugin::setSessionPrivateData(
+ const CasSessionId &sessionId, const CasData &data) {
+ ALOGV("setSessionPrivateData: sessionId=%s",
+ arrayToString(sessionId).string());
+ Mutex::Autolock lock(mLock);
+
+ sp<MockCasSession> session =
+ MockSessionLibrary::get()->findSession(sessionId);
+ if (session == NULL) {
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+status_t MockCasPlugin::processEcm(
+ const CasSessionId &sessionId, const CasEcm& ecm) {
+ ALOGV("processEcm: sessionId=%s", arrayToString(sessionId).string());
+ Mutex::Autolock lock(mLock);
+
+ sp<MockCasSession> session =
+ MockSessionLibrary::get()->findSession(sessionId);
+ if (session == NULL) {
+ return BAD_VALUE;
+ }
+ ALOGV("ECM: size=%d", ecm.size());
+ ALOGV("ECM: data=%s", arrayToString(ecm).string());
+
+ return OK;
+}
+
+status_t MockCasPlugin::processEmm(const CasEmm& emm) {
+ ALOGV("processEmm");
+ Mutex::Autolock lock(mLock);
+
+ ALOGV("EMM: size=%d", emm.size());
+ ALOGV("EMM: data=%s", arrayToString(emm).string());
+
+ return OK;
+}
+
+status_t MockCasPlugin::sendEvent(
+ int32_t event, int arg, const CasData &eventData) {
+ ALOGV("sendEvent: event=%d", event);
+ Mutex::Autolock lock(mLock);
+
+ return OK;
+}
+
+status_t MockCasPlugin::provision(const String8 &str) {
+ ALOGV("provision: provisionString=%s", str.string());
+ Mutex::Autolock lock(mLock);
+
+ return OK;
+}
+
+status_t MockCasPlugin::refreshEntitlements(
+ int32_t refreshType, const CasData &refreshData) {
+ ALOGV("refreshEntitlements: refreshData=%s", arrayToString(refreshData).string());
+ Mutex::Autolock lock(mLock);
+
+ return OK;
+}
+
+/////////////////////////////////////////////////////////////////
+bool MockDescramblerPlugin::requiresSecureDecoderComponent(
+ const char *mime) const {
+ ALOGV("MockDescramblerPlugin::requiresSecureDecoderComponent"
+ "(mime=%s)", mime);
+ return false;
+}
+
+status_t MockDescramblerPlugin::setMediaCasSession(
+ const CasSessionId &sessionId) {
+ ALOGV("MockDescramblerPlugin::setMediaCasSession");
+ sp<MockCasSession> session =
+ MockSessionLibrary::get()->findSession(sessionId);
+
+ if (session == NULL) {
+ ALOGE("MockDescramblerPlugin: session not found");
+ return ERROR_DRM_SESSION_NOT_OPENED;
+ }
+
+ return OK;
+}
+
+ssize_t MockDescramblerPlugin::descramble(
+ bool secure,
+ ScramblingControl scramblingControl,
+ size_t numSubSamples,
+ const SubSample *subSamples,
+ const void *srcPtr,
+ int32_t srcOffset,
+ void *dstPtr,
+ int32_t dstOffset,
+ AString *errorDetailMsg) {
+ ALOGV("MockDescramblerPlugin::descramble(secure=%d, sctrl=%d,"
+ "subSamples=%s, srcPtr=%p, dstPtr=%p, srcOffset=%d, dstOffset=%d)",
+ (int)secure, (int)scramblingControl,
+ subSamplesToString(subSamples, numSubSamples).string(),
+ srcPtr, dstPtr, srcOffset, dstOffset);
+
+ return 0;
+}
+
+// Conversion utilities
+String8 MockDescramblerPlugin::arrayToString(
+ uint8_t const *array, size_t len) const
+{
+ String8 result("{ ");
+ for (size_t i = 0; i < len; i++) {
+ result.appendFormat("0x%02x ", array[i]);
+ }
+ result += "}";
+ return result;
+}
+
+String8 MockDescramblerPlugin::subSamplesToString(
+ SubSample const *subSamples, size_t numSubSamples) const
+{
+ String8 result;
+ for (size_t i = 0; i < numSubSamples; i++) {
+ result.appendFormat("[%zu] {clear:%u, encrypted:%u} ", i,
+ subSamples[i].mNumBytesOfClearData,
+ subSamples[i].mNumBytesOfEncryptedData);
+ }
+ return result;
+}
+
+} // namespace android
+
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.h b/drm/mediacas/plugins/mock/MockCasPlugin.h
new file mode 100644
index 0000000..91c8855
--- /dev/null
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MOCK_CAS_PLUGIN_H_
+#define MOCK_CAS_PLUGIN_H_
+
+#include <media/cas/CasAPI.h>
+#include <media/cas/DescramblerAPI.h>
+#include <utils/Mutex.h>
+
+extern "C" {
+ android::CasFactory *createCasFactory();
+ android::DescramblerFactory *createDescramblerFactory();
+}
+
+namespace android {
+
+class MockCasFactory : public CasFactory {
+public:
+ MockCasFactory() {}
+ virtual ~MockCasFactory() {}
+
+ virtual bool isSystemIdSupported(
+ int32_t CA_system_id) const override;
+ virtual status_t queryPlugins(
+ std::vector<CasPluginDescriptor> *descriptors) const override;
+ virtual status_t createPlugin(
+ int32_t CA_system_id,
+ uint64_t appData,
+ CasPluginCallback callback,
+ CasPlugin **plugin) override;
+};
+
+class MockDescramblerFactory : public DescramblerFactory {
+public:
+ MockDescramblerFactory() {}
+ virtual ~MockDescramblerFactory() {}
+
+ virtual bool isSystemIdSupported(
+ int32_t CA_system_id) const override;
+ virtual status_t createPlugin(
+ int32_t CA_system_id, DescramblerPlugin **plugin) override;
+};
+
+class MockCasPlugin : public CasPlugin {
+public:
+ MockCasPlugin();
+ virtual ~MockCasPlugin();
+
+ virtual status_t setPrivateData(
+ const CasData &data) override;
+
+ virtual status_t openSession(
+ uint16_t program_number, CasSessionId *sessionId) override;
+
+ virtual status_t openSession(
+ uint16_t program_number,
+ uint16_t elementary_PID,
+ CasSessionId *sessionId) override;
+
+ virtual status_t closeSession(
+ const CasSessionId &sessionId) override;
+
+ virtual status_t setSessionPrivateData(
+ const CasSessionId &sessionId,
+ const CasData &data) override;
+
+ virtual status_t processEcm(
+ const CasSessionId &sessionId, const CasEcm &ecm) override;
+
+ virtual status_t processEmm(const CasEmm &emm) override;
+
+ virtual status_t sendEvent(
+ int32_t event, int32_t arg, const CasData &eventData) override;
+
+ virtual status_t provision(const String8 &str) override;
+
+ virtual status_t refreshEntitlements(
+ int32_t refreshType, const CasData &refreshData) override;
+
+private:
+
+ Mutex mLock;
+};
+
+class MockDescramblerPlugin : public DescramblerPlugin {
+public:
+ MockDescramblerPlugin() {}
+ virtual ~MockDescramblerPlugin() {};
+
+ virtual bool requiresSecureDecoderComponent(
+ const char *mime) const override;
+
+ virtual status_t setMediaCasSession(
+ const CasSessionId &sessionId) override;
+
+ virtual ssize_t descramble(
+ bool secure,
+ ScramblingControl scramblingControl,
+ size_t numSubSamples,
+ const SubSample *subSamples,
+ const void *srcPtr,
+ int32_t srcOffset,
+ void *dstPtr,
+ int32_t dstOffset,
+ AString *errorDetailMsg) override;
+
+private:
+ String8 subSamplesToString(
+ SubSample const *subSamples,
+ size_t numSubSamples) const;
+ String8 arrayToString(uint8_t const *array, size_t len) const;
+};
+} // namespace android
+
+#endif // MOCK_CAS_PLUGIN_H_
diff --git a/drm/mediacas/plugins/mock/MockSessionLibrary.cpp b/drm/mediacas/plugins/mock/MockSessionLibrary.cpp
new file mode 100644
index 0000000..8679a04
--- /dev/null
+++ b/drm/mediacas/plugins/mock/MockSessionLibrary.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MockSessionLibrary"
+
+#include <utils/Log.h>
+#include <utils/String8.h>
+#include "MockSessionLibrary.h"
+
+namespace android {
+
+Mutex MockSessionLibrary::sSingletonLock;
+MockSessionLibrary* MockSessionLibrary::sSingleton = NULL;
+
+inline bool operator < (
+ const SessionInfo& lhs,
+ const SessionInfo& rhs) {
+ if (lhs.plugin < rhs.plugin) return true;
+ else if (lhs.plugin > rhs.plugin) return false;
+
+ if (lhs.program_number < rhs.program_number) return true;
+ else if (lhs.program_number > rhs.program_number) return false;
+
+ return lhs.elementary_PID < rhs.elementary_PID;
+}
+
+void MockCasSession::setSessionInfo(const SessionInfo &info) {
+ mSessionInfo = info;
+}
+
+const SessionInfo& MockCasSession::getSessionInfo() const {
+ return mSessionInfo;
+}
+
+MockSessionLibrary* MockSessionLibrary::get() {
+ Mutex::Autolock lock(sSingletonLock);
+
+ if (sSingleton == NULL) {
+ ALOGD("Instantiating Session Library Singleton.");
+ sSingleton = new MockSessionLibrary();
+ }
+
+ return sSingleton;
+}
+
+MockSessionLibrary::MockSessionLibrary() : mNextSessionId(1) {}
+
+status_t MockSessionLibrary::addSession(
+ CasPlugin *plugin,
+ uint16_t program_number,
+ uint16_t elementary_PID,
+ CasSessionId *sessionId) {
+ Mutex::Autolock lock(mSessionsLock);
+
+ SessionInfo info = {plugin, program_number, elementary_PID};
+ ssize_t index = mSessionInfoToIDMap.indexOfKey(info);
+ if (index >= 0) {
+ ALOGW("Session already exists: program_number=%u, elementary_PID=%u",
+ program_number, elementary_PID);
+ *sessionId = mSessionInfoToIDMap[index];
+ return OK;
+ }
+
+ sp<MockCasSession> session = new MockCasSession();
+ session->setSessionInfo(info);
+
+ uint8_t *byteArray = (uint8_t *) &mNextSessionId;
+ sessionId->push_back(byteArray[3]);
+ sessionId->push_back(byteArray[2]);
+ sessionId->push_back(byteArray[1]);
+ sessionId->push_back(byteArray[0]);
+ mNextSessionId++;
+
+ mSessionInfoToIDMap.add(info, *sessionId);
+ mIDToSessionMap.add(*sessionId, session);
+ return OK;
+}
+
+sp<MockCasSession> MockSessionLibrary::findSession(
+ const CasSessionId& sessionId) {
+ Mutex::Autolock lock(mSessionsLock);
+
+ ssize_t index = mIDToSessionMap.indexOfKey(sessionId);
+ if (index < 0) {
+ return NULL;
+ }
+ return mIDToSessionMap.valueFor(sessionId);
+}
+
+void MockSessionLibrary::destroySession(const CasSessionId& sessionId) {
+ Mutex::Autolock lock(mSessionsLock);
+
+ ssize_t index = mIDToSessionMap.indexOfKey(sessionId);
+ if (index < 0) {
+ return;
+ }
+
+ sp<MockCasSession> session = mIDToSessionMap.valueAt(index);
+ mSessionInfoToIDMap.removeItem(session->getSessionInfo());
+ mIDToSessionMap.removeItemsAt(index);
+}
+
+void MockSessionLibrary::destroyPlugin(CasPlugin *plugin) {
+ Mutex::Autolock lock(mSessionsLock);
+
+ for (ssize_t index = mSessionInfoToIDMap.size() - 1; index >= 0; index--) {
+ const SessionInfo &info = mSessionInfoToIDMap.keyAt(index);
+ if (info.plugin == plugin) {
+ const CasSessionId &id = mSessionInfoToIDMap.valueAt(index);
+ mIDToSessionMap.removeItem(id);
+ mSessionInfoToIDMap.removeItemsAt(index);
+ }
+ }
+}
+
+} // namespace android
diff --git a/drm/mediacas/plugins/mock/MockSessionLibrary.h b/drm/mediacas/plugins/mock/MockSessionLibrary.h
new file mode 100644
index 0000000..d28cbdc
--- /dev/null
+++ b/drm/mediacas/plugins/mock/MockSessionLibrary.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MOCK_CAS_SESSION_LIBRARY_H_
+#define MOCK_CAS_SESSION_LIBRARY_H_
+
+#include <media/cas/CasAPI.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/KeyedVector.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+struct SessionInfo {
+ CasPlugin *plugin;
+ uint16_t program_number;
+ uint16_t elementary_PID;
+};
+
+class MockCasSession : public RefBase {
+public:
+ explicit MockCasSession() {}
+ virtual ~MockCasSession() {}
+
+private:
+ friend class MockSessionLibrary;
+ SessionInfo mSessionInfo;
+
+ void setSessionInfo(const SessionInfo &info);
+ const SessionInfo& getSessionInfo() const;
+
+ DISALLOW_EVIL_CONSTRUCTORS(MockCasSession);
+};
+
+class MockSessionLibrary {
+public:
+ static MockSessionLibrary* get();
+
+ status_t addSession(
+ CasPlugin *plugin,
+ uint16_t program_number,
+ uint16_t elementary_PID,
+ CasSessionId *sessionId);
+
+ sp<MockCasSession> findSession(const CasSessionId& sessionId);
+
+ void destroySession(const CasSessionId& sessionId);
+
+ void destroyPlugin(CasPlugin *plugin);
+
+private:
+ static Mutex sSingletonLock;
+ static MockSessionLibrary* sSingleton;
+
+ Mutex mSessionsLock;
+ uint32_t mNextSessionId;
+ KeyedVector<CasSessionId, sp<MockCasSession> > mIDToSessionMap;
+ KeyedVector<SessionInfo, CasSessionId> mSessionInfoToIDMap;
+
+ MockSessionLibrary();
+ DISALLOW_EVIL_CONSTRUCTORS(MockSessionLibrary);
+};
+} // namespace android
+
+#endif // MOCK_CAS_SESSION_LIBRARY_H_
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
index 8356bcc..8cc5ee9 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
@@ -39,8 +39,9 @@
sp<Session> session = mSessionLibrary->findSession(sessionId);
if (session.get()) {
mSessionLibrary->destroySession(session);
+ return android::OK;
}
- return android::OK;
+ return android::ERROR_DRM_SESSION_NOT_OPENED;
}
status_t DrmPlugin::getKeyRequest(
diff --git a/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp b/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp
index 46d7f77..0419f97 100644
--- a/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp
+++ b/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp
@@ -43,7 +43,7 @@
return sSingleton;
}
-const sp<Session>& SessionLibrary::createSession() {
+sp<Session> SessionLibrary::createSession() {
Mutex::Autolock lock(mSessionsLock);
String8 sessionIdString = String8::format("%u", mNextSessionId);
@@ -57,9 +57,12 @@
return mSessions.valueFor(sessionId);
}
-const sp<Session>& SessionLibrary::findSession(
+sp<Session> SessionLibrary::findSession(
const Vector<uint8_t>& sessionId) {
Mutex::Autolock lock(mSessionsLock);
+ if (mSessions.indexOfKey(sessionId) < 0) {
+ return sp<Session>(NULL);
+ }
return mSessions.valueFor(sessionId);
}
diff --git a/drm/mediadrm/plugins/clearkey/SessionLibrary.h b/drm/mediadrm/plugins/clearkey/SessionLibrary.h
index 199ad64..6236fff 100644
--- a/drm/mediadrm/plugins/clearkey/SessionLibrary.h
+++ b/drm/mediadrm/plugins/clearkey/SessionLibrary.h
@@ -31,9 +31,9 @@
public:
static SessionLibrary* get();
- const android::sp<Session>& createSession();
+ android::sp<Session> createSession();
- const android::sp<Session>& findSession(
+ android::sp<Session> findSession(
const android::Vector<uint8_t>& sessionId);
void destroySession(const android::sp<Session>& session);
@@ -48,7 +48,7 @@
android::Mutex mSessionsLock;
uint32_t mNextSessionId;
- android::DefaultKeyedVector<android::Vector<uint8_t>, android::sp<Session> >
+ android::KeyedVector<android::Vector<uint8_t>, android::sp<Session> >
mSessions;
};
diff --git a/include/media/omx/1.0/Conversion.h b/include/media/omx/1.0/Conversion.h
index f3f8441..9816fe1 100644
--- a/include/media/omx/1.0/Conversion.h
+++ b/include/media/omx/1.0/Conversion.h
@@ -30,8 +30,8 @@
#include <binder/Status.h>
#include <ui/FenceTime.h>
#include <cutils/native_handle.h>
-#include <gui/IGraphicBufferProducer.h>
+#include <ui/GraphicBuffer.h>
#include <media/OMXFenceParcelable.h>
#include <media/OMXBuffer.h>
#include <media/hardware/VideoAPI.h>
@@ -40,10 +40,8 @@
#include <android/hardware/media/omx/1.0/types.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
#include <android/hardware/media/omx/1.0/IOmxNode.h>
-#include <android/hardware/media/omx/1.0/IOmxBufferProducer.h>
#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
#include <android/hardware/media/omx/1.0/IOmxObserver.h>
-#include <android/hardware/media/omx/1.0/IOmxProducerListener.h>
#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
#include <android/IGraphicBufferSource.h>
@@ -95,9 +93,6 @@
using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
using ::android::IOMXBufferSource;
-using ::android::hardware::media::omx::V1_0::IOmxBufferProducer;
-using ::android::IGraphicBufferProducer;
-
// native_handle_t helper functions.
/**
@@ -894,1279 +889,6 @@
#endif
}
-/**
- * Conversion functions for types outside media
- * ============================================
- *
- * Some objects in libui and libgui that were made to go through binder calls do
- * not expose ways to read or write their fields to the public. To pass an
- * object of this kind through the HIDL boundary, translation functions need to
- * work around the access restriction by using the publicly available
- * `flatten()` and `unflatten()` functions.
- *
- * All `flatten()` and `unflatten()` overloads follow the same convention as
- * follows:
- *
- * status_t flatten(ObjectType const& object,
- * [OtherType const& other, ...]
- * void*& buffer, size_t& size,
- * int*& fds, size_t& numFds)
- *
- * status_t unflatten(ObjectType* object,
- * [OtherType* other, ...,]
- * void*& buffer, size_t& size,
- * int*& fds, size_t& numFds)
- *
- * The number of `other` parameters varies depending on the `ObjectType`. For
- * example, in the process of unflattening an object that contains
- * `hidl_handle`, `other` is needed to hold `native_handle_t` objects that will
- * be created.
- *
- * The last four parameters always work the same way in all overloads of
- * `flatten()` and `unflatten()`:
- * - For `flatten()`, `buffer` is the pointer to the non-fd buffer to be filled,
- * `size` is the size (in bytes) of the non-fd buffer pointed to by `buffer`,
- * `fds` is the pointer to the fd buffer to be filled, and `numFds` is the
- * size (in ints) of the fd buffer pointed to by `fds`.
- * - For `unflatten()`, `buffer` is the pointer to the non-fd buffer to be read
- * from, `size` is the size (in bytes) of the non-fd buffer pointed to by
- * `buffer`, `fds` is the pointer to the fd buffer to be read from, and
- * `numFds` is the size (in ints) of the fd buffer pointed to by `fds`.
- * - After a successful call to `flatten()` or `unflatten()`, `buffer` and `fds`
- * will be advanced, while `size` and `numFds` will be decreased to reflect
- * how much storage/data of the two buffers (fd and non-fd) have been used.
- * - After an unsuccessful call, the values of `buffer`, `size`, `fds` and
- * `numFds` are invalid.
- *
- * The return value of a successful `flatten()` or `unflatten()` call will be
- * `OK` (also aliased as `NO_ERROR`). Any other values indicate a failure.
- *
- * For each object type that supports flattening, there will be two accompanying
- * functions: `getFlattenedSize()` and `getFdCount()`. `getFlattenedSize()` will
- * return the size of the non-fd buffer that the object will need for
- * flattening. `getFdCount()` will return the size of the fd buffer that the
- * object will need for flattening.
- *
- * The set of these four functions, `getFlattenedSize()`, `getFdCount()`,
- * `flatten()` and `unflatten()`, are similar to functions of the same name in
- * the abstract class `Flattenable`. The only difference is that functions in
- * this file are not member functions of the object type. For example, we write
- *
- * flatten(x, buffer, size, fds, numFds)
- *
- * instead of
- *
- * x.flatten(buffer, size, fds, numFds)
- *
- * because we cannot modify the type of `x`.
- *
- * There is one exception to the naming convention: `hidl_handle` that
- * represents a fence. The four functions for this "Fence" type have the word
- * "Fence" attched to their names because the object type, which is
- * `hidl_handle`, does not carry the special meaning that the object itself can
- * only contain zero or one file descriptor.
- */
-
-// Ref: frameworks/native/libs/ui/Fence.cpp
-
-/**
- * \brief Return the size of the non-fd buffer required to flatten a fence.
- *
- * \param[in] fence The input fence of type `hidl_handle`.
- * \return The required size of the flat buffer.
- *
- * The current version of this function always returns 4, which is the number of
- * bytes required to store the number of file descriptors contained in the fd
- * part of the flat buffer.
- */
-inline size_t getFenceFlattenedSize(hidl_handle const& /* fence */) {
- return 4;
-};
-
-/**
- * \brief Return the number of file descriptors contained in a fence.
- *
- * \param[in] fence The input fence of type `hidl_handle`.
- * \return `0` if \p fence does not contain a valid file descriptor, or `1`
- * otherwise.
- */
-inline size_t getFenceFdCount(hidl_handle const& fence) {
- return native_handle_read_fd(fence) == -1 ? 0 : 1;
-}
-
-/**
- * \brief Unflatten `Fence` to `hidl_handle`.
- *
- * \param[out] fence The destination `hidl_handle`.
- * \param[out] nh The underlying native handle.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * If the return value is `NO_ERROR`, \p nh will point to a newly created
- * native handle, which needs to be deleted with `native_handle_delete()`
- * afterwards.
- */
-inline status_t unflattenFence(hidl_handle* fence, native_handle_t** nh,
- void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
- if (size < 4) {
- return NO_MEMORY;
- }
-
- uint32_t numFdsInHandle;
- FlattenableUtils::read(buffer, size, numFdsInHandle);
-
- if (numFdsInHandle > 1) {
- return BAD_VALUE;
- }
-
- if (numFds < numFdsInHandle) {
- return NO_MEMORY;
- }
-
- if (numFdsInHandle) {
- *nh = native_handle_create_from_fd(*fds);
- if (*nh == nullptr) {
- return NO_MEMORY;
- }
- *fence = *nh;
- ++fds;
- --numFds;
- } else {
- *nh = nullptr;
- *fence = hidl_handle();
- }
-
- return NO_ERROR;
-}
-
-/**
- * \brief Flatten `hidl_handle` as `Fence`.
- *
- * \param[in] fence The source `hidl_handle`.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- */
-inline status_t flattenFence(hidl_handle const& fence,
- void*& buffer, size_t& size, int*& fds, size_t& numFds) {
- if (size < getFenceFlattenedSize(fence) ||
- numFds < getFenceFdCount(fence)) {
- return NO_MEMORY;
- }
- // Cast to uint32_t since the size of a size_t can vary between 32- and
- // 64-bit processes
- FlattenableUtils::write(buffer, size,
- static_cast<uint32_t>(getFenceFdCount(fence)));
- int fd = native_handle_read_fd(fence);
- if (fd != -1) {
- *fds = fd;
- ++fds;
- --numFds;
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Wrap `Fence` in `hidl_handle`.
- *
- * \param[out] t The wrapper of type `hidl_handle`.
- * \param[out] nh The native handle pointed to by \p t.
- * \param[in] l The source `Fence`.
- *
- * On success, \p nh will hold a newly created native handle, which must be
- * deleted manually with `native_handle_delete()` afterwards.
- */
-// wrap: Fence -> hidl_handle
-inline bool wrapAs(hidl_handle* t, native_handle_t** nh, Fence const& l) {
- size_t const baseSize = l.getFlattenedSize();
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- size_t const baseNumFds = l.getFdCount();
- std::unique_ptr<int[]> baseFds(
- new (std::nothrow) int[baseNumFds]);
- if (!baseFds) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- int* fds = static_cast<int*>(baseFds.get());
- size_t numFds = baseNumFds;
- if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- int const* constFds = static_cast<int const*>(baseFds.get());
- numFds = baseNumFds;
- if (unflattenFence(t, nh, constBuffer, size, constFds, numFds)
- != NO_ERROR) {
- return false;
- }
-
- return true;
-}
-
-/**
- * \brief Convert `hidl_handle` to `Fence`.
- *
- * \param[out] l The destination `Fence`. `l` must not have been used
- * (`l->isValid()` must return `false`) before this function is called.
- * \param[in] t The source `hidl_handle`.
- *
- * If \p t contains a valid file descriptor, it will be duplicated.
- */
-// convert: hidl_handle -> Fence
-inline bool convertTo(Fence* l, hidl_handle const& t) {
- int fd = native_handle_read_fd(t);
- if (fd != -1) {
- fd = dup(fd);
- if (fd == -1) {
- return false;
- }
- }
- native_handle_t* nh = native_handle_create_from_fd(fd);
- if (nh == nullptr) {
- if (fd != -1) {
- close(fd);
- }
- return false;
- }
-
- size_t const baseSize = getFenceFlattenedSize(t);
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- native_handle_delete(nh);
- return false;
- }
-
- size_t const baseNumFds = getFenceFdCount(t);
- std::unique_ptr<int[]> baseFds(
- new (std::nothrow) int[baseNumFds]);
- if (!baseFds) {
- native_handle_delete(nh);
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- int* fds = static_cast<int*>(baseFds.get());
- size_t numFds = baseNumFds;
- if (flattenFence(hidl_handle(nh), buffer, size, fds, numFds) != NO_ERROR) {
- native_handle_delete(nh);
- return false;
- }
- native_handle_delete(nh);
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- int const* constFds = static_cast<int const*>(baseFds.get());
- numFds = baseNumFds;
- if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
- return false;
- }
-
- return true;
-}
-
-// Ref: frameworks/native/libs/ui/FenceTime.cpp: FenceTime::Snapshot
-
-/**
- * \brief Return the size of the non-fd buffer required to flatten
- * `FenceTimeSnapshot`.
- *
- * \param[in] t The input `FenceTimeSnapshot`.
- * \return The required size of the flat buffer.
- */
-inline size_t getFlattenedSize(
- IOmxBufferProducer::FenceTimeSnapshot const& t) {
- constexpr size_t min = sizeof(t.state);
- switch (t.state) {
- case IOmxBufferProducer::FenceTimeSnapshot::State::EMPTY:
- return min;
- case IOmxBufferProducer::FenceTimeSnapshot::State::FENCE:
- return min + getFenceFlattenedSize(t.fence);
- case IOmxBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
- return min + sizeof(
- ::android::FenceTime::Snapshot::signalTime);
- }
- return 0;
-}
-
-/**
- * \brief Return the number of file descriptors contained in
- * `FenceTimeSnapshot`.
- *
- * \param[in] t The input `FenceTimeSnapshot`.
- * \return The number of file descriptors contained in \p t.
- */
-inline size_t getFdCount(
- IOmxBufferProducer::FenceTimeSnapshot const& t) {
- return t.state ==
- IOmxBufferProducer::FenceTimeSnapshot::State::FENCE ?
- getFenceFdCount(t.fence) : 0;
-}
-
-/**
- * \brief Flatten `FenceTimeSnapshot`.
- *
- * \param[in] t The source `FenceTimeSnapshot`.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * This function will duplicate the file descriptor in `t.fence` if `t.state ==
- * FENCE`.
- */
-inline status_t flatten(IOmxBufferProducer::FenceTimeSnapshot const& t,
- void*& buffer, size_t& size, int*& fds, size_t& numFds) {
- if (size < getFlattenedSize(t)) {
- return NO_MEMORY;
- }
-
- switch (t.state) {
- case IOmxBufferProducer::FenceTimeSnapshot::State::EMPTY:
- FlattenableUtils::write(buffer, size,
- ::android::FenceTime::Snapshot::State::EMPTY);
- return NO_ERROR;
- case IOmxBufferProducer::FenceTimeSnapshot::State::FENCE:
- FlattenableUtils::write(buffer, size,
- ::android::FenceTime::Snapshot::State::FENCE);
- return flattenFence(t.fence, buffer, size, fds, numFds);
- case IOmxBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
- FlattenableUtils::write(buffer, size,
- ::android::FenceTime::Snapshot::State::SIGNAL_TIME);
- FlattenableUtils::write(buffer, size, t.signalTimeNs);
- return NO_ERROR;
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Unflatten `FenceTimeSnapshot`.
- *
- * \param[out] t The destination `FenceTimeSnapshot`.
- * \param[out] nh The underlying native handle.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * If the return value is `NO_ERROR` and the constructed snapshot contains a
- * file descriptor, \p nh will be created to hold that file descriptor. In this
- * case, \p nh needs to be deleted with `native_handle_delete()` afterwards.
- */
-inline status_t unflatten(
- IOmxBufferProducer::FenceTimeSnapshot* t, native_handle_t** nh,
- void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
- if (size < sizeof(t->state)) {
- return NO_MEMORY;
- }
-
- *nh = nullptr;
- ::android::FenceTime::Snapshot::State state;
- FlattenableUtils::read(buffer, size, state);
- switch (state) {
- case ::android::FenceTime::Snapshot::State::EMPTY:
- t->state = IOmxBufferProducer::FenceTimeSnapshot::State::EMPTY;
- return NO_ERROR;
- case ::android::FenceTime::Snapshot::State::FENCE:
- t->state = IOmxBufferProducer::FenceTimeSnapshot::State::FENCE;
- return unflattenFence(&t->fence, nh, buffer, size, fds, numFds);
- case ::android::FenceTime::Snapshot::State::SIGNAL_TIME:
- t->state = IOmxBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME;
- if (size < sizeof(t->signalTimeNs)) {
- return NO_MEMORY;
- }
- FlattenableUtils::read(buffer, size, t->signalTimeNs);
- return NO_ERROR;
- }
- return NO_ERROR;
-}
-
-// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventsDelta
-
-/**
- * \brief Return a lower bound on the size of the non-fd buffer required to
- * flatten `FrameEventsDelta`.
- *
- * \param[in] t The input `FrameEventsDelta`.
- * \return A lower bound on the size of the flat buffer.
- */
-constexpr size_t minFlattenedSize(
- IOmxBufferProducer::FrameEventsDelta const& /* t */) {
- return sizeof(uint64_t) + // mFrameNumber
- sizeof(uint8_t) + // mIndex
- sizeof(uint8_t) + // mAddPostCompositeCalled
- sizeof(uint8_t) + // mAddRetireCalled
- sizeof(uint8_t) + // mAddReleaseCalled
- sizeof(nsecs_t) + // mPostedTime
- sizeof(nsecs_t) + // mRequestedPresentTime
- sizeof(nsecs_t) + // mLatchTime
- sizeof(nsecs_t) + // mFirstRefreshStartTime
- sizeof(nsecs_t); // mLastRefreshStartTime
-}
-
-/**
- * \brief Return the size of the non-fd buffer required to flatten
- * `FrameEventsDelta`.
- *
- * \param[in] t The input `FrameEventsDelta`.
- * \return The required size of the flat buffer.
- */
-inline size_t getFlattenedSize(
- IOmxBufferProducer::FrameEventsDelta const& t) {
- return minFlattenedSize(t) +
- getFlattenedSize(t.gpuCompositionDoneFence) +
- getFlattenedSize(t.displayPresentFence) +
- getFlattenedSize(t.displayRetireFence) +
- getFlattenedSize(t.releaseFence);
-};
-
-/**
- * \brief Return the number of file descriptors contained in
- * `FrameEventsDelta`.
- *
- * \param[in] t The input `FrameEventsDelta`.
- * \return The number of file descriptors contained in \p t.
- */
-inline size_t getFdCount(
- IOmxBufferProducer::FrameEventsDelta const& t) {
- return getFdCount(t.gpuCompositionDoneFence) +
- getFdCount(t.displayPresentFence) +
- getFdCount(t.displayRetireFence) +
- getFdCount(t.releaseFence);
-};
-
-/**
- * \brief Unflatten `FrameEventsDelta`.
- *
- * \param[out] t The destination `FrameEventsDelta`.
- * \param[out] nh The underlying array of native handles.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * If the return value is `NO_ERROR`, \p nh will have length 4, and it will be
- * populated with `nullptr` or newly created handles. Each non-null slot in \p
- * nh will need to be deleted manually with `native_handle_delete()`.
- */
-inline status_t unflatten(IOmxBufferProducer::FrameEventsDelta* t,
- std::vector<native_handle_t*>* nh,
- void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
- if (size < minFlattenedSize(*t)) {
- return NO_MEMORY;
- }
- FlattenableUtils::read(buffer, size, t->frameNumber);
-
- // These were written as uint8_t for alignment.
- uint8_t temp = 0;
- FlattenableUtils::read(buffer, size, temp);
- size_t index = static_cast<size_t>(temp);
- if (index >= ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
- return BAD_VALUE;
- }
- t->index = static_cast<uint32_t>(index);
-
- FlattenableUtils::read(buffer, size, temp);
- t->addPostCompositeCalled = static_cast<bool>(temp);
- FlattenableUtils::read(buffer, size, temp);
- t->addRetireCalled = static_cast<bool>(temp);
- FlattenableUtils::read(buffer, size, temp);
- t->addReleaseCalled = static_cast<bool>(temp);
-
- FlattenableUtils::read(buffer, size, t->postedTimeNs);
- FlattenableUtils::read(buffer, size, t->requestedPresentTimeNs);
- FlattenableUtils::read(buffer, size, t->latchTimeNs);
- FlattenableUtils::read(buffer, size, t->firstRefreshStartTimeNs);
- FlattenableUtils::read(buffer, size, t->lastRefreshStartTimeNs);
- FlattenableUtils::read(buffer, size, t->dequeueReadyTime);
-
- // Fences
- IOmxBufferProducer::FenceTimeSnapshot* tSnapshot[4];
- tSnapshot[0] = &t->gpuCompositionDoneFence;
- tSnapshot[1] = &t->displayPresentFence;
- tSnapshot[2] = &t->displayRetireFence;
- tSnapshot[3] = &t->releaseFence;
- nh->resize(4);
- for (size_t snapshotIndex = 0; snapshotIndex < 4; ++snapshotIndex) {
- status_t status = unflatten(
- tSnapshot[snapshotIndex], &((*nh)[snapshotIndex]),
- buffer, size, fds, numFds);
- if (status != NO_ERROR) {
- while (snapshotIndex > 0) {
- --snapshotIndex;
- if ((*nh)[snapshotIndex] != nullptr) {
- native_handle_delete((*nh)[snapshotIndex]);
- }
- }
- return status;
- }
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Flatten `FrameEventsDelta`.
- *
- * \param[in] t The source `FrameEventsDelta`.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * This function will duplicate file descriptors contained in \p t.
- */
-// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp:
-// FrameEventsDelta::flatten
-inline status_t flatten(IOmxBufferProducer::FrameEventsDelta const& t,
- void*& buffer, size_t& size, int*& fds, size_t numFds) {
- // Check that t.index is within a valid range.
- if (t.index >= static_cast<uint32_t>(FrameEventHistory::MAX_FRAME_HISTORY)
- || t.index > std::numeric_limits<uint8_t>::max()) {
- return BAD_VALUE;
- }
-
- FlattenableUtils::write(buffer, size, t.frameNumber);
-
- // These are static_cast to uint8_t for alignment.
- FlattenableUtils::write(buffer, size, static_cast<uint8_t>(t.index));
- FlattenableUtils::write(
- buffer, size, static_cast<uint8_t>(t.addPostCompositeCalled));
- FlattenableUtils::write(
- buffer, size, static_cast<uint8_t>(t.addRetireCalled));
- FlattenableUtils::write(
- buffer, size, static_cast<uint8_t>(t.addReleaseCalled));
-
- FlattenableUtils::write(buffer, size, t.postedTimeNs);
- FlattenableUtils::write(buffer, size, t.requestedPresentTimeNs);
- FlattenableUtils::write(buffer, size, t.latchTimeNs);
- FlattenableUtils::write(buffer, size, t.firstRefreshStartTimeNs);
- FlattenableUtils::write(buffer, size, t.lastRefreshStartTimeNs);
- FlattenableUtils::write(buffer, size, t.dequeueReadyTime);
-
- // Fences
- IOmxBufferProducer::FenceTimeSnapshot const* tSnapshot[4];
- tSnapshot[0] = &t.gpuCompositionDoneFence;
- tSnapshot[1] = &t.displayPresentFence;
- tSnapshot[2] = &t.displayRetireFence;
- tSnapshot[3] = &t.releaseFence;
- for (size_t snapshotIndex = 0; snapshotIndex < 4; ++snapshotIndex) {
- status_t status = flatten(
- *(tSnapshot[snapshotIndex]), buffer, size, fds, numFds);
- if (status != NO_ERROR) {
- return status;
- }
- }
- return NO_ERROR;
-}
-
-// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventHistoryDelta
-
-/**
- * \brief Return the size of the non-fd buffer required to flatten
- * `IOmxBufferProducer::FrameEventHistoryDelta`.
- *
- * \param[in] t The input `IOmxBufferProducer::FrameEventHistoryDelta`.
- * \return The required size of the flat buffer.
- */
-inline size_t getFlattenedSize(
- IOmxBufferProducer::FrameEventHistoryDelta const& t) {
- size_t size = 4 + // mDeltas.size()
- sizeof(t.compositorTiming);
- for (size_t i = 0; i < t.deltas.size(); ++i) {
- size += getFlattenedSize(t.deltas[i]);
- }
- return size;
-}
-
-/**
- * \brief Return the number of file descriptors contained in
- * `IOmxBufferProducer::FrameEventHistoryDelta`.
- *
- * \param[in] t The input `IOmxBufferProducer::FrameEventHistoryDelta`.
- * \return The number of file descriptors contained in \p t.
- */
-inline size_t getFdCount(
- IOmxBufferProducer::FrameEventHistoryDelta const& t) {
- size_t numFds = 0;
- for (size_t i = 0; i < t.deltas.size(); ++i) {
- numFds += getFdCount(t.deltas[i]);
- }
- return numFds;
-}
-
-/**
- * \brief Unflatten `FrameEventHistoryDelta`.
- *
- * \param[out] t The destination `FrameEventHistoryDelta`.
- * \param[out] nh The underlying array of arrays of native handles.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * If the return value is `NO_ERROR`, \p nh will be populated with `nullptr` or
- * newly created handles. The second dimension of \p nh will be 4. Each non-null
- * slot in \p nh will need to be deleted manually with `native_handle_delete()`.
- */
-inline status_t unflatten(
- IOmxBufferProducer::FrameEventHistoryDelta* t,
- std::vector<std::vector<native_handle_t*> >* nh,
- void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
- if (size < 4) {
- return NO_MEMORY;
- }
-
- FlattenableUtils::read(buffer, size, t->compositorTiming);
-
- uint32_t deltaCount = 0;
- FlattenableUtils::read(buffer, size, deltaCount);
- if (static_cast<size_t>(deltaCount) >
- ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
- return BAD_VALUE;
- }
- t->deltas.resize(deltaCount);
- nh->resize(deltaCount);
- for (size_t deltaIndex = 0; deltaIndex < deltaCount; ++deltaIndex) {
- status_t status = unflatten(
- &(t->deltas[deltaIndex]), &((*nh)[deltaIndex]),
- buffer, size, fds, numFds);
- if (status != NO_ERROR) {
- return status;
- }
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Flatten `FrameEventHistoryDelta`.
- *
- * \param[in] t The source `FrameEventHistoryDelta`.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * This function will duplicate file descriptors contained in \p t.
- */
-inline status_t flatten(
- IOmxBufferProducer::FrameEventHistoryDelta const& t,
- void*& buffer, size_t& size, int*& fds, size_t& numFds) {
- if (t.deltas.size() > ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
- return BAD_VALUE;
- }
- if (size < getFlattenedSize(t)) {
- return NO_MEMORY;
- }
-
- FlattenableUtils::write(buffer, size, t.compositorTiming);
-
- FlattenableUtils::write(buffer, size, static_cast<uint32_t>(t.deltas.size()));
- for (size_t deltaIndex = 0; deltaIndex < t.deltas.size(); ++deltaIndex) {
- status_t status = flatten(t.deltas[deltaIndex], buffer, size, fds, numFds);
- if (status != NO_ERROR) {
- return status;
- }
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Wrap `::android::FrameEventHistoryData` in
- * `IOmxBufferProducer::FrameEventHistoryDelta`.
- *
- * \param[out] t The wrapper of type
- * `IOmxBufferProducer::FrameEventHistoryDelta`.
- * \param[out] nh The array of arrays of native handles that are referred to by
- * members of \p t.
- * \param[in] l The source `::android::FrameEventHistoryDelta`.
- *
- * On success, each member of \p nh will be either `nullptr` or a newly created
- * native handle. All the non-`nullptr` elements must be deleted individually
- * with `native_handle_delete()`.
- */
-inline bool wrapAs(IOmxBufferProducer::FrameEventHistoryDelta* t,
- std::vector<std::vector<native_handle_t*> >* nh,
- ::android::FrameEventHistoryDelta const& l) {
-
- size_t const baseSize = l.getFlattenedSize();
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- size_t const baseNumFds = l.getFdCount();
- std::unique_ptr<int[]> baseFds(
- new (std::nothrow) int[baseNumFds]);
- if (!baseFds) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- int* fds = baseFds.get();
- size_t numFds = baseNumFds;
- if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- int const* constFds = static_cast<int const*>(baseFds.get());
- numFds = baseNumFds;
- if (unflatten(t, nh, constBuffer, size, constFds, numFds) != NO_ERROR) {
- return false;
- }
-
- return true;
-}
-
-/**
- * \brief Convert `IOmxBufferProducer::FrameEventHistoryDelta` to
- * `::android::FrameEventHistoryDelta`.
- *
- * \param[out] l The destination `::android::FrameEventHistoryDelta`.
- * \param[in] t The source `IOmxBufferProducer::FrameEventHistoryDelta`.
- *
- * This function will duplicate all file descriptors contained in \p t.
- */
-inline bool convertTo(
- ::android::FrameEventHistoryDelta* l,
- IOmxBufferProducer::FrameEventHistoryDelta const& t) {
-
- size_t const baseSize = getFlattenedSize(t);
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- size_t const baseNumFds = getFdCount(t);
- std::unique_ptr<int[]> baseFds(
- new (std::nothrow) int[baseNumFds]);
- if (!baseFds) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- int* fds = static_cast<int*>(baseFds.get());
- size_t numFds = baseNumFds;
- if (flatten(t, buffer, size, fds, numFds) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- int const* constFds = static_cast<int const*>(baseFds.get());
- numFds = baseNumFds;
- if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
- return false;
- }
-
- return true;
-}
-
-// Ref: frameworks/native/libs/ui/Region.cpp
-
-/**
- * \brief Return the size of the buffer required to flatten `Region`.
- *
- * \param[in] t The input `Region`.
- * \return The required size of the flat buffer.
- */
-inline size_t getFlattenedSize(Region const& t) {
- return sizeof(uint32_t) + t.size() * sizeof(::android::Rect);
-}
-
-/**
- * \brief Unflatten `Region`.
- *
- * \param[out] t The destination `Region`.
- * \param[in,out] buffer The pointer to the flat buffer.
- * \param[in,out] size The size of the flat buffer.
- * \return `NO_ERROR` on success; other value on failure.
- */
-inline status_t unflatten(Region* t, void const*& buffer, size_t& size) {
- if (size < sizeof(uint32_t)) {
- return NO_MEMORY;
- }
-
- uint32_t numRects = 0;
- FlattenableUtils::read(buffer, size, numRects);
- if (size < numRects * sizeof(Rect)) {
- return NO_MEMORY;
- }
- if (numRects > (UINT32_MAX / sizeof(Rect))) {
- return NO_MEMORY;
- }
-
- t->resize(numRects);
- for (size_t r = 0; r < numRects; ++r) {
- ::android::Rect rect(::android::Rect::EMPTY_RECT);
- status_t status = rect.unflatten(buffer, size);
- if (status != NO_ERROR) {
- return status;
- }
- FlattenableUtils::advance(buffer, size, sizeof(rect));
- (*t)[r] = Rect{
- static_cast<int32_t>(rect.left),
- static_cast<int32_t>(rect.top),
- static_cast<int32_t>(rect.right),
- static_cast<int32_t>(rect.bottom)};
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Flatten `Region`.
- *
- * \param[in] t The source `Region`.
- * \param[in,out] buffer The pointer to the flat buffer.
- * \param[in,out] size The size of the flat buffer.
- * \return `NO_ERROR` on success; other value on failure.
- */
-inline status_t flatten(Region const& t, void*& buffer, size_t& size) {
- if (size < getFlattenedSize(t)) {
- return NO_MEMORY;
- }
-
- FlattenableUtils::write(buffer, size, static_cast<uint32_t>(t.size()));
- for (size_t r = 0; r < t.size(); ++r) {
- ::android::Rect rect(
- static_cast<int32_t>(t[r].left),
- static_cast<int32_t>(t[r].top),
- static_cast<int32_t>(t[r].right),
- static_cast<int32_t>(t[r].bottom));
- status_t status = rect.flatten(buffer, size);
- if (status != NO_ERROR) {
- return status;
- }
- FlattenableUtils::advance(buffer, size, sizeof(rect));
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Convert `::android::Region` to `Region`.
- *
- * \param[out] t The destination `Region`.
- * \param[in] l The source `::android::Region`.
- */
-// convert: ::android::Region -> Region
-inline bool convertTo(Region* t, ::android::Region const& l) {
- size_t const baseSize = l.getFlattenedSize();
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- if (l.flatten(buffer, size) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- if (unflatten(t, constBuffer, size) != NO_ERROR) {
- return false;
- }
-
- return true;
-}
-
-/**
- * \brief Convert `Region` to `::android::Region`.
- *
- * \param[out] l The destination `::android::Region`.
- * \param[in] t The source `Region`.
- */
-// convert: Region -> ::android::Region
-inline bool convertTo(::android::Region* l, Region const& t) {
- size_t const baseSize = getFlattenedSize(t);
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- if (flatten(t, buffer, size) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- if (l->unflatten(constBuffer, size) != NO_ERROR) {
- return false;
- }
-
- return true;
-}
-
-// Ref: frameworks/native/libs/gui/IGraphicBufferProducer.cpp:
-// IGraphicBufferProducer::QueueBufferInput
-
-/**
- * \brief Return a lower bound on the size of the buffer required to flatten
- * `IOmxBufferProducer::QueueBufferInput`.
- *
- * \param[in] t The input `IOmxBufferProducer::QueueBufferInput`.
- * \return A lower bound on the size of the flat buffer.
- */
-constexpr size_t minFlattenedSize(
- IOmxBufferProducer::QueueBufferInput const& /* t */) {
- return sizeof(int64_t) + // timestamp
- sizeof(int) + // isAutoTimestamp
- sizeof(android_dataspace) + // dataSpace
- sizeof(::android::Rect) + // crop
- sizeof(int) + // scalingMode
- sizeof(uint32_t) + // transform
- sizeof(uint32_t) + // stickyTransform
- sizeof(bool); // getFrameTimestamps
-}
-
-/**
- * \brief Return the size of the buffer required to flatten
- * `IOmxBufferProducer::QueueBufferInput`.
- *
- * \param[in] t The input `IOmxBufferProducer::QueueBufferInput`.
- * \return The required size of the flat buffer.
- */
-inline size_t getFlattenedSize(IOmxBufferProducer::QueueBufferInput const& t) {
- return minFlattenedSize(t) +
- getFenceFlattenedSize(t.fence) +
- getFlattenedSize(t.surfaceDamage);
-}
-
-/**
- * \brief Return the number of file descriptors contained in
- * `IOmxBufferProducer::QueueBufferInput`.
- *
- * \param[in] t The input `IOmxBufferProducer::QueueBufferInput`.
- * \return The number of file descriptors contained in \p t.
- */
-inline size_t getFdCount(
- IOmxBufferProducer::QueueBufferInput const& t) {
- return getFenceFdCount(t.fence);
-}
-
-/**
- * \brief Flatten `IOmxBufferProducer::QueueBufferInput`.
- *
- * \param[in] t The source `IOmxBufferProducer::QueueBufferInput`.
- * \param[out] nh The native handle cloned from `t.fence`.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * This function will duplicate the file descriptor in `t.fence`.
- */
-inline status_t flatten(IOmxBufferProducer::QueueBufferInput const& t,
- native_handle_t** nh,
- void*& buffer, size_t& size, int*& fds, size_t& numFds) {
- if (size < getFlattenedSize(t)) {
- return NO_MEMORY;
- }
-
- FlattenableUtils::write(buffer, size, t.timestamp);
- FlattenableUtils::write(buffer, size, static_cast<int>(t.isAutoTimestamp));
- FlattenableUtils::write(buffer, size,
- static_cast<android_dataspace_t>(t.dataSpace));
- FlattenableUtils::write(buffer, size, ::android::Rect(
- static_cast<int32_t>(t.crop.left),
- static_cast<int32_t>(t.crop.top),
- static_cast<int32_t>(t.crop.right),
- static_cast<int32_t>(t.crop.bottom)));
- FlattenableUtils::write(buffer, size, static_cast<int>(t.scalingMode));
- FlattenableUtils::write(buffer, size, t.transform);
- FlattenableUtils::write(buffer, size, t.stickyTransform);
- FlattenableUtils::write(buffer, size, t.getFrameTimestamps);
-
- *nh = t.fence.getNativeHandle() == nullptr ?
- nullptr : native_handle_clone(t.fence);
- status_t status = flattenFence(hidl_handle(*nh), buffer, size, fds, numFds);
- if (status != NO_ERROR) {
- return status;
- }
- return flatten(t.surfaceDamage, buffer, size);
-}
-
-/**
- * \brief Unflatten `IOmxBufferProducer::QueueBufferInput`.
- *
- * \param[out] t The destination `IOmxBufferProducer::QueueBufferInput`.
- * \param[out] nh The underlying native handle for `t->fence`.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * If the return value is `NO_ERROR` and `t->fence` contains a valid file
- * descriptor, \p nh will be a newly created native handle holding that file
- * descriptor. \p nh needs to be deleted with `native_handle_delete()`
- * afterwards.
- */
-inline status_t unflatten(
- IOmxBufferProducer::QueueBufferInput* t, native_handle_t** nh,
- void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
- if (size < minFlattenedSize(*t)) {
- return NO_MEMORY;
- }
-
- FlattenableUtils::read(buffer, size, t->timestamp);
- int lIsAutoTimestamp;
- FlattenableUtils::read(buffer, size, lIsAutoTimestamp);
- t->isAutoTimestamp = static_cast<int32_t>(lIsAutoTimestamp);
- android_dataspace_t lDataSpace;
- FlattenableUtils::read(buffer, size, lDataSpace);
- t->dataSpace = static_cast<Dataspace>(lDataSpace);
- Rect lCrop;
- FlattenableUtils::read(buffer, size, lCrop);
- t->crop = Rect{
- static_cast<int32_t>(lCrop.left),
- static_cast<int32_t>(lCrop.top),
- static_cast<int32_t>(lCrop.right),
- static_cast<int32_t>(lCrop.bottom)};
- int lScalingMode;
- FlattenableUtils::read(buffer, size, lScalingMode);
- t->scalingMode = static_cast<int32_t>(lScalingMode);
- FlattenableUtils::read(buffer, size, t->transform);
- FlattenableUtils::read(buffer, size, t->stickyTransform);
- FlattenableUtils::read(buffer, size, t->getFrameTimestamps);
-
- status_t status = unflattenFence(&(t->fence), nh,
- buffer, size, fds, numFds);
- if (status != NO_ERROR) {
- return status;
- }
- return unflatten(&(t->surfaceDamage), buffer, size);
-}
-
-/**
- * \brief Wrap `IGraphicBufferProducer::QueueBufferInput` in
- * `IOmxBufferProducer::QueueBufferInput`.
- *
- * \param[out] t The wrapper of type
- * `IOmxBufferProducer::QueueBufferInput`.
- * \param[out] nh The underlying native handle for `t->fence`.
- * \param[in] l The source `IGraphicBufferProducer::QueueBufferInput`.
- *
- * If the return value is `true` and `t->fence` contains a valid file
- * descriptor, \p nh will be a newly created native handle holding that file
- * descriptor. \p nh needs to be deleted with `native_handle_delete()`
- * afterwards.
- */
-inline bool wrapAs(
- IOmxBufferProducer::QueueBufferInput* t,
- native_handle_t** nh,
- IGraphicBufferProducer::QueueBufferInput const& l) {
-
- size_t const baseSize = l.getFlattenedSize();
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- size_t const baseNumFds = l.getFdCount();
- std::unique_ptr<int[]> baseFds(
- new (std::nothrow) int[baseNumFds]);
- if (!baseFds) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- int* fds = baseFds.get();
- size_t numFds = baseNumFds;
- if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- int const* constFds = static_cast<int const*>(baseFds.get());
- numFds = baseNumFds;
- if (unflatten(t, nh, constBuffer, size, constFds, numFds) != NO_ERROR) {
- return false;
- }
-
- return true;
-}
-
-/**
- * \brief Convert `IOmxBufferProducer::QueueBufferInput` to
- * `IGraphicBufferProducer::QueueBufferInput`.
- *
- * \param[out] l The destination `IGraphicBufferProducer::QueueBufferInput`.
- * \param[in] t The source `IOmxBufferProducer::QueueBufferInput`.
- *
- * If `t.fence` has a valid file descriptor, it will be duplicated.
- */
-inline bool convertTo(
- IGraphicBufferProducer::QueueBufferInput* l,
- IOmxBufferProducer::QueueBufferInput const& t) {
-
- size_t const baseSize = getFlattenedSize(t);
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- size_t const baseNumFds = getFdCount(t);
- std::unique_ptr<int[]> baseFds(
- new (std::nothrow) int[baseNumFds]);
- if (!baseFds) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- int* fds = baseFds.get();
- size_t numFds = baseNumFds;
- native_handle_t* nh;
- if (flatten(t, &nh, buffer, size, fds, numFds) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- int const* constFds = static_cast<int const*>(baseFds.get());
- numFds = baseNumFds;
- if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
- native_handle_close(nh);
- native_handle_delete(nh);
- return false;
- }
-
- native_handle_delete(nh);
- return true;
-}
-
-// Ref: frameworks/native/libs/gui/IGraphicBufferProducer.cpp:
-// IGraphicBufferProducer::QueueBufferOutput
-
-/**
- * \brief Wrap `IGraphicBufferProducer::QueueBufferOutput` in
- * `IOmxBufferProducer::QueueBufferOutput`.
- *
- * \param[out] t The wrapper of type
- * `IOmxBufferProducer::QueueBufferOutput`.
- * \param[out] nh The array of arrays of native handles that are referred to by
- * members of \p t.
- * \param[in] l The source `IGraphicBufferProducer::QueueBufferOutput`.
- *
- * On success, each member of \p nh will be either `nullptr` or a newly created
- * native handle. All the non-`nullptr` elements must be deleted individually
- * with `native_handle_delete()`.
- */
-// wrap: IGraphicBufferProducer::QueueBufferOutput ->
-// IOmxBufferProducer::QueueBufferOutput
-inline bool wrapAs(IOmxBufferProducer::QueueBufferOutput* t,
- std::vector<std::vector<native_handle_t*> >* nh,
- IGraphicBufferProducer::QueueBufferOutput const& l) {
- if (!wrapAs(&(t->frameTimestamps), nh, l.frameTimestamps)) {
- return false;
- }
- t->width = l.width;
- t->height = l.height;
- t->transformHint = l.transformHint;
- t->numPendingBuffers = l.numPendingBuffers;
- t->nextFrameNumber = l.nextFrameNumber;
- t->bufferReplaced = l.bufferReplaced;
- return true;
-}
-
-/**
- * \brief Convert `IOmxBufferProducer::QueueBufferOutput` to
- * `IGraphicBufferProducer::QueueBufferOutput`.
- *
- * \param[out] l The destination `IGraphicBufferProducer::QueueBufferOutput`.
- * \param[in] t The source `IOmxBufferProducer::QueueBufferOutput`.
- *
- * This function will duplicate all file descriptors contained in \p t.
- */
-// convert: IOmxBufferProducer::QueueBufferOutput ->
-// IGraphicBufferProducer::QueueBufferOutput
-inline bool convertTo(
- IGraphicBufferProducer::QueueBufferOutput* l,
- IOmxBufferProducer::QueueBufferOutput const& t) {
- if (!convertTo(&(l->frameTimestamps), t.frameTimestamps)) {
- return false;
- }
- l->width = t.width;
- l->height = t.height;
- l->transformHint = t.transformHint;
- l->numPendingBuffers = t.numPendingBuffers;
- l->nextFrameNumber = t.nextFrameNumber;
- l->bufferReplaced = t.bufferReplaced;
- return true;
-}
-
-/**
- * \brief Convert `IGraphicBufferProducer::DisconnectMode` to
- * `IOmxBufferProducer::DisconnectMode`.
- *
- * \param[in] l The source `IGraphicBufferProducer::DisconnectMode`.
- * \return The corresponding `IOmxBufferProducer::DisconnectMode`.
- */
-inline IOmxBufferProducer::DisconnectMode toOmxDisconnectMode(
- IGraphicBufferProducer::DisconnectMode l) {
- switch (l) {
- case IGraphicBufferProducer::DisconnectMode::Api:
- return IOmxBufferProducer::DisconnectMode::API;
- case IGraphicBufferProducer::DisconnectMode::AllLocal:
- return IOmxBufferProducer::DisconnectMode::ALL_LOCAL;
- }
- return IOmxBufferProducer::DisconnectMode::API;
-}
-
-/**
- * \brief Convert `IOmxBufferProducer::DisconnectMode` to
- * `IGraphicBufferProducer::DisconnectMode`.
- *
- * \param[in] l The source `IOmxBufferProducer::DisconnectMode`.
- * \return The corresponding `IGraphicBufferProducer::DisconnectMode`.
- */
-inline IGraphicBufferProducer::DisconnectMode toGuiDisconnectMode(
- IOmxBufferProducer::DisconnectMode t) {
- switch (t) {
- case IOmxBufferProducer::DisconnectMode::API:
- return IGraphicBufferProducer::DisconnectMode::Api;
- case IOmxBufferProducer::DisconnectMode::ALL_LOCAL:
- return IGraphicBufferProducer::DisconnectMode::AllLocal;
- }
- return IGraphicBufferProducer::DisconnectMode::Api;
-}
-
} // namespace utils
} // namespace V1_0
} // namespace omx
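A minimal sketch of the flatten()/unflatten() round trip that the removed helpers above describe, using the Region overloads: the caller owns the flat buffer, and `buffer`/`size` are advanced in place by each call. Everything below is illustrative only and assumes the overloads defined in Conversion.h.

    // Illustrative round trip through the removed Conversion.h Region overloads.
    Region original{};                             // HIDL Region; contents assumed
    size_t const baseSize = getFlattenedSize(original);
    std::unique_ptr<uint8_t[]> baseBuffer(new (std::nothrow) uint8_t[baseSize]);
    if (baseBuffer) {
        // flatten() writes into `buffer`, advancing it and shrinking `size`.
        void* buffer = static_cast<void*>(baseBuffer.get());
        size_t size = baseSize;
        if (flatten(original, buffer, size) == NO_ERROR) {
            // unflatten() reads back with the same advancing convention.
            Region copy{};
            void const* constBuffer = static_cast<void const*>(baseBuffer.get());
            size = baseSize;
            (void)unflatten(&copy, constBuffer, size);
        }
    }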
diff --git a/include/media/omx/1.0/WOmx.h b/include/media/omx/1.0/WOmx.h
index 9268bd6..f13546e 100644
--- a/include/media/omx/1.0/WOmx.h
+++ b/include/media/omx/1.0/WOmx.h
@@ -22,6 +22,7 @@
#include <media/IOMX.h>
+#include <hidl/HybridInterface.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
namespace android {
diff --git a/include/media/omx/1.0/WOmxBufferProducer.h b/include/media/omx/1.0/WOmxBufferProducer.h
deleted file mode 100644
index 54b9078..0000000
--- a/include/media/omx/1.0/WOmxBufferProducer.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright 2016, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERPRODUCER_H
-#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERPRODUCER_H
-
-#include <hidl/MQDescriptor.h>
-#include <hidl/Status.h>
-
-#include <binder/Binder.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/IProducerListener.h>
-
-#include <android/hardware/media/omx/1.0/IOmxBufferProducer.h>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace utils {
-
-using ::android::hardware::graphics::common::V1_0::PixelFormat;
-using ::android::hardware::media::omx::V1_0::IOmxBufferProducer;
-using ::android::hardware::media::omx::V1_0::IOmxProducerListener;
-using ::android::hardware::media::omx::V1_0::Status;
-using ::android::hardware::media::V1_0::AnwBuffer;
-using ::android::hidl::base::V1_0::IBase;
-using ::android::hardware::hidl_array;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::sp;
-
-using ::android::IGraphicBufferProducer;
-using ::android::BnGraphicBufferProducer;
-using ::android::IProducerListener;
-
-struct TWOmxBufferProducer : public IOmxBufferProducer {
- sp<IGraphicBufferProducer> mBase;
- TWOmxBufferProducer(sp<IGraphicBufferProducer> const& base);
- Return<void> requestBuffer(int32_t slot, requestBuffer_cb _hidl_cb)
- override;
- Return<Status> setMaxDequeuedBufferCount(int32_t maxDequeuedBuffers)
- override;
- Return<Status> setAsyncMode(bool async) override;
- Return<void> dequeueBuffer(
- uint32_t width, uint32_t height, PixelFormat format, uint32_t usage,
- bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) override;
- Return<Status> detachBuffer(int32_t slot) override;
- Return<void> detachNextBuffer(detachNextBuffer_cb _hidl_cb) override;
- Return<void> attachBuffer(const AnwBuffer& buffer, attachBuffer_cb _hidl_cb)
- override;
- Return<void> queueBuffer(
- int32_t slot, const IOmxBufferProducer::QueueBufferInput& input,
- queueBuffer_cb _hidl_cb) override;
- Return<Status> cancelBuffer(int32_t slot, const hidl_handle& fence)
- override;
- Return<void> query(int32_t what, query_cb _hidl_cb) override;
- Return<void> connect(const sp<IOmxProducerListener>& listener,
- int32_t api, bool producerControlledByApp,
- connect_cb _hidl_cb) override;
- Return<Status> disconnect(
- int32_t api,
- IOmxBufferProducer::DisconnectMode mode) override;
- Return<Status> setSidebandStream(const hidl_handle& stream) override;
- Return<void> allocateBuffers(
- uint32_t width, uint32_t height,
- PixelFormat format, uint32_t usage) override;
- Return<Status> allowAllocation(bool allow) override;
- Return<Status> setGenerationNumber(uint32_t generationNumber) override;
- Return<void> getConsumerName(getConsumerName_cb _hidl_cb) override;
- Return<Status> setSharedBufferMode(bool sharedBufferMode) override;
- Return<Status> setAutoRefresh(bool autoRefresh) override;
- Return<Status> setDequeueTimeout(int64_t timeoutNs) override;
- Return<void> getLastQueuedBuffer(getLastQueuedBuffer_cb _hidl_cb) override;
- Return<void> getFrameTimestamps(getFrameTimestamps_cb _hidl_cb) override;
- Return<void> getUniqueId(getUniqueId_cb _hidl_cb) override;
-};
-
-struct LWOmxBufferProducer : public BnGraphicBufferProducer {
- sp<IOmxBufferProducer> mBase;
- LWOmxBufferProducer(sp<IOmxBufferProducer> const& base);
-
- status_t requestBuffer(int slot, sp<GraphicBuffer>* buf) override;
- status_t setMaxDequeuedBufferCount(int maxDequeuedBuffers) override;
- status_t setAsyncMode(bool async) override;
- status_t dequeueBuffer(int* slot, sp<Fence>* fence, uint32_t w,
- uint32_t h, ::android::PixelFormat format, uint32_t usage,
- FrameEventHistoryDelta* outTimestamps) override;
- status_t detachBuffer(int slot) override;
- status_t detachNextBuffer(sp<GraphicBuffer>* outBuffer, sp<Fence>* outFence)
- override;
- status_t attachBuffer(int* outSlot, const sp<GraphicBuffer>& buffer)
- override;
- status_t queueBuffer(int slot,
- const QueueBufferInput& input,
- QueueBufferOutput* output) override;
- status_t cancelBuffer(int slot, const sp<Fence>& fence) override;
- int query(int what, int* value) override;
- status_t connect(const sp<IProducerListener>& listener, int api,
- bool producerControlledByApp, QueueBufferOutput* output) override;
- status_t disconnect(int api, DisconnectMode mode = DisconnectMode::Api)
- override;
- status_t setSidebandStream(const sp<NativeHandle>& stream) override;
- void allocateBuffers(uint32_t width, uint32_t height,
- ::android::PixelFormat format, uint32_t usage) override;
- status_t allowAllocation(bool allow) override;
- status_t setGenerationNumber(uint32_t generationNumber) override;
- String8 getConsumerName() const override;
- status_t setSharedBufferMode(bool sharedBufferMode) override;
- status_t setAutoRefresh(bool autoRefresh) override;
- status_t setDequeueTimeout(nsecs_t timeout) override;
- status_t getLastQueuedBuffer(sp<GraphicBuffer>* outBuffer,
- sp<Fence>* outFence, float outTransformMatrix[16]) override;
- void getFrameTimestamps(FrameEventHistoryDelta* outDelta) override;
- status_t getUniqueId(uint64_t* outId) const override;
-};
-
-} // namespace utils
-} // namespace V1_0
-} // namespace omx
-} // namespace media
-} // namespace hardware
-} // namespace android
-
-#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERPRODUCER_H
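The deleted TWOmxBufferProducer/LWOmxBufferProducer wrappers are superseded by the libgui bufferqueue wrappers that WOmx.cpp pulls in below (H2BGraphicBufferProducer and the HGraphicBufferProducer alias). A hedged sketch of the replacement path; the helper name is made up for illustration.

    // Illustrative only: adapt a HIDL bufferqueue producer into a binder
    // IGraphicBufferProducer via libgui's H2BGraphicBufferProducer, which is
    // what takes over from the deleted LWOmxBufferProducer wrapper.
    sp<IGraphicBufferProducer> wrapProducer(
            sp<HGraphicBufferProducer> const& hProducer) {
        return new H2BGraphicBufferProducer(hProducer);
    }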
diff --git a/include/media/omx/1.0/WOmxNode.h b/include/media/omx/1.0/WOmxNode.h
index 3176a65..1d575e7 100644
--- a/include/media/omx/1.0/WOmxNode.h
+++ b/include/media/omx/1.0/WOmxNode.h
@@ -20,9 +20,9 @@
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
-#include <binder/HalToken.h>
#include <utils/Errors.h>
#include <media/IOMX.h>
+#include <hidl/HybridInterface.h>
#include <android/hardware/media/omx/1.0/IOmxNode.h>
#include <android/hardware/media/omx/1.0/IOmxObserver.h>
diff --git a/include/media/omx/1.0/WOmxProducerListener.h b/include/media/omx/1.0/WOmxProducerListener.h
deleted file mode 100644
index 7d20887..0000000
--- a/include/media/omx/1.0/WOmxProducerListener.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright 2016, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
-#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
-
-#include <hidl/MQDescriptor.h>
-#include <hidl/Status.h>
-
-#include <binder/IBinder.h>
-#include <gui/IProducerListener.h>
-
-#include <android/hardware/media/omx/1.0/IOmxProducerListener.h>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace utils {
-
-using ::android::hardware::media::omx::V1_0::IOmxProducerListener;
-using ::android::hidl::base::V1_0::IBase;
-using ::android::hardware::hidl_array;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::sp;
-
-using ::android::IProducerListener;
-using ::android::BnProducerListener;
-
-struct TWOmxProducerListener : public IOmxProducerListener {
- sp<IProducerListener> mBase;
- TWOmxProducerListener(sp<IProducerListener> const& base);
- Return<void> onBufferReleased() override;
- Return<bool> needsReleaseNotify() override;
-};
-
-class LWOmxProducerListener : public BnProducerListener {
-public:
- sp<IOmxProducerListener> mBase;
- LWOmxProducerListener(sp<IOmxProducerListener> const& base);
- void onBufferReleased() override;
- bool needsReleaseNotify() override;
-};
-
-} // namespace utils
-} // namespace V1_0
-} // namespace omx
-} // namespace media
-} // namespace hardware
-} // namespace android
-
-#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
diff --git a/include/ndk/NdkImage.h b/include/ndk/NdkImage.h
index 15eae40..40c1699 100644
--- a/include/ndk/NdkImage.h
+++ b/include/ndk/NdkImage.h
@@ -646,7 +646,9 @@
* <li>{@link AMEDIA_ERROR_UNSUPPORTED} if pixel stride is undefined for the format of input
* image.</li>
* <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
- * image has been deleted.</li></ul>
+ * image has been deleted.</li>
+ * <li>{@link AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE} if the {@link AImage} cannot be locked
+ * for CPU access.</li></ul>
*/
media_status_t AImage_getPlanePixelStride(
const AImage* image, int planeIdx, /*out*/int32_t* pixelStride);
@@ -671,7 +673,9 @@
* <li>{@link AMEDIA_ERROR_UNSUPPORTED} if row stride is undefined for the format of input
* image.</li>
* <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
- * image has been deleted.</li></ul>
+ * image has been deleted.</li>
+ * <li>{@link AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE} if the {@link AImage} cannot be locked
+ * for CPU access.</li></ul>
*/
media_status_t AImage_getPlaneRowStride(
const AImage* image, int planeIdx, /*out*/int32_t* rowStride);
@@ -693,7 +697,9 @@
* <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image, data or dataLength is NULL, or
* planeIdx is out of the range of [0, numOfPlanes - 1].</li>
* <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
- * image has been deleted.</li></ul>
+ * image has been deleted.</li>
+ * <li>{@link AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE} if the {@link AImage} cannot be locked
+ * for CPU access.</li></ul>
*/
media_status_t AImage_getPlaneData(
const AImage* image, int planeIdx,
diff --git a/include/ndk/NdkMediaError.h b/include/ndk/NdkMediaError.h
index fb00b1d..9709a6f 100644
--- a/include/ndk/NdkMediaError.h
+++ b/include/ndk/NdkMediaError.h
@@ -60,6 +60,9 @@
AMEDIA_IMGREADER_ERROR_BASE = -30000,
AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE = AMEDIA_IMGREADER_ERROR_BASE - 1,
AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED = AMEDIA_IMGREADER_ERROR_BASE - 2,
+ AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE = AMEDIA_IMGREADER_ERROR_BASE - 3,
+ AMEDIA_IMGREADER_CANNOT_UNLOCK_IMAGE = AMEDIA_IMGREADER_ERROR_BASE - 4,
+ AMEDIA_IMGREADER_IMAGE_NOT_LOCKED = AMEDIA_IMGREADER_ERROR_BASE - 5,
} media_status_t;
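The new lock-related codes surface through the AImage plane accessors documented above. A hedged usage sketch (the `image` pointer and the control flow are illustrative only):

    // Illustrative handling of the new image-lock error codes.
    uint8_t* data = nullptr;
    int dataLength = 0;
    media_status_t status = AImage_getPlaneData(image, /*planeIdx=*/0, &data, &dataLength);
    if (status == AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE) {
        // The buffer could not be locked for CPU access; drop or retry this frame.
    } else if (status == AMEDIA_OK) {
        // data/dataLength stay valid until the AImage is deleted.
    }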
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index 7a72237..9bc2594 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -1420,7 +1420,7 @@
     // Instantiate a wave generator if not already done for this frequency
if (mWaveGens.indexOfKey(frequency) == NAME_NOT_FOUND) {
ToneGenerator::WaveGenerator *lpWaveGen =
- new ToneGenerator::WaveGenerator((unsigned short)mSamplingRate,
+ new ToneGenerator::WaveGenerator(mSamplingRate,
frequency,
TONEGEN_GAIN/lNumWaves);
mWaveGens.add(frequency, lpWaveGen);
@@ -1544,7 +1544,7 @@
// none
//
////////////////////////////////////////////////////////////////////////////////
-ToneGenerator::WaveGenerator::WaveGenerator(unsigned short samplingRate,
+ToneGenerator::WaveGenerator::WaveGenerator(uint32_t samplingRate,
unsigned short frequency, float volume) {
double d0;
double F_div_Fs; // frequency / samplingRate
diff --git a/media/libaudioclient/include/ToneGenerator.h b/media/libaudioclient/include/ToneGenerator.h
index 9fd5f61..fc3d3ee 100644
--- a/media/libaudioclient/include/ToneGenerator.h
+++ b/media/libaudioclient/include/ToneGenerator.h
@@ -299,7 +299,7 @@
WAVEGEN_STOP // Stop wave on zero crossing
};
- WaveGenerator(unsigned short samplingRate, unsigned short frequency,
+ WaveGenerator(uint32_t samplingRate, unsigned short frequency,
float volume);
~WaveGenerator();
diff --git a/media/libaudiohal/DevicesFactoryHalHidl.cpp b/media/libaudiohal/DevicesFactoryHalHidl.cpp
index 6d73e2d..758a5805 100644
--- a/media/libaudiohal/DevicesFactoryHalHidl.cpp
+++ b/media/libaudiohal/DevicesFactoryHalHidl.cpp
@@ -41,9 +41,11 @@
DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
mDevicesFactory = IDevicesFactory::getService();
if (mDevicesFactory != 0) {
- // It is assumet that DevicesFactory is owned by AudioFlinger
+ // It is assumed that DevicesFactory is owned by AudioFlinger
// and thus have the same lifespan.
mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+ } else {
+ LOG_ALWAYS_FATAL("Failed to obtain IDevicesFactory service");
}
}
diff --git a/media/libaudiohal/EffectsFactoryHalHidl.cpp b/media/libaudiohal/EffectsFactoryHalHidl.cpp
index fd3e207..f7dbb9c 100644
--- a/media/libaudiohal/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/EffectsFactoryHalHidl.cpp
@@ -44,6 +44,7 @@
EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
mEffectsFactory = IEffectsFactory::getService();
+ LOG_ALWAYS_FATAL_IF(mEffectsFactory == 0, "Failed to obtain IEffectsFactory service");
}
EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
diff --git a/media/libaudioprocessing/AudioResamplerFirProcess.h b/media/libaudioprocessing/AudioResamplerFirProcess.h
index 176202e..a741677 100644
--- a/media/libaudioprocessing/AudioResamplerFirProcess.h
+++ b/media/libaudioprocessing/AudioResamplerFirProcess.h
@@ -186,7 +186,7 @@
TINTERP lerpP,
const TO* const volumeLR)
{
- COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS > 0)
+ static_assert(CHANNELS > 0, "CHANNELS must be > 0");
if (CHANNELS > 2) {
// TO accum[CHANNELS];
diff --git a/media/libaudioprocessing/AudioResamplerFirProcessNeon.h b/media/libaudioprocessing/AudioResamplerFirProcessNeon.h
index 1ce76a8..c335050 100644
--- a/media/libaudioprocessing/AudioResamplerFirProcessNeon.h
+++ b/media/libaudioprocessing/AudioResamplerFirProcessNeon.h
@@ -83,7 +83,7 @@
const int16_t* coefsN1)
{
ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
- COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS == 1 || CHANNELS == 2);
+ static_assert(CHANNELS == 1 || CHANNELS == 2, "CHANNELS must be 1 or 2");
sP -= CHANNELS*((STRIDE>>1)-1);
coefsP = (const int16_t*)__builtin_assume_aligned(coefsP, 16);
@@ -191,7 +191,7 @@
const int32_t* coefsN1)
{
ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
- COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS == 1 || CHANNELS == 2);
+ static_assert(CHANNELS == 1 || CHANNELS == 2, "CHANNELS must be 1 or 2");
sP -= CHANNELS*((STRIDE>>1)-1);
coefsP = (const int32_t*)__builtin_assume_aligned(coefsP, 16);
@@ -366,7 +366,7 @@
const float* coefsN1)
{
ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
- COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS == 1 || CHANNELS == 2);
+ static_assert(CHANNELS == 1 || CHANNELS == 2, "CHANNELS must be 1 or 2");
sP -= CHANNELS*((STRIDE>>1)-1);
coefsP = (const float*)__builtin_assume_aligned(coefsP, 16);
diff --git a/media/libaudioprocessing/AudioResamplerFirProcessSSE.h b/media/libaudioprocessing/AudioResamplerFirProcessSSE.h
index 63ed052..30233b5 100644
--- a/media/libaudioprocessing/AudioResamplerFirProcessSSE.h
+++ b/media/libaudioprocessing/AudioResamplerFirProcessSSE.h
@@ -47,7 +47,7 @@
const float* coefsN1)
{
ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
- COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS == 1 || CHANNELS == 2);
+ static_assert(CHANNELS == 1 || CHANNELS == 2, "CHANNELS must be 1 or 2");
sP -= CHANNELS*(4-1); // adjust sP for a loop iteration of four
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index c057cf5..8a1ce22 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -57,11 +57,9 @@
StringArray.cpp \
omx/1.0/WGraphicBufferSource.cpp \
omx/1.0/WOmx.cpp \
- omx/1.0/WOmxBufferProducer.cpp \
omx/1.0/WOmxBufferSource.cpp \
omx/1.0/WOmxNode.cpp \
omx/1.0/WOmxObserver.cpp \
- omx/1.0/WOmxProducerListener.cpp \
LOCAL_SHARED_LIBRARIES := \
libui liblog libcutils libutils libbinder libsonivox libicuuc libicui18n libexpat \
@@ -75,7 +73,9 @@
libhidlmemory \
android.hidl.base@1.0 \
android.hidl.memory@1.0 \
+ android.hidl.token@1.0-utils \
android.hardware.graphics.common@1.0 \
+ android.hardware.graphics.bufferqueue@1.0 \
android.hardware.media@1.0 \
android.hardware.media.omx@1.0 \
@@ -83,6 +83,7 @@
libbinder \
libsonivox \
libmediadrm \
+ android.hidl.token@1.0-utils \
android.hardware.media.omx@1.0 \
android.hidl.memory@1.0 \
@@ -104,7 +105,7 @@
LOCAL_EXPORT_C_INCLUDE_DIRS := \
frameworks/av/include/media \
- frameworks/av/media/libmedia/aidl
+ frameworks/av/media/libmedia/aidl \
LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow cfi
diff --git a/media/libmedia/IHDCP.cpp b/media/libmedia/IHDCP.cpp
index 15ed579..a46017f 100644
--- a/media/libmedia/IHDCP.cpp
+++ b/media/libmedia/IHDCP.cpp
@@ -240,6 +240,8 @@
case HDCP_ENCRYPT:
{
+ CHECK_INTERFACE(IHDCP, data, reply);
+
size_t size = data.readInt32();
void *inData = NULL;
// watch out for overflow
@@ -313,6 +315,8 @@
case HDCP_DECRYPT:
{
+ CHECK_INTERFACE(IHDCP, data, reply);
+
size_t size = data.readInt32();
size_t bufSize = 2 * size;
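The two added CHECK_INTERFACE lines make HDCP_ENCRYPT and HDCP_DECRYPT validate the caller's interface descriptor before any payload is read, matching the other transaction cases. The sketch below shows the general shape of that guard in plain C++; the struct and constant names are stand-ins, not the real binder types:

    #include <string>

    struct ParcelSketch { std::string interfaceToken; };

    static const int kPermissionDenied = -1;
    static const int kOk = 0;

    int onTransactEncryptSketch(const ParcelSketch& data, const std::string& myDescriptor) {
        if (data.interfaceToken != myDescriptor) {
            return kPermissionDenied;   // what the guard effectively does: bail out early
        }
        // only after the descriptor check is the rest of the parcel (sizes, buffers) read
        return kOk;
    }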
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index bead69a..a6eba86 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -54,6 +54,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_IP),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BUS),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_PROXY),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_HEADSET),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DEFAULT),
// STUB must be after DEFAULT, so the latter is picked up by toString first.
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_STUB),
@@ -89,6 +90,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_IP),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUS),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_PROXY),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_HEADSET),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DEFAULT),
// STUB must be after DEFAULT, so the latter is picked up by toString first.
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_STUB),
@@ -112,6 +114,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO),
MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DIRECT_PCM),
MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_VOIP_RX),
TERMINATOR
};
@@ -124,6 +127,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_RAW),
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_SYNC),
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_MMAP_NOIRQ),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_VOIP_TX),
TERMINATOR
};
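The new USB_HEADSET and VOIP entries slot into the existing name/value tables that MAKE_STRING_FROM_ENUM builds, where each entry pairs an enum constant with its stringified name (and ordering matters when two constants share a value, as the DEFAULT/STUB comments note). A self-contained sketch of that table pattern, with illustrative names:

    #include <cstdio>

    enum Device { DEVICE_OUT_SPEAKER = 1, DEVICE_OUT_USB_HEADSET = 2 };

    struct Mapping { const char* name; Device value; };
    #define MAKE_ENTRY(e) { #e, e }

    static const Mapping kTable[] = {
        MAKE_ENTRY(DEVICE_OUT_SPEAKER),
        MAKE_ENTRY(DEVICE_OUT_USB_HEADSET),
    };

    int main() {
        for (const Mapping& m : kTable) {
            std::printf("%s = %d\n", m.name, m.value);   // name-to-value lookup table
        }
        return 0;
    }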
diff --git a/media/libmedia/include/IOMX.h b/media/libmedia/include/IOMX.h
index b4fc04c..62067c7 100644
--- a/media/libmedia/include/IOMX.h
+++ b/media/libmedia/include/IOMX.h
@@ -19,13 +19,13 @@
#define ANDROID_IOMX_H_
#include <binder/IInterface.h>
-#include <binder/HalToken.h>
#include <utils/List.h>
#include <utils/String8.h>
#include <cutils/native_handle.h>
#include <list>
+#include <hidl/HybridInterface.h>
#include <media/hardware/MetadataBufferType.h>
#include <android/hardware/media/omx/1.0/IOmxNode.h>
diff --git a/media/libmedia/omx/1.0/WOmx.cpp b/media/libmedia/omx/1.0/WOmx.cpp
index 8e4e147..ce624fa 100644
--- a/media/libmedia/omx/1.0/WOmx.cpp
+++ b/media/libmedia/omx/1.0/WOmx.cpp
@@ -14,10 +14,10 @@
* limitations under the License.
*/
+#include <gui/bufferqueue/1.0/H2BGraphicBufferProducer.h>
#include <media/omx/1.0/WOmx.h>
#include <media/omx/1.0/WOmxNode.h>
#include <media/omx/1.0/WOmxObserver.h>
-#include <media/omx/1.0/WOmxBufferProducer.h>
#include <media/omx/1.0/WGraphicBufferSource.h>
#include <media/omx/1.0/Conversion.h>
@@ -28,6 +28,11 @@
namespace V1_0 {
namespace utils {
+using ::android::hardware::graphics::bufferqueue::V1_0::utils::
+ H2BGraphicBufferProducer;
+typedef ::android::hardware::graphics::bufferqueue::V1_0::IGraphicBufferProducer
+ HGraphicBufferProducer;
+
// LWOmx
LWOmx::LWOmx(sp<IOmx> const& base) : mBase(base) {
}
@@ -70,10 +75,10 @@
status_t transStatus = toStatusT(mBase->createInputSurface(
[&fnStatus, bufferProducer, bufferSource] (
Status status,
- sp<IOmxBufferProducer> const& tProducer,
+ sp<HGraphicBufferProducer> const& tProducer,
sp<IGraphicBufferSource> const& tSource) {
fnStatus = toStatusT(status);
- *bufferProducer = new LWOmxBufferProducer(tProducer);
+ *bufferProducer = new H2BGraphicBufferProducer(tProducer);
*bufferSource = new LWGraphicBufferSource(tSource);
}));
return transStatus == NO_ERROR ? fnStatus : transStatus;
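WOmx.cpp now builds the returned producer with H2BGraphicBufferProducer from the graphics bufferqueue@1.0 utils library instead of the local LWOmxBufferProducer wrapper deleted below. The general idea is an adapter that owns a handle to one interface and re-exposes it through another (the "H2B" naming suggests HIDL-to-binder); a standalone sketch with hypothetical names, not the real libgui classes:

    #include <memory>
    #include <utility>

    struct HidlProducerSketch {        // stand-in for the HIDL-side interface
        int dequeue() { return 0; }
    };

    struct BnProducerSketch {          // stand-in for the libgui-side interface
        virtual ~BnProducerSketch() = default;
        virtual int dequeueBuffer() = 0;
    };

    class H2BProducerSketch : public BnProducerSketch {
    public:
        explicit H2BProducerSketch(std::shared_ptr<HidlProducerSketch> base)
            : mBase(std::move(base)) {}
        int dequeueBuffer() override { return mBase->dequeue(); }  // translate the call
    private:
        std::shared_ptr<HidlProducerSketch> mBase;
    };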
diff --git a/media/libmedia/omx/1.0/WOmxBufferProducer.cpp b/media/libmedia/omx/1.0/WOmxBufferProducer.cpp
deleted file mode 100644
index 03499f2..0000000
--- a/media/libmedia/omx/1.0/WOmxBufferProducer.cpp
+++ /dev/null
@@ -1,610 +0,0 @@
-/*
- * Copyright 2016, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "WOmxBufferProducer-utils"
-
-#include <utils/Log.h>
-
-#include <media/omx/1.0/WOmxBufferProducer.h>
-#include <media/omx/1.0/WOmxProducerListener.h>
-#include <media/omx/1.0/Conversion.h>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace utils {
-
-// TWOmxBufferProducer
-TWOmxBufferProducer::TWOmxBufferProducer(
- sp<IGraphicBufferProducer> const& base):
- mBase(base) {
-}
-
-Return<void> TWOmxBufferProducer::requestBuffer(
- int32_t slot, requestBuffer_cb _hidl_cb) {
- sp<GraphicBuffer> buf;
- status_t status = mBase->requestBuffer(slot, &buf);
- AnwBuffer anwBuffer;
- wrapAs(&anwBuffer, *buf);
- _hidl_cb(toStatus(status), anwBuffer);
- return Void();
-}
-
-Return<Status> TWOmxBufferProducer::setMaxDequeuedBufferCount(
- int32_t maxDequeuedBuffers) {
- return toStatus(mBase->setMaxDequeuedBufferCount(
- static_cast<int>(maxDequeuedBuffers)));
-}
-
-Return<Status> TWOmxBufferProducer::setAsyncMode(bool async) {
- return toStatus(mBase->setAsyncMode(async));
-}
-
-Return<void> TWOmxBufferProducer::dequeueBuffer(
- uint32_t width, uint32_t height,
- PixelFormat format, uint32_t usage,
- bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) {
- int slot;
- sp<Fence> fence;
- ::android::FrameEventHistoryDelta outTimestamps;
- status_t status = mBase->dequeueBuffer(
- &slot, &fence,
- width, height,
- static_cast<::android::PixelFormat>(format), usage,
- getFrameTimestamps ? &outTimestamps : nullptr);
- hidl_handle tFence;
- FrameEventHistoryDelta tOutTimestamps;
-
- native_handle_t* nh = nullptr;
- if ((fence == nullptr) || !wrapAs(&tFence, &nh, *fence)) {
- ALOGE("TWOmxBufferProducer::dequeueBuffer - Invalid output fence");
- _hidl_cb(toStatus(status),
- static_cast<int32_t>(slot),
- tFence,
- tOutTimestamps);
- return Void();
- }
- std::vector<std::vector<native_handle_t*> > nhAA;
- if (getFrameTimestamps && !wrapAs(&tOutTimestamps, &nhAA, outTimestamps)) {
- ALOGE("TWOmxBufferProducer::dequeueBuffer - Invalid output timestamps");
- _hidl_cb(toStatus(status),
- static_cast<int32_t>(slot),
- tFence,
- tOutTimestamps);
- native_handle_delete(nh);
- return Void();
- }
-
- _hidl_cb(toStatus(status),
- static_cast<int32_t>(slot),
- tFence,
- tOutTimestamps);
- native_handle_delete(nh);
- if (getFrameTimestamps) {
- for (auto& nhA : nhAA) {
- for (auto& handle : nhA) {
- native_handle_delete(handle);
- }
- }
- }
- return Void();
-}
-
-Return<Status> TWOmxBufferProducer::detachBuffer(int32_t slot) {
- return toStatus(mBase->detachBuffer(slot));
-}
-
-Return<void> TWOmxBufferProducer::detachNextBuffer(
- detachNextBuffer_cb _hidl_cb) {
- sp<GraphicBuffer> outBuffer;
- sp<Fence> outFence;
- status_t status = mBase->detachNextBuffer(&outBuffer, &outFence);
- AnwBuffer tBuffer;
- hidl_handle tFence;
-
- if (outBuffer == nullptr) {
- ALOGE("TWOmxBufferProducer::detachNextBuffer - Invalid output buffer");
- _hidl_cb(toStatus(status), tBuffer, tFence);
- return Void();
- }
- wrapAs(&tBuffer, *outBuffer);
- native_handle_t* nh = nullptr;
- if ((outFence != nullptr) && !wrapAs(&tFence, &nh, *outFence)) {
- ALOGE("TWOmxBufferProducer::detachNextBuffer - Invalid output fence");
- _hidl_cb(toStatus(status), tBuffer, tFence);
- return Void();
- }
-
- _hidl_cb(toStatus(status), tBuffer, tFence);
- native_handle_delete(nh);
- return Void();
-}
-
-Return<void> TWOmxBufferProducer::attachBuffer(
- const AnwBuffer& buffer,
- attachBuffer_cb _hidl_cb) {
- int outSlot;
- sp<GraphicBuffer> lBuffer = new GraphicBuffer();
- if (!convertTo(lBuffer.get(), buffer)) {
- ALOGE("TWOmxBufferProducer::attachBuffer - "
- "Invalid input native window buffer");
- _hidl_cb(toStatus(BAD_VALUE), -1);
- return Void();
- }
- status_t status = mBase->attachBuffer(&outSlot, lBuffer);
-
- _hidl_cb(toStatus(status), static_cast<int32_t>(outSlot));
- return Void();
-}
-
-Return<void> TWOmxBufferProducer::queueBuffer(
- int32_t slot, const QueueBufferInput& input,
- queueBuffer_cb _hidl_cb) {
- QueueBufferOutput tOutput;
- IGraphicBufferProducer::QueueBufferInput lInput(
- 0, false, HAL_DATASPACE_UNKNOWN,
- ::android::Rect(0, 0, 1, 1),
- NATIVE_WINDOW_SCALING_MODE_FREEZE,
- 0, ::android::Fence::NO_FENCE);
- if (!convertTo(&lInput, input)) {
- ALOGE("TWOmxBufferProducer::queueBuffer - Invalid input");
- _hidl_cb(toStatus(BAD_VALUE), tOutput);
- return Void();
- }
- IGraphicBufferProducer::QueueBufferOutput lOutput;
- status_t status = mBase->queueBuffer(
- static_cast<int>(slot), lInput, &lOutput);
-
- std::vector<std::vector<native_handle_t*> > nhAA;
- if (!wrapAs(&tOutput, &nhAA, lOutput)) {
- ALOGE("TWOmxBufferProducer::queueBuffer - Invalid output");
- _hidl_cb(toStatus(BAD_VALUE), tOutput);
- return Void();
- }
-
- _hidl_cb(toStatus(status), tOutput);
- for (auto& nhA : nhAA) {
- for (auto& nh : nhA) {
- native_handle_delete(nh);
- }
- }
- return Void();
-}
-
-Return<Status> TWOmxBufferProducer::cancelBuffer(
- int32_t slot, const hidl_handle& fence) {
- sp<Fence> lFence = new Fence();
- if (!convertTo(lFence.get(), fence)) {
- ALOGE("TWOmxBufferProducer::cancelBuffer - Invalid input fence");
- return toStatus(BAD_VALUE);
- }
- return toStatus(mBase->cancelBuffer(static_cast<int>(slot), lFence));
-}
-
-Return<void> TWOmxBufferProducer::query(int32_t what, query_cb _hidl_cb) {
- int lValue;
- int lReturn = mBase->query(static_cast<int>(what), &lValue);
- _hidl_cb(static_cast<int32_t>(lReturn), static_cast<int32_t>(lValue));
- return Void();
-}
-
-Return<void> TWOmxBufferProducer::connect(
- const sp<IOmxProducerListener>& listener,
- int32_t api, bool producerControlledByApp, connect_cb _hidl_cb) {
- sp<IProducerListener> lListener = listener == nullptr ?
- nullptr : new LWOmxProducerListener(listener);
- IGraphicBufferProducer::QueueBufferOutput lOutput;
- status_t status = mBase->connect(lListener,
- static_cast<int>(api),
- producerControlledByApp,
- &lOutput);
-
- QueueBufferOutput tOutput;
- std::vector<std::vector<native_handle_t*> > nhAA;
- if (!wrapAs(&tOutput, &nhAA, lOutput)) {
- ALOGE("TWOmxBufferProducer::connect - Invalid output");
- _hidl_cb(toStatus(status), tOutput);
- return Void();
- }
-
- _hidl_cb(toStatus(status), tOutput);
- for (auto& nhA : nhAA) {
- for (auto& nh : nhA) {
- native_handle_delete(nh);
- }
- }
- return Void();
-}
-
-Return<Status> TWOmxBufferProducer::disconnect(
- int32_t api, DisconnectMode mode) {
- return toStatus(mBase->disconnect(
- static_cast<int>(api),
- toGuiDisconnectMode(mode)));
-}
-
-Return<Status> TWOmxBufferProducer::setSidebandStream(const hidl_handle& stream) {
- return toStatus(mBase->setSidebandStream(NativeHandle::create(
- native_handle_clone(stream), true)));
-}
-
-Return<void> TWOmxBufferProducer::allocateBuffers(
- uint32_t width, uint32_t height, PixelFormat format, uint32_t usage) {
- mBase->allocateBuffers(
- width, height,
- static_cast<::android::PixelFormat>(format),
- usage);
- return Void();
-}
-
-Return<Status> TWOmxBufferProducer::allowAllocation(bool allow) {
- return toStatus(mBase->allowAllocation(allow));
-}
-
-Return<Status> TWOmxBufferProducer::setGenerationNumber(uint32_t generationNumber) {
- return toStatus(mBase->setGenerationNumber(generationNumber));
-}
-
-Return<void> TWOmxBufferProducer::getConsumerName(getConsumerName_cb _hidl_cb) {
- _hidl_cb(mBase->getConsumerName().string());
- return Void();
-}
-
-Return<Status> TWOmxBufferProducer::setSharedBufferMode(bool sharedBufferMode) {
- return toStatus(mBase->setSharedBufferMode(sharedBufferMode));
-}
-
-Return<Status> TWOmxBufferProducer::setAutoRefresh(bool autoRefresh) {
- return toStatus(mBase->setAutoRefresh(autoRefresh));
-}
-
-Return<Status> TWOmxBufferProducer::setDequeueTimeout(int64_t timeoutNs) {
- return toStatus(mBase->setDequeueTimeout(timeoutNs));
-}
-
-Return<void> TWOmxBufferProducer::getLastQueuedBuffer(
- getLastQueuedBuffer_cb _hidl_cb) {
- sp<GraphicBuffer> lOutBuffer = new GraphicBuffer();
- sp<Fence> lOutFence = new Fence();
- float lOutTransformMatrix[16];
- status_t status = mBase->getLastQueuedBuffer(
- &lOutBuffer, &lOutFence, lOutTransformMatrix);
-
- AnwBuffer tOutBuffer;
- if (lOutBuffer != nullptr) {
- wrapAs(&tOutBuffer, *lOutBuffer);
- }
- hidl_handle tOutFence;
- native_handle_t* nh = nullptr;
- if ((lOutFence == nullptr) || !wrapAs(&tOutFence, &nh, *lOutFence)) {
- ALOGE("TWOmxBufferProducer::getLastQueuedBuffer - "
- "Invalid output fence");
- _hidl_cb(toStatus(status),
- tOutBuffer,
- tOutFence,
- hidl_array<float, 16>());
- return Void();
- }
- hidl_array<float, 16> tOutTransformMatrix(lOutTransformMatrix);
-
- _hidl_cb(toStatus(status), tOutBuffer, tOutFence, tOutTransformMatrix);
- native_handle_delete(nh);
- return Void();
-}
-
-Return<void> TWOmxBufferProducer::getFrameTimestamps(
- getFrameTimestamps_cb _hidl_cb) {
- ::android::FrameEventHistoryDelta lDelta;
- mBase->getFrameTimestamps(&lDelta);
-
- FrameEventHistoryDelta tDelta;
- std::vector<std::vector<native_handle_t*> > nhAA;
- if (!wrapAs(&tDelta, &nhAA, lDelta)) {
- ALOGE("TWOmxBufferProducer::getFrameTimestamps - "
- "Invalid output frame timestamps");
- _hidl_cb(tDelta);
- return Void();
- }
-
- _hidl_cb(tDelta);
- for (auto& nhA : nhAA) {
- for (auto& nh : nhA) {
- native_handle_delete(nh);
- }
- }
- return Void();
-}
-
-Return<void> TWOmxBufferProducer::getUniqueId(getUniqueId_cb _hidl_cb) {
- uint64_t outId;
- status_t status = mBase->getUniqueId(&outId);
- _hidl_cb(toStatus(status), outId);
- return Void();
-}
-
-// LWOmxBufferProducer
-
-LWOmxBufferProducer::LWOmxBufferProducer(sp<IOmxBufferProducer> const& base) :
- mBase(base) {
-}
-
-status_t LWOmxBufferProducer::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
- *buf = new GraphicBuffer();
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->requestBuffer(
- static_cast<int32_t>(slot),
- [&fnStatus, &buf] (Status status, AnwBuffer const& buffer) {
- fnStatus = toStatusT(status);
- if (!convertTo(buf->get(), buffer)) {
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmxBufferProducer::setMaxDequeuedBufferCount(
- int maxDequeuedBuffers) {
- return toStatusT(mBase->setMaxDequeuedBufferCount(
- static_cast<int32_t>(maxDequeuedBuffers)));
-}
-
-status_t LWOmxBufferProducer::setAsyncMode(bool async) {
- return toStatusT(mBase->setAsyncMode(async));
-}
-
-status_t LWOmxBufferProducer::dequeueBuffer(
- int* slot, sp<Fence>* fence,
- uint32_t w, uint32_t h, ::android::PixelFormat format,
- uint32_t usage, FrameEventHistoryDelta* outTimestamps) {
- *fence = new Fence();
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->dequeueBuffer(
- w, h, static_cast<PixelFormat>(format), usage,
- outTimestamps != nullptr,
- [&fnStatus, slot, fence, outTimestamps] (
- Status status,
- int32_t tSlot,
- hidl_handle const& tFence,
- IOmxBufferProducer::FrameEventHistoryDelta const& tTs) {
- fnStatus = toStatusT(status);
- *slot = tSlot;
- if (!convertTo(fence->get(), tFence)) {
- ALOGE("LWOmxBufferProducer::dequeueBuffer - "
- "Invalid output fence");
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- if (outTimestamps && !convertTo(outTimestamps, tTs)) {
- ALOGE("LWOmxBufferProducer::dequeueBuffer - "
- "Invalid output timestamps");
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmxBufferProducer::detachBuffer(int slot) {
- return toStatusT(mBase->detachBuffer(static_cast<int>(slot)));
-}
-
-status_t LWOmxBufferProducer::detachNextBuffer(
- sp<GraphicBuffer>* outBuffer, sp<Fence>* outFence) {
- *outBuffer = new GraphicBuffer();
- *outFence = new Fence();
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->detachNextBuffer(
- [&fnStatus, outBuffer, outFence] (
- Status status,
- AnwBuffer const& tBuffer,
- hidl_handle const& tFence) {
- fnStatus = toStatusT(status);
- if (!convertTo(outFence->get(), tFence)) {
- ALOGE("LWOmxBufferProducer::detachNextBuffer - "
- "Invalid output fence");
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- if (!convertTo(outBuffer->get(), tBuffer)) {
- ALOGE("LWOmxBufferProducer::detachNextBuffer - "
- "Invalid output buffer");
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmxBufferProducer::attachBuffer(
- int* outSlot, const sp<GraphicBuffer>& buffer) {
- AnwBuffer tBuffer;
- wrapAs(&tBuffer, *buffer);
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->attachBuffer(tBuffer,
- [&fnStatus, outSlot] (Status status, int32_t slot) {
- fnStatus = toStatusT(status);
- *outSlot = slot;
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmxBufferProducer::queueBuffer(
- int slot,
- const QueueBufferInput& input,
- QueueBufferOutput* output) {
- IOmxBufferProducer::QueueBufferInput tInput;
- native_handle_t* nh;
- if (!wrapAs(&tInput, &nh, input)) {
- ALOGE("LWOmxBufferProducer::queueBuffer - Invalid input");
- return BAD_VALUE;
- }
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->queueBuffer(slot, tInput,
- [&fnStatus, output] (
- Status status,
- IOmxBufferProducer::QueueBufferOutput const& tOutput) {
- fnStatus = toStatusT(status);
- if (!convertTo(output, tOutput)) {
- ALOGE("LWOmxBufferProducer::queueBuffer - "
- "Invalid output");
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- }));
- native_handle_delete(nh);
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmxBufferProducer::cancelBuffer(int slot, const sp<Fence>& fence) {
- hidl_handle tFence;
- native_handle_t* nh = nullptr;
- if ((fence == nullptr) || !wrapAs(&tFence, &nh, *fence)) {
- ALOGE("LWOmxBufferProducer::cancelBuffer - Invalid input fence");
- return BAD_VALUE;
- }
-
- status_t status = toStatusT(mBase->cancelBuffer(
- static_cast<int32_t>(slot), tFence));
- native_handle_delete(nh);
- return status;
-}
-
-int LWOmxBufferProducer::query(int what, int* value) {
- int result;
- status_t transStatus = toStatusT(mBase->query(
- static_cast<int32_t>(what),
- [&result, value] (int32_t tResult, int32_t tValue) {
- result = static_cast<int>(tResult);
- *value = static_cast<int>(tValue);
- }));
- return transStatus == NO_ERROR ? result : static_cast<int>(transStatus);
-}
-
-status_t LWOmxBufferProducer::connect(
- const sp<IProducerListener>& listener, int api,
- bool producerControlledByApp, QueueBufferOutput* output) {
- sp<IOmxProducerListener> tListener = listener == nullptr ?
- nullptr : new TWOmxProducerListener(listener);
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->connect(
- tListener, static_cast<int32_t>(api), producerControlledByApp,
- [&fnStatus, output] (
- Status status,
- IOmxBufferProducer::QueueBufferOutput const& tOutput) {
- fnStatus = toStatusT(status);
- if (!convertTo(output, tOutput)) {
- ALOGE("LWOmxBufferProducer::connect - Invalid output");
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmxBufferProducer::disconnect(int api, DisconnectMode mode) {
- return toStatusT(mBase->disconnect(
- static_cast<int32_t>(api), toOmxDisconnectMode(mode)));
-}
-
-status_t LWOmxBufferProducer::setSidebandStream(
- const sp<NativeHandle>& stream) {
- return toStatusT(mBase->setSidebandStream(stream->handle()));
-}
-
-void LWOmxBufferProducer::allocateBuffers(uint32_t width, uint32_t height,
- ::android::PixelFormat format, uint32_t usage) {
- mBase->allocateBuffers(
- width, height, static_cast<PixelFormat>(format), usage);
-}
-
-status_t LWOmxBufferProducer::allowAllocation(bool allow) {
- return toStatusT(mBase->allowAllocation(allow));
-}
-
-status_t LWOmxBufferProducer::setGenerationNumber(uint32_t generationNumber) {
- return toStatusT(mBase->setGenerationNumber(generationNumber));
-}
-
-String8 LWOmxBufferProducer::getConsumerName() const {
- String8 lName;
- mBase->getConsumerName([&lName] (hidl_string const& name) {
- lName = name.c_str();
- });
- return lName;
-}
-
-status_t LWOmxBufferProducer::setSharedBufferMode(bool sharedBufferMode) {
- return toStatusT(mBase->setSharedBufferMode(sharedBufferMode));
-}
-
-status_t LWOmxBufferProducer::setAutoRefresh(bool autoRefresh) {
- return toStatusT(mBase->setAutoRefresh(autoRefresh));
-}
-
-status_t LWOmxBufferProducer::setDequeueTimeout(nsecs_t timeout) {
- return toStatusT(mBase->setDequeueTimeout(static_cast<int64_t>(timeout)));
-}
-
-status_t LWOmxBufferProducer::getLastQueuedBuffer(
- sp<GraphicBuffer>* outBuffer,
- sp<Fence>* outFence,
- float outTransformMatrix[16]) {
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->getLastQueuedBuffer(
- [&fnStatus, outBuffer, outFence, &outTransformMatrix] (
- Status status,
- AnwBuffer const& buffer,
- hidl_handle const& fence,
- hidl_array<float, 16> const& transformMatrix) {
- fnStatus = toStatusT(status);
- *outBuffer = new GraphicBuffer();
- if (!convertTo(outBuffer->get(), buffer)) {
- ALOGE("LWOmxBufferProducer::getLastQueuedBuffer - "
- "Invalid output buffer");
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- *outFence = new Fence();
- if (!convertTo(outFence->get(), fence)) {
- ALOGE("LWOmxBufferProducer::getLastQueuedBuffer - "
- "Invalid output fence");
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- std::copy(transformMatrix.data(),
- transformMatrix.data() + 16,
- outTransformMatrix);
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-void LWOmxBufferProducer::getFrameTimestamps(FrameEventHistoryDelta* outDelta) {
- mBase->getFrameTimestamps([outDelta] (
- IOmxBufferProducer::FrameEventHistoryDelta const& tDelta) {
- convertTo(outDelta, tDelta);
- });
-}
-
-status_t LWOmxBufferProducer::getUniqueId(uint64_t* outId) const {
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->getUniqueId(
- [&fnStatus, outId] (Status status, uint64_t id) {
- fnStatus = toStatusT(status);
- *outId = id;
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-} // namespace utils
-} // namespace V1_0
-} // namespace omx
-} // namespace media
-} // namespace hardware
-} // namespace android
diff --git a/media/libmedia/omx/1.0/WOmxProducerListener.cpp b/media/libmedia/omx/1.0/WOmxProducerListener.cpp
deleted file mode 100644
index 3ee381f..0000000
--- a/media/libmedia/omx/1.0/WOmxProducerListener.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2016, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <media/omx/1.0/WOmxProducerListener.h>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace utils {
-
-// TWOmxProducerListener
-TWOmxProducerListener::TWOmxProducerListener(
- sp<IProducerListener> const& base):
- mBase(base) {
-}
-
-Return<void> TWOmxProducerListener::onBufferReleased() {
- mBase->onBufferReleased();
- return Void();
-}
-
-Return<bool> TWOmxProducerListener::needsReleaseNotify() {
- return mBase->needsReleaseNotify();
-}
-
-// LWOmxProducerListener
-LWOmxProducerListener::LWOmxProducerListener(
- sp<IOmxProducerListener> const& base):
- mBase(base) {
-}
-
-void LWOmxProducerListener::onBufferReleased() {
- mBase->onBufferReleased();
-}
-
-bool LWOmxProducerListener::needsReleaseNotify() {
- return static_cast<bool>(mBase->needsReleaseNotify());
-}
-
-} // namespace utils
-} // namespace V1_0
-} // namespace omx
-} // namespace media
-} // namespace hardware
-} // namespace android
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 73570c8..f689ac9 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -69,7 +69,23 @@
// key for media statistics
static const char *kKeyRecorder = "recorder";
// attrs for media statistics
-//
+static const char *kRecorderHeight = "android.media.mediarecorder.height";
+static const char *kRecorderWidth = "android.media.mediarecorder.width";
+static const char *kRecorderFrameRate = "android.media.mediarecorder.frame-rate";
+static const char *kRecorderVideoBitrate = "android.media.mediarecorder.video-bitrate";
+static const char *kRecorderAudioSampleRate = "android.media.mediarecorder.audio-samplerate";
+static const char *kRecorderAudioChannels = "android.media.mediarecorder.audio-channels";
+static const char *kRecorderAudioBitrate = "android.media.mediarecorder.audio-bitrate";
+static const char *kRecorderVideoIframeInterval = "android.media.mediarecorder.video-iframe-interval";
+static const char *kRecorderMovieTimescale = "android.media.mediarecorder.movie-timescale";
+static const char *kRecorderAudioTimescale = "android.media.mediarecorder.audio-timescale";
+static const char *kRecorderVideoTimescale = "android.media.mediarecorder.video-timescale";
+static const char *kRecorderVideoProfile = "android.media.mediarecorder.video-encoder-profile";
+static const char *kRecorderVideoLevel = "android.media.mediarecorder.video-encoder-level";
+static const char *kRecorderCaptureFpsEnable = "android.media.mediarecorder.capture-fpsenable";
+static const char *kRecorderCaptureFps = "android.media.mediarecorder.capture-fps";
+static const char *kRecorderRotation = "android.media.mediarecorder.rotation";
+
// To collect the encoder usage for the battery app
static void addBatteryData(uint32_t params) {
sp<IBinder> binder =
@@ -124,34 +140,34 @@
// TBD mOutputFormat = OUTPUT_FORMAT_THREE_GPP;
// TBD mAudioEncoder = AUDIO_ENCODER_AMR_NB;
// TBD mVideoEncoder = VIDEO_ENCODER_DEFAULT;
- mAnalyticsItem->setInt32("ht", mVideoHeight);
- mAnalyticsItem->setInt32("wid", mVideoWidth);
- mAnalyticsItem->setInt32("frame-rate", mFrameRate);
- mAnalyticsItem->setInt32("video-bitrate", mVideoBitRate);
- mAnalyticsItem->setInt32("audio-samplerate", mSampleRate);
- mAnalyticsItem->setInt32("audio-channels", mAudioChannels);
- mAnalyticsItem->setInt32("audio-bitrate", mAudioBitRate);
+ mAnalyticsItem->setInt32(kRecorderHeight, mVideoHeight);
+ mAnalyticsItem->setInt32(kRecorderWidth, mVideoWidth);
+ mAnalyticsItem->setInt32(kRecorderFrameRate, mFrameRate);
+ mAnalyticsItem->setInt32(kRecorderVideoBitrate, mVideoBitRate);
+ mAnalyticsItem->setInt32(kRecorderAudioSampleRate, mSampleRate);
+ mAnalyticsItem->setInt32(kRecorderAudioChannels, mAudioChannels);
+ mAnalyticsItem->setInt32(kRecorderAudioBitrate, mAudioBitRate);
// TBD mInterleaveDurationUs = 0;
- mAnalyticsItem->setInt32("video-iframe-interval", mIFramesIntervalSec);
+ mAnalyticsItem->setInt32(kRecorderVideoIframeInterval, mIFramesIntervalSec);
// TBD mAudioSourceNode = 0;
// TBD mUse64BitFileOffset = false;
- mAnalyticsItem->setInt32("movie-timescale", mMovieTimeScale);
- mAnalyticsItem->setInt32("audio-timescale", mAudioTimeScale);
- mAnalyticsItem->setInt32("video-timescale", mVideoTimeScale);
+ mAnalyticsItem->setInt32(kRecorderMovieTimescale, mMovieTimeScale);
+ mAnalyticsItem->setInt32(kRecorderAudioTimescale, mAudioTimeScale);
+ mAnalyticsItem->setInt32(kRecorderVideoTimescale, mVideoTimeScale);
// TBD mCameraId = 0;
// TBD mStartTimeOffsetMs = -1;
- mAnalyticsItem->setInt32("video-encoder-profile", mVideoEncoderProfile);
- mAnalyticsItem->setInt32("video-encoder-level", mVideoEncoderLevel);
+ mAnalyticsItem->setInt32(kRecorderVideoProfile, mVideoEncoderProfile);
+ mAnalyticsItem->setInt32(kRecorderVideoLevel, mVideoEncoderLevel);
// TBD mMaxFileDurationUs = 0;
// TBD mMaxFileSizeBytes = 0;
// TBD mTrackEveryTimeDurationUs = 0;
- mAnalyticsItem->setInt32("capture-fpsenable", mCaptureFpsEnable);
- mAnalyticsItem->setInt32("capture-fps", mCaptureFps);
+ mAnalyticsItem->setInt32(kRecorderCaptureFpsEnable, mCaptureFpsEnable);
+ mAnalyticsItem->setDouble(kRecorderCaptureFps, mCaptureFps);
// TBD mTimeBetweenCaptureUs = -1;
// TBD mCameraSourceTimeLapse = NULL;
// TBD mMetaDataStoredInVideoBuffers = kMetadataBufferTypeInvalid;
// TBD mEncoderProfiles = MediaProfiles::getInstance();
- mAnalyticsItem->setInt32("rotation", mRotationDegrees);
+ mAnalyticsItem->setInt32(kRecorderRotation, mRotationDegrees);
// PII mLatitudex10000 = -3600000;
// PII mLongitudex10000 = -3600000;
// TBD mTotalBitRate = 0;
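The recorder metrics now use named constants in a reverse-DNS style namespace ("android.media.mediarecorder.*") rather than short ad-hoc strings, so attributes from different services cannot collide and the key set is declared in one place. A trivial standalone sketch of the convention (a std::map stands in for the analytics item, which is an assumption made only for illustration):

    #include <map>
    #include <string>

    static const char* kRecorderWidthSketch  = "android.media.mediarecorder.width";
    static const char* kRecorderHeightSketch = "android.media.mediarecorder.height";

    int main() {
        std::map<std::string, int> record;            // stand-in for the analytics item
        record[kRecorderWidthSketch]  = 1920;
        record[kRecorderHeightSketch] = 1080;
        return 0;
    }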
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
index 56c558d..08c3cf8 100644
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ b/media/libmediaplayerservice/nuplayer/Android.mk
@@ -36,6 +36,7 @@
LOCAL_SHARED_LIBRARIES := \
libbinder \
libui \
+ libgui \
libmedia \
libmediadrm \
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index c949080..8378d24 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -1916,7 +1916,7 @@
status_t NuPlayer::GenericSource::checkDrmInfo()
{
if (mFileMeta == NULL) {
- ALOGE("checkDrmInfo: No metadata");
+ ALOGI("checkDrmInfo: No metadata");
return OK; // letting the caller respond accordingly
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 19c4d85..621347d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -38,6 +38,22 @@
namespace android {
+// key for media statistics
+static const char *kKeyPlayer = "nuplayer";
+// attrs for media statistics
+static const char *kPlayerVMime = "android.media.mediaplayer.video.mime";
+static const char *kPlayerVCodec = "android.media.mediaplayer.video.codec";
+static const char *kPlayerWidth = "android.media.mediaplayer.width";
+static const char *kPlayerHeight = "android.media.mediaplayer.height";
+static const char *kPlayerFrames = "android.media.mediaplayer.frames";
+static const char *kPlayerFramesDropped = "android.media.mediaplayer.dropped";
+static const char *kPlayerAMime = "android.media.mediaplayer.audio.mime";
+static const char *kPlayerACodec = "android.media.mediaplayer.audio.codec";
+static const char *kPlayerDuration = "android.media.mediaplayer.durationMs";
+static const char *kPlayerPlaying = "android.media.mediaplayer.playingMs";
+static const char *kPlayerError = "android.media.mediaplayer.err";
+static const char *kPlayerErrorCode = "android.media.mediaplayer.errcode";
+
NuPlayerDriver::NuPlayerDriver(pid_t pid)
: mState(STATE_IDLE),
@@ -59,7 +75,7 @@
mLooper->setName("NuPlayerDriver Looper");
// set up an analytics record
- mAnalyticsItem = new MediaAnalyticsItem("nuplayer");
+ mAnalyticsItem = new MediaAnalyticsItem(kKeyPlayer);
mAnalyticsItem->generateSessionID();
mLooper->start(
@@ -499,7 +515,7 @@
if (where == NULL) {
where = "unknown";
}
- ALOGD("finalizeMetrics(%p) from %s at state %d", this, where, mState);
+ ALOGV("finalizeMetrics(%p) from %s at state %d", this, where, mState);
// gather the final stats for this record
Vector<sp<AMessage>> trackStats;
@@ -517,15 +533,15 @@
if (mime.startsWith("video/")) {
int32_t width, height;
- mAnalyticsItem->setCString("video/mime", mime.c_str());
+ mAnalyticsItem->setCString(kPlayerVMime, mime.c_str());
if (!name.empty()) {
- mAnalyticsItem->setCString("video/codec", name.c_str());
+ mAnalyticsItem->setCString(kPlayerVCodec, name.c_str());
}
if (stats->findInt32("width", &width)
&& stats->findInt32("height", &height)) {
- mAnalyticsItem->setInt32("wid", width);
- mAnalyticsItem->setInt32("ht", height);
+ mAnalyticsItem->setInt32(kPlayerWidth, width);
+ mAnalyticsItem->setInt32(kPlayerHeight, height);
}
int64_t numFramesTotal = 0;
@@ -533,14 +549,14 @@
stats->findInt64("frames-total", &numFramesTotal);
stats->findInt64("frames-dropped-output", &numFramesDropped);
- mAnalyticsItem->setInt64("frames", numFramesTotal);
- mAnalyticsItem->setInt64("dropped", numFramesDropped);
+ mAnalyticsItem->setInt64(kPlayerFrames, numFramesTotal);
+ mAnalyticsItem->setInt64(kPlayerFramesDropped, numFramesDropped);
} else if (mime.startsWith("audio/")) {
- mAnalyticsItem->setCString("audio/mime", mime.c_str());
+ mAnalyticsItem->setCString(kPlayerAMime, mime.c_str());
if (!name.empty()) {
- mAnalyticsItem->setCString("audio/codec", name.c_str());
+ mAnalyticsItem->setCString(kPlayerACodec, name.c_str());
}
}
}
@@ -549,11 +565,11 @@
int duration_ms = -1;
getDuration(&duration_ms);
if (duration_ms != -1) {
- mAnalyticsItem->setInt64("duration", duration_ms);
+ mAnalyticsItem->setInt64(kPlayerDuration, duration_ms);
}
if (mPlayingTimeUs > 0) {
- mAnalyticsItem->setInt64("playing", (mPlayingTimeUs+500)/1000 );
+ mAnalyticsItem->setInt64(kPlayerPlaying, (mPlayingTimeUs+500)/1000 );
}
}
}
@@ -563,7 +579,7 @@
if (where == NULL) {
where = "unknown";
}
- ALOGD("logMetrics(%p) from %s at state %d", this, where, mState);
+ ALOGV("logMetrics(%p) from %s at state %d", this, where, mState);
if (mAnalyticsItem == NULL || mAnalyticsItem->isEnabled() == false) {
return;
@@ -923,6 +939,15 @@
case MEDIA_ERROR:
{
+ // when we have an error, add it to the analytics for this playback.
+ // ext1 is our primary 'error type' value. Only add ext2 when non-zero.
+ // [the msg check is needed because an earlier case can fall through into this one]
+ if (msg == MEDIA_ERROR) {
+ mAnalyticsItem->setInt32(kPlayerError, ext1);
+ if (ext2 != 0) {
+ mAnalyticsItem->setInt32(kPlayerErrorCode, ext2);
+ }
+ }
mAtEOS = true;
break;
}
diff --git a/media/libstagefright/AMRExtractor.cpp b/media/libstagefright/AMRExtractor.cpp
index 0e98db8..2892520 100644
--- a/media/libstagefright/AMRExtractor.cpp
+++ b/media/libstagefright/AMRExtractor.cpp
@@ -259,7 +259,7 @@
int64_t seekTimeUs;
ReadOptions::SeekMode mode;
- if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ if (mOffsetTableLength > 0 && options && options->getSeekTo(&seekTimeUs, &mode)) {
size_t size;
int64_t seekFrame = seekTimeUs / 20000ll; // 20ms per frame.
mCurrentTimeUs = seekFrame * 20000ll;
diff --git a/media/libstagefright/HevcUtils.cpp b/media/libstagefright/HevcUtils.cpp
index 718710a..7d463a9 100644
--- a/media/libstagefright/HevcUtils.cpp
+++ b/media/libstagefright/HevcUtils.cpp
@@ -45,16 +45,32 @@
}
status_t HevcParameterSets::addNalUnit(const uint8_t* data, size_t size) {
+ if (size < 1) {
+ ALOGE("empty NAL b/35467107");
+ return ERROR_MALFORMED;
+ }
uint8_t nalUnitType = (data[0] >> 1) & 0x3f;
status_t err = OK;
switch (nalUnitType) {
case 32: // VPS
+ if (size < 2) {
+ ALOGE("invalid NAL/VPS size b/35467107");
+ return ERROR_MALFORMED;
+ }
err = parseVps(data + 2, size - 2);
break;
case 33: // SPS
+ if (size < 2) {
+ ALOGE("invalid NAL/SPS size b/35467107");
+ return ERROR_MALFORMED;
+ }
err = parseSps(data + 2, size - 2);
break;
case 34: // PPS
+ if (size < 2) {
+ ALOGE("invalid NAL/PPS size b/35467107");
+ return ERROR_MALFORMED;
+ }
err = parsePps(data + 2, size - 2);
break;
case 39: // Prefix SEI
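The added size checks reflect the HEVC NAL layout: the NAL unit header is two bytes (forbidden bit, 6-bit nal_unit_type, layer id, temporal id), and the VPS/SPS/PPS parsers above start reading at data + 2, so anything shorter must be rejected as malformed. A self-contained sketch of the header handling:

    #include <cstddef>
    #include <cstdint>

    bool hevcNalTypeSketch(const uint8_t* data, size_t size, uint8_t* outType) {
        if (data == nullptr || size < 2) {
            return false;                     // too short for the two-byte NAL header
        }
        *outType = (data[0] >> 1) & 0x3f;     // nal_unit_type: bits 1..6 of the first byte
        return true;
    }

    int main() {
        const uint8_t vps[] = { 0x40, 0x01, 0x0c };   // type-32 (VPS) header plus one payload byte
        uint8_t type = 0;
        return hevcNalTypeSketch(vps, sizeof(vps), &type) && type == 32 ? 0 : 1;
    }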
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 8728b6f..f695717 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -62,10 +62,15 @@
// key for media statistics
static const char *kCodecKeyName = "codec";
// attrs for media statistics
-static const char *kCodecCodec = "codec"; /* e.g. OMX.google.aac.decoder */
-static const char *kCodecMime = "mime"; /* e.g. audio/mime */
-static const char *kCodecMode = "mode"; /* audio, video */
-static const char *kCodecSecure = "secure"; /* 0, 1 */
+static const char *kCodecCodec = "android.media.mediacodec.codec"; /* e.g. OMX.google.aac.decoder */
+static const char *kCodecMime = "android.media.mediacodec.mime"; /* e.g. audio/mime */
+static const char *kCodecMode = "android.media.mediacodec.mode"; /* audio, video */
+static const char *kCodecSecure = "android.media.mediacodec.secure"; /* 0, 1 */
+static const char *kCodecHeight = "android.media.mediacodec.height"; /* 0..n */
+static const char *kCodecWidth = "android.media.mediacodec.width"; /* 0..n */
+static const char *kCodecRotation = "android.media.mediacodec.rotation-degrees"; /* 0/90/180/270 */
+static const char *kCodecCrypto = "android.media.mediacodec.crypto"; /* 0,1 */
+static const char *kCodecEncoder = "android.media.mediacodec.encoder"; /* 0,1 */
@@ -636,9 +641,8 @@
mAnalyticsItem->setCString(kCodecCodec, name.c_str());
}
mAnalyticsItem->setCString(kCodecMode, mIsVideo ? "video" : "audio");
- //mAnalyticsItem->setInt32("type", nameIsType);
if (nameIsType)
- mAnalyticsItem->setInt32("encoder", encoder);
+ mAnalyticsItem->setInt32(kCodecEncoder, encoder);
}
status_t err;
@@ -698,14 +702,14 @@
if (mIsVideo) {
format->findInt32("width", &mVideoWidth);
format->findInt32("height", &mVideoHeight);
if (!format->findInt32("rotation-degrees", &mRotationDegrees)) {
mRotationDegrees = 0;
}
if (mAnalyticsItem != NULL) {
- mAnalyticsItem->setInt32("width", mVideoWidth);
- mAnalyticsItem->setInt32("height", mVideoHeight);
- mAnalyticsItem->setInt32("rotation", mRotationDegrees);
+ mAnalyticsItem->setInt32(kCodecWidth, mVideoWidth);
+ mAnalyticsItem->setInt32(kCodecHeight, mVideoHeight);
+ mAnalyticsItem->setInt32(kCodecRotation, mRotationDegrees);
}
// Prevent possible integer overflow in downstream code.
@@ -728,7 +732,7 @@
}
if (mAnalyticsItem != NULL) {
// XXX: save indication that it's crypto in some way...
- mAnalyticsItem->setInt32("crypto", 1);
+ mAnalyticsItem->setInt32(kCodecCrypto, 1);
}
}
@@ -1167,7 +1171,9 @@
return OK;
}
-status_t MediaCodec::getMetrics(Parcel *reply) {
+status_t MediaCodec::getMetrics(MediaAnalyticsItem * &reply) {
+
+ reply = NULL;
// shouldn't happen, but be safe
if (mAnalyticsItem == NULL) {
@@ -1177,7 +1183,7 @@
// XXX: go get current values for whatever in-flight data we want
// send it back to the caller.
- mAnalyticsItem->writeToParcel(reply);
+ reply = mAnalyticsItem->dup();
return OK;
}
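getMetrics now hands the caller an independent copy of the analytics item through the reference parameter instead of serializing into a Parcel, so the codec's own item can keep accumulating while the caller inspects the snapshot. A standalone sketch of that out-parameter-plus-dup pattern (the types here are stand-ins, and caller ownership of the copy is an assumption):

    #include <cstdint>

    struct ItemSketch {                       // stand-in for the analytics item
        int64_t frames = 0;
        ItemSketch* dup() const { return new ItemSketch(*this); }
    };

    struct CodecSketch {
        ItemSketch current;
        int getMetricsSketch(ItemSketch*& reply) {
            reply = current.dup();            // hand back an independent copy
            return 0;                         // OK
        }
    };

    int main() {
        CodecSketch codec;
        ItemSketch* snapshot = nullptr;
        codec.getMetricsSketch(snapshot);
        delete snapshot;                      // caller releases the copy when done
        return 0;
    }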
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 62c0d8a..76775c29 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -57,6 +57,9 @@
// key for media statistics
static const char *kKeyExtractor = "extractor";
// attrs for media statistics
+static const char *kExtractorMime = "android.media.mediaextractor.mime";
+static const char *kExtractorTracks = "android.media.mediaextractor.ntrk";
+static const char *kExtractorFormat = "android.media.mediaextractor.fmt";
MediaExtractor::MediaExtractor() {
if (!LOG_NDEBUG) {
@@ -265,9 +268,9 @@
if (MEDIA_LOG) {
if (ret->mAnalyticsItem != NULL) {
size_t ntracks = ret->countTracks();
- ret->mAnalyticsItem->setCString("fmt", ret->name());
+ ret->mAnalyticsItem->setCString(kExtractorFormat, ret->name());
// tracks (size_t)
- ret->mAnalyticsItem->setInt32("ntrk", ntracks);
+ ret->mAnalyticsItem->setInt32(kExtractorTracks, ntracks);
// metadata
sp<MetaData> pMetaData = ret->getMetaData();
if (pMetaData != NULL) {
@@ -276,7 +279,7 @@
// 'mime'
const char *mime = NULL;
if (pMetaData->findCString(kKeyMIMEType, &mime)) {
- ret->mAnalyticsItem->setCString("mime", mime);
+ ret->mAnalyticsItem->setCString(kExtractorMime, mime);
}
// what else is interesting and not already available?
}
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index ea3ed28..e3ca516 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -615,7 +615,7 @@
}
off64_t size;
- if (mDurationUs >= 0 && mDataSource->getSize(&size) == OK) {
+ if (mDurationUs > 0 && mDataSource->getSize(&size) == OK) {
*bitrate = size * 8000000ll / mDurationUs; // in bits/sec
return true;
}
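Tightening the test from mDurationUs >= 0 to mDurationUs > 0 matters because the line below divides by the duration; a zero duration previously passed the check and would divide by zero. A minimal sketch of the guarded calculation:

    #include <cstdint>

    bool estimateBitrateSketch(int64_t sizeBytes, int64_t durationUs, int64_t* outBps) {
        if (durationUs <= 0) {
            return false;                               // a zero duration would divide by zero below
        }
        *outBps = sizeBytes * 8000000LL / durationUs;   // bytes over microseconds -> bits per second
        return true;
    }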
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
index 5f516cb..44415e2 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
@@ -16,6 +16,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "SoftAACEncoder2"
+#include <log/log.h>
#include <utils/Log.h>
#include "SoftAACEncoder2.h"
@@ -61,6 +62,7 @@
mSentCodecSpecificData(false),
mInputSize(0),
mInputFrame(NULL),
+ mAllocatedFrameSize(0),
mInputTimeUs(-1ll),
mSawInputEOS(false),
mSignalledError(false) {
@@ -510,6 +512,15 @@
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+ if (outHeader->nOffset + encInfo.confSize > outHeader->nAllocLen) {
+ ALOGE("b/34617444");
+ android_errorWriteLog(0x534e4554,"34617444");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+
outHeader->nFilledLen = encInfo.confSize;
outHeader->nFlags = OMX_BUFFERFLAG_CODECCONFIG;
@@ -556,6 +567,15 @@
if (mInputFrame == NULL) {
mInputFrame = new int16_t[numBytesPerInputFrame / sizeof(int16_t)];
+ mAllocatedFrameSize = numBytesPerInputFrame;
+ } else if (mAllocatedFrameSize != numBytesPerInputFrame) {
+ ALOGE("b/34621073: changed size from %d to %d",
+ (int)mAllocatedFrameSize, (int)numBytesPerInputFrame);
+ android_errorWriteLog(0x534e4554,"34621073");
+ delete[] mInputFrame;
+ mInputFrame = new int16_t[numBytesPerInputFrame / sizeof(int16_t)];
+ mAllocatedFrameSize = numBytesPerInputFrame;
+
}
if (mInputSize == 0) {
@@ -706,6 +726,7 @@
delete[] mInputFrame;
mInputFrame = NULL;
mInputSize = 0;
+ mAllocatedFrameSize = 0;
mSentCodecSpecificData = false;
mInputTimeUs = -1ll;
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
index f1b81e1..123fd25 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
@@ -62,6 +62,7 @@
bool mSentCodecSpecificData;
size_t mInputSize;
int16_t *mInputFrame;
+ size_t mAllocatedFrameSize;
int64_t mInputTimeUs;
bool mSawInputEOS;
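Tracking mAllocatedFrameSize lets the encoder detect when numBytesPerInputFrame changes after the staging buffer was first allocated and reallocate it rather than keep filling a buffer of the old size (the b/34621073 overflow). A standalone sketch of that track-the-allocated-size pattern, with illustrative names:

    #include <cstddef>
    #include <cstdint>

    struct FrameBufferSketch {
        int16_t* data = nullptr;
        size_t allocatedBytes = 0;

        void ensureCapacity(size_t requiredBytes) {
            if (data != nullptr && allocatedBytes == requiredBytes) {
                return;                                   // existing buffer is the right size
            }
            delete[] data;                                // safe on nullptr too
            data = new int16_t[requiredBytes / sizeof(int16_t)];
            allocatedBytes = requiredBytes;
        }

        ~FrameBufferSketch() { delete[] data; }
    };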
diff --git a/media/libstagefright/codecs/avc/Android.mk b/media/libstagefright/codecs/avc/Android.mk
deleted file mode 100644
index 2e43120..0000000
--- a/media/libstagefright/codecs/avc/Android.mk
+++ /dev/null
@@ -1,4 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libstagefright/codecs/avc/common/Android.mk b/media/libstagefright/codecs/avc/common/Android.mk
deleted file mode 100644
index 9959554..0000000
--- a/media/libstagefright/codecs/avc/common/Android.mk
+++ /dev/null
@@ -1,23 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- src/deblock.cpp \
- src/dpb.cpp \
- src/fmo.cpp \
- src/mb_access.cpp \
- src/reflist.cpp
-
-LOCAL_MODULE := libstagefright_avc_common
-
-LOCAL_CFLAGS := -DOSCL_EXPORT_REF= -DOSCL_IMPORT_REF=
-
-LOCAL_C_INCLUDES := \
- $(LOCAL_PATH)/src \
- $(LOCAL_PATH)/include
-
-LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow cfi
-LOCAL_SANITIZE_DIAG := cfi
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/avc/common/MODULE_LICENSE_APACHE2 b/media/libstagefright/codecs/avc/common/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/media/libstagefright/codecs/avc/common/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/media/libstagefright/codecs/avc/common/NOTICE b/media/libstagefright/codecs/avc/common/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/media/libstagefright/codecs/avc/common/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
- Copyright (c) 2005-2008, The Android Open Source Project
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
diff --git a/media/libstagefright/codecs/avc/common/include/avc_types.h b/media/libstagefright/codecs/avc/common/include/avc_types.h
deleted file mode 100644
index ec8b6de..0000000
--- a/media/libstagefright/codecs/avc/common/include/avc_types.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef AVC_TYPES_H_
-
-#define AVC_TYPES_H_
-
-#include <stdint.h>
-
-typedef uint8_t uint8;
-typedef uint16_t uint16;
-typedef int16_t int16;
-typedef uint32_t uint32;
-typedef int32_t int32;
-typedef unsigned int uint;
-
-#endif // AVC_TYPES_H_
diff --git a/media/libstagefright/codecs/avc/common/include/avcapi_common.h b/media/libstagefright/codecs/avc/common/include/avcapi_common.h
deleted file mode 100644
index abffe6e..0000000
--- a/media/libstagefright/codecs/avc/common/include/avcapi_common.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
-This file contains common type definitions and enumerations used by AVC encoder
-and decoder libraries which are exposed to the users.
-@publishedAll
-*/
-
-#ifndef AVCAPI_COMMON_H_INCLUDED
-#define AVCAPI_COMMON_H_INCLUDED
-
-#include "avc_types.h"
-
-#define PV_MEMORY_POOL
-/**
-This is common return status.
-@publishedAll
-*/
-typedef enum
-{
- AVC_NO_BUFFER = -2,
- AVC_MEMORY_FAIL = -1,
- AVC_FAIL = 0,
- AVC_SUCCESS = 1,
- AVC_PICTURE_OUTPUT_READY = 2
-} AVCStatus;
-
-/**
-This enumeration is for profiles. The value follows the profile_idc in sequence
-parameter set rbsp. See Annex A.
-@publishedAll
-*/
-typedef enum
-{
- AVC_BASELINE = 66,
- AVC_MAIN = 77,
- AVC_EXTENDED = 88,
- AVC_HIGH = 100,
- AVC_HIGH10 = 110,
- AVC_HIGH422 = 122,
- AVC_HIGH444 = 144
-} AVCProfile;
-
-/**
-This enumeration is for levels. The value follows the level_idc in sequence
-parameter set rbsp. See Annex A.
-@publishedAll
-*/
-typedef enum
-{
- AVC_LEVEL_AUTO = 0,
- AVC_LEVEL1_B = 9,
- AVC_LEVEL1 = 10,
- AVC_LEVEL1_1 = 11,
- AVC_LEVEL1_2 = 12,
- AVC_LEVEL1_3 = 13,
- AVC_LEVEL2 = 20,
- AVC_LEVEL2_1 = 21,
- AVC_LEVEL2_2 = 22,
- AVC_LEVEL3 = 30,
- AVC_LEVEL3_1 = 31,
- AVC_LEVEL3_2 = 32,
- AVC_LEVEL4 = 40,
- AVC_LEVEL4_1 = 41,
- AVC_LEVEL4_2 = 42,
- AVC_LEVEL5 = 50,
- AVC_LEVEL5_1 = 51
-} AVCLevel;
-
-/**
-This enumeration follows Table 7-1 for NAL unit type codes.
-This may go to avccommon_api.h later (external common).
-@publishedAll
-*/
-typedef enum
-{
- AVC_NALTYPE_SLICE = 1, /* non-IDR non-data partition */
- AVC_NALTYPE_DPA = 2, /* data partition A */
- AVC_NALTYPE_DPB = 3, /* data partition B */
- AVC_NALTYPE_DPC = 4, /* data partition C */
- AVC_NALTYPE_IDR = 5, /* IDR NAL */
- AVC_NALTYPE_SEI = 6, /* supplemental enhancement info */
- AVC_NALTYPE_SPS = 7, /* sequence parameter set */
- AVC_NALTYPE_PPS = 8, /* picture parameter set */
- AVC_NALTYPE_AUD = 9, /* access unit delimiter */
- AVC_NALTYPE_EOSEQ = 10, /* end of sequence */
- AVC_NALTYPE_EOSTREAM = 11, /* end of stream */
- AVC_NALTYPE_FILL = 12 /* filler data */
-} AVCNalUnitType;
-
-/**
-This enumeration specifies debug logging type.
-This may go to avccommon_api.h later (external common).
-@publishedAll
-*/
-typedef enum
-{
- AVC_LOGTYPE_ERROR = 0,
- AVC_LOGTYPE_WARNING = 1,
- AVC_LOGTYPE_INFO = 2
-} AVCLogType;
-
-/**
-This enumerates the status of certain flags.
-@publishedAll
-*/
-typedef enum
-{
- AVC_OFF = 0,
- AVC_ON = 1
-} AVCFlag;
-
-/**
-This structure contains input information.
-Note, this structure is identical to AVCDecOutput for now.
-*/
-typedef struct tagAVCFrameIO
-{
- /** A unique identification number for a particular instance of this structure.
- To remain unchanged by the application between the time when it is given to the
- library and the time when the library returns it back. */
- uint32 id;
-
- /** Array of pointers to Y,Cb,Cr content in 4:2:0 format. For AVC decoding,
- this memory is allocated by the AVC decoder library. For AVC encoding, only the
- memory for original unencoded frame is allocated by the application. Internal
- memory is also allocated by the AVC encoder library. */
- uint8 *YCbCr[3];
-
- /** In/Out: Coded width of the luma component, it has to be multiple of 16. */
- int pitch;
-
- /** In/Out: Coded height of the luma component, must be multiple of 16. */
- int height;
-
- /** In/Out: Display width, less than pitch */
- int clip_width;
-
- /** In/Out: Display height, less than height */
- int clip_height;
-
- /** Input: Origin of the display area [0]=>row, [1]=>column */
- int clip_origin[2];
-
- /** Output: Frame number in de/encoding order (not necessary)*/
- uint32 coding_order;
-
- /** Output: Frame number in displaying order (this may or may not be associated with the POC at all!!!). */
- uint32 disp_order;
-
- /** In/Out: Flag for use for reference or not. */
- uint is_reference;
-
- /** In/Out: Coding timestamp in msec (not display timestamp) */
- uint32 coding_timestamp;
-
- /* there could be something else here such as format, DON (decoding order number)
- if available thru SEI, etc. */
-} AVCFrameIO;
-
-
-/** CALLBACK FUNCTION TO BE IMPLEMENTED BY APPLICATION */
-/** In AVCDecControls structure, userData is a pointer to an object with the following
- member functions.
-*/
-
-
-/** @brief Decoded picture buffers (DPB) must be allocated or re-allocated before an
- IDR frame is decoded. If PV_MEMORY_POOL is not defined, AVC lib will allocate DPB
- internally which cannot be shared with the application. In that case, this function
- will not be called.
- @param userData The same value of userData in AVCHandle object.
- @param frame_size_in_mbs The size of each frame in number of macroblocks.
- @param num_frames The number of frames in DPB.
- @return 1 for success, 0 for fail (cannot allocate DPB)
-*/
-
-typedef int (*FunctionType_DPBAlloc)(void *userData, uint frame_size_in_mbs, uint num_buffers);
-
-/** @brief AVC library calls this function to reserve memory for one frame from the DPB.
- Once reserved, this frame shall not be deleted or over-written by the app.
- @param userData The same value of userData in AVCHandle object.
- @param indx Index of a frame in DPB (AVC library keeps track of the index).
- @param yuv The address of the yuv pointer returned to the AVC lib.
- @return 1 for success, 0 for fail (no frames available to bind).
- */
-typedef int (*FunctionType_FrameBind)(void *userData, int indx, uint8 **yuv);
-
-/** @brief AVC library calls this function once a bound frame is not needed for decoding
- operation (falls out of the sliding window, or marked unused for reference).
- @param userData The same value of userData in AVCHandle object.
- @param indx Index of frame to be unbound (AVC library keeps track of the index).
- @return none.
-*/
-typedef void (*FuctionType_FrameUnbind)(void *userData, int);
-
-/** Pointer to malloc function for general memory allocation, so that application can keep track of
- memory usage.
-\param "size" "Size of requested memory in bytes."
-\param "attribute" "Some value specifying types, priority, etc. of the memory."
-\return "The address of the allocated, zero-initialized memory"
-*/
-typedef void* (*FunctionType_Malloc)(void *userData, int32 size, int attribute);
-
-/** Function pointer to free
-\param "mem" "Pointer to the memory to be freed"
-\return "void"
-*/
-typedef void (*FunctionType_Free)(void *userData, void *mem);
-
-/** Debug logging information is returned to the application thru this function.
-\param "type" "Type of logging message, see definition of AVCLogType."
-\param "string1" "Logging message."
-\param "string2" "To be defined."
-*/
-typedef void (*FunctionType_DebugLog)(uint32 *userData, AVCLogType type, char *string1, int val1, int val2);
-
-/**
-This structure has to be allocated and maintained by the user of the library.
-This structure is used as a handle to the library object.
-*/
-typedef struct tagAVCHandle
-{
- /** A pointer to the internal data structure. Users have to make sure that this value
- is NULL at the beginning.
- */
- void *AVCObject;
-
- /** A pointer to user object which has the following member functions used for
- callback purpose. !!! */
- void *userData;
-
- /** Pointers to functions implemented by the users of AVC library */
- FunctionType_DPBAlloc CBAVC_DPBAlloc;
-
- FunctionType_FrameBind CBAVC_FrameBind;
-
- FuctionType_FrameUnbind CBAVC_FrameUnbind;
-
- FunctionType_Malloc CBAVC_Malloc;
-
- FunctionType_Free CBAVC_Free;
-
- FunctionType_DebugLog CBAVC_DebugLog;
-
- /** Flag to enable debugging */
- uint32 debugEnable;
-
-} AVCHandle;
-
-
-
-#ifdef PVDEBUGMSG_LOG
-#define DEBUG_LOG(a,b,c,d,e) CBAVC_DebugLog(a,b,c,d,e)
-#else
-#define DEBUG_LOG(a,b,c,d,e)
-#endif
-
-#endif /* _AVCAPI_COMMON_H_ */
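
For context only (not part of this change): the callback contract documented in the removed avcapi_common.h implies that the application owns general allocation and the decoded picture buffer. A minimal, hypothetical sketch of wiring stub callbacks into an AVCHandle, assuming the header layout shown above, could look like this:

#include <stdlib.h>
#include <string.h>
#include "avcapi_common.h"   /* the removed header shown above */

/* Stub callbacks; a real application would back these with its frame pool. */
static void *app_malloc(void *userData, int32 size, int attribute)
{
    (void)userData; (void)attribute;
    return calloc(1, (size_t)size);       /* contract asks for zero-initialized memory */
}

static void app_free(void *userData, void *mem)
{
    (void)userData;
    free(mem);
}

static int app_dpb_alloc(void *userData, uint frame_size_in_mbs, uint num_buffers)
{
    (void)userData; (void)frame_size_in_mbs; (void)num_buffers;
    return 1;                             /* 1 = DPB allocated successfully */
}

static int app_frame_bind(void *userData, int indx, uint8 **yuv)
{
    (void)userData; (void)indx;
    *yuv = NULL;                          /* a real implementation binds a frame from its pool */
    return 0;                             /* 0 = no frame available */
}

static void app_frame_unbind(void *userData, int indx)
{
    (void)userData; (void)indx;           /* frame fell out of the sliding window */
}

/* Fill an AVCHandle before handing it to the encoder/decoder init call. */
static void init_avc_handle(AVCHandle *handle, void *appCtx)
{
    memset(handle, 0, sizeof(*handle));   /* AVCObject must start out NULL */
    handle->userData          = appCtx;
    handle->CBAVC_DPBAlloc    = app_dpb_alloc;
    handle->CBAVC_FrameBind   = app_frame_bind;
    handle->CBAVC_FrameUnbind = app_frame_unbind;
    handle->CBAVC_Malloc      = app_malloc;
    handle->CBAVC_Free        = app_free;
    handle->debugEnable       = 0;
}
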
diff --git a/media/libstagefright/codecs/avc/common/include/avcint_common.h b/media/libstagefright/codecs/avc/common/include/avcint_common.h
deleted file mode 100644
index 465e604..0000000
--- a/media/libstagefright/codecs/avc/common/include/avcint_common.h
+++ /dev/null
@@ -1,882 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
-This file contains common code shared between AVC decoder and AVC encoder for
-internal use only.
-@publishedAll
-*/
-
-#ifndef AVCINT_COMMON_H_INCLUDED
-#define AVCINT_COMMON_H_INCLUDED
-
-#ifndef AVCAPI_COMMON_H_INCLUDED
-#include "avcapi_common.h"
-#endif
-
-
-#ifndef TRUE
-#define TRUE 1
-#define FALSE 0
-#endif
-
-
-
-/**
-Mathematic functions defined in subclause 5.7.
-Can be replaced with assembly instructions for speedup.
-@publishedAll
-*/
-#define AVC_ABS(x) (((x)<0)? -(x) : (x))
-#define AVC_SIGN(x) (((x)<0)? -1 : 1)
-#define AVC_SIGN0(x) (((x)<0)? -1 : (((x)>0) ? 1 : 0))
-#define AVC_MAX(x,y) ((x)>(y)? (x):(y))
-#define AVC_MIN(x,y) ((x)<(y)? (x):(y))
-#define AVC_MEDIAN(A,B,C) ((A) > (B) ? ((A) < (C) ? (A) : (B) > (C) ? (B) : (C)): (B) < (C) ? (B) : (C) > (A) ? (C) : (A))
-#define AVC_CLIP3(a,b,x) (AVC_MAX(a,AVC_MIN(x,b))) /* clip x between a and b */
-#define AVC_CLIP(x) AVC_CLIP3(0,255,x)
-#define AVC_FLOOR(x) ((int)(x))
-#define AVC_RASTER_SCAN(x,y,n) ((x)+(y)*(n))
-#define AVC_ROUND(x) (AVC_SIGN(x)*AVC_FLOOR(AVC_ABS(x)+0.5))
-#define AVC_INVERSE_RASTER_SCAN(a,b,c,d,e) (((e)==0)? (((a)%((d)/(b)))*(b)): (((a)/((d)/(b)))*(c)))
-/* a:block address, b:block width, c:block height, d:total_width, e:x or y coordinate */
-
-#define DEFAULT_ATTR 0 /* default memory attribute */
-#define FAST_MEM_ATTR 1 /* fast memory attribute */
-
-
-/* This section is for definition of constants. */
-#define MB_SIZE 16
-#define BLOCK_SIZE 4
-#define EMULATION_PREVENTION_THREE_BYTE 0x3
-#define NUM_PIXELS_IN_MB (24*16)
-#define NUM_BLKS_IN_MB 24
-
-#define AVCNumI4PredMode 9
-#define AVCNumI16PredMode 4
-#define AVCNumIChromaMode 4
-
-/* constants used in the structures below */
-#define MAXIMUMVALUEOFcpb_cnt 32 /* used in HRDParams */
-#define MAX_NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE 255 /* used in SeqParamSet */
-#define MAX_NUM_SLICE_GROUP 8 /* used in PicParamSet */
-#define MAX_REF_PIC_LIST_REORDERING 32 /* 32 is maximum according to Annex A, SliceHeader */
-#define MAX_DEC_REF_PIC_MARKING 64 /* 64 is the maximum possible given the max num ref pictures to 31. */
-#define MAX_FS (16+1) /* pre-defined size of frame store array */
-#define MAX_LEVEL_IDX 15 /* only 15 levels defined for now */
-#define MAX_REF_PIC_LIST 33 /* max size of the RefPicList0 and RefPicList1 */
-
-
-/**
-Architectural related macros.
-@publishedAll
-*/
-#ifdef USE_PRED_BLOCK
-#define MB_BASED_DEBLOCK
-#endif
-
-/**
-Picture type, PV created.
-@publishedAll
-*/
-typedef enum
-{
- AVC_FRAME = 3
-} AVCPictureType;
-
-/**
-This slice type follows Table 7-3. The bottom 5 items may not be needed.
-@publishedAll
-*/
-typedef enum
-{
- AVC_P_SLICE = 0,
- AVC_B_SLICE = 1,
- AVC_I_SLICE = 2,
- AVC_SP_SLICE = 3,
- AVC_SI_SLICE = 4,
- AVC_P_ALL_SLICE = 5,
- AVC_B_ALL_SLICE = 6,
- AVC_I_ALL_SLICE = 7,
- AVC_SP_ALL_SLICE = 8,
- AVC_SI_ALL_SLICE = 9
-} AVCSliceType;
-
-/**
-Types of the macroblock and partition. PV Created.
-@publishedAll
-*/
-typedef enum
-{
- /* intra */
- AVC_I4,
- AVC_I16,
- AVC_I_PCM,
- AVC_SI4,
-
- /* inter for both P and B*/
- AVC_BDirect16,
- AVC_P16,
- AVC_P16x8,
- AVC_P8x16,
- AVC_P8,
- AVC_P8ref0,
- AVC_SKIP
-} AVCMBMode;
-
-/**
-Enumeration for sub-macroblock mode, interpreted from sub_mb_type.
-@publishedAll
-*/
-typedef enum
-{
- /* for sub-partition mode */
- AVC_BDirect8,
- AVC_8x8,
- AVC_8x4,
- AVC_4x8,
- AVC_4x4
-} AVCSubMBMode;
-
-/**
-Mode of prediction of partition or sub-partition. PV Created.
-Do not change the order!!! Used in table look-up mode prediction in
-vlc.c.
-@publishedAll
-*/
-typedef enum
-{
- AVC_Pred_L0 = 0,
- AVC_Pred_L1,
- AVC_BiPred,
- AVC_Direct
-} AVCPredMode;
-
-
-/**
-Mode of intra 4x4 prediction. Table 8-2
-@publishedAll
-*/
-typedef enum
-{
- AVC_I4_Vertical = 0,
- AVC_I4_Horizontal,
- AVC_I4_DC,
- AVC_I4_Diagonal_Down_Left,
- AVC_I4_Diagonal_Down_Right,
- AVC_I4_Vertical_Right,
- AVC_I4_Horizontal_Down,
- AVC_I4_Vertical_Left,
- AVC_I4_Horizontal_Up
-} AVCIntra4x4PredMode;
-
-/**
-Mode of intra 16x16 prediction. Table 8-3
-@publishedAll
-*/
-typedef enum
-{
- AVC_I16_Vertical = 0,
- AVC_I16_Horizontal,
- AVC_I16_DC,
- AVC_I16_Plane
-} AVCIntra16x16PredMode;
-
-
-/**
-Mode of intra chroma prediction. Table 8-4
-@publishedAll
-*/
-typedef enum
-{
- AVC_IC_DC = 0,
- AVC_IC_Horizontal,
- AVC_IC_Vertical,
- AVC_IC_Plane
-} AVCIntraChromaPredMode;
-
-/**
-Type of residual going to residual_block_cavlc function, PV created.
-@publishedAll
-*/
-typedef enum
-{
- AVC_Luma,
- AVC_Intra16DC,
- AVC_Intra16AC,
- AVC_ChromaDC,
- AVC_ChromaAC
-} AVCResidualType;
-
-
-/**
-This structure contains VUI parameters as specified in Annex E.
-Some variables may be removed from the structure if they are found to be useless to store.
-@publishedAll
-*/
-typedef struct tagHRDParams
-{
- uint cpb_cnt_minus1; /* ue(v), range 0..31 */
- uint bit_rate_scale; /* u(4) */
- uint cpb_size_scale; /* u(4) */
- uint32 bit_rate_value_minus1[MAXIMUMVALUEOFcpb_cnt];/* ue(v), range 0..2^32-2 */
- uint32 cpb_size_value_minus1[MAXIMUMVALUEOFcpb_cnt]; /* ue(v), range 0..2^32-2 */
- uint cbr_flag[MAXIMUMVALUEOFcpb_cnt]; /* u(1) */
- uint initial_cpb_removal_delay_length_minus1; /* u(5), default 23 */
- uint cpb_removal_delay_length_minus1; /* u(5), default 23 */
- uint dpb_output_delay_length_minus1; /* u(5), default 23 */
- uint time_offset_length; /* u(5), default 24 */
-} AVCHRDParams;
-
-/**
-This structure contains VUI parameters as specified in Annex E.
-Some variables may be removed from the structure if they are found to be useless to store.
-@publishedAll
-*/
-typedef struct tagVUIParam
-{
- uint aspect_ratio_info_present_flag; /* u(1) */
- uint aspect_ratio_idc; /* u(8), table E-1 */
- uint sar_width; /* u(16) */
- uint sar_height; /* u(16) */
- uint overscan_info_present_flag; /* u(1) */
- uint overscan_appropriate_flag; /* u(1) */
- uint video_signal_type_present_flag; /* u(1) */
- uint video_format; /* u(3), Table E-2, default 5, unspecified */
- uint video_full_range_flag; /* u(1) */
- uint colour_description_present_flag; /* u(1) */
- uint colour_primaries; /* u(8), Table E-3, default 2, unspecified */
- uint transfer_characteristics; /* u(8), Table E-4, default 2, unspecified */
- uint matrix_coefficients; /* u(8), Table E-5, default 2, unspecified */
- uint chroma_location_info_present_flag; /* u(1) */
- uint chroma_sample_loc_type_top_field; /* ue(v), Fig. E-1, range 0..5, default 0 */
- uint chroma_sample_loc_type_bottom_field; /* ue(v) */
- uint timing_info_present_flag; /* u(1) */
- uint num_units_in_tick; /* u(32), must be > 0 */
- uint time_scale; /* u(32), must be > 0 */
- uint fixed_frame_rate_flag; /* u(1), Eq. C-13 */
- uint nal_hrd_parameters_present_flag; /* u(1) */
- AVCHRDParams nal_hrd_parameters; /* hrd_parameters */
- uint vcl_hrd_parameters_present_flag; /* u(1) */
- AVCHRDParams vcl_hrd_parameters; /* hrd_parameters */
- /* if ((nal_hrd_parameters_present_flag || (vcl_hrd_parameters_present_flag)) */
- uint low_delay_hrd_flag; /* u(1) */
- uint pic_struct_present_flag;
- uint bitstream_restriction_flag; /* u(1) */
- uint motion_vectors_over_pic_boundaries_flag; /* u(1) */
- uint max_bytes_per_pic_denom; /* ue(v), default 2 */
- uint max_bits_per_mb_denom; /* ue(v), range 0..16, default 1 */
- uint log2_max_mv_length_vertical; /* ue(v), range 0..16, default 16 */
- uint log2_max_mv_length_horizontal; /* ue(v), range 0..16, default 16 */
- uint max_dec_frame_reordering; /* ue(v) */
- uint max_dec_frame_buffering; /* ue(v) */
-} AVCVUIParams;
-
-
-/**
-This structure contains information in a sequence parameter set NAL.
-Some variables may be removed from the structure if they are found to be useless to store.
-@publishedAll
-*/
-typedef struct tagSeqParamSet
-{
- uint Valid; /* indicates the parameter set is valid */
-
- uint profile_idc; /* u(8) */
- uint constrained_set0_flag; /* u(1) */
- uint constrained_set1_flag; /* u(1) */
- uint constrained_set2_flag; /* u(1) */
- uint constrained_set3_flag; /* u(1) */
- uint level_idc; /* u(8) */
- uint seq_parameter_set_id; /* ue(v), range 0..31 */
- uint log2_max_frame_num_minus4; /* ue(v), range 0..12 */
- uint pic_order_cnt_type; /* ue(v), range 0..2 */
- /* if( pic_order_cnt_type == 0 ) */
- uint log2_max_pic_order_cnt_lsb_minus4; /* ue(v), range 0..12 */
- /* else if( pic_order_cnt_type == 1 ) */
- uint delta_pic_order_always_zero_flag; /* u(1) */
- int32 offset_for_non_ref_pic; /* se(v) */
- int32 offset_for_top_to_bottom_field; /* se(v) */
- uint num_ref_frames_in_pic_order_cnt_cycle; /* ue(v) , range 0..255 */
- /* for( i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++ ) */
- int32 offset_for_ref_frame[MAX_NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE]; /* se(v) */
- uint num_ref_frames; /* ue(v), range 0..16 */
- uint gaps_in_frame_num_value_allowed_flag; /* u(1) */
- uint pic_width_in_mbs_minus1; /* ue(v) */
- uint pic_height_in_map_units_minus1; /* ue(v) */
- uint frame_mbs_only_flag; /* u(1) */
- /* if( !frame_mbs_only_flag ) */
- uint mb_adaptive_frame_field_flag; /* u(1) */
- uint direct_8x8_inference_flag; /* u(1), must be 1 when frame_mbs_only_flag is 0 */
- uint frame_cropping_flag; /* u(1) */
- /* if( frame_cropping_flag) */
- uint frame_crop_left_offset; /* ue(v) */
- uint frame_crop_right_offset; /* ue(v) */
- uint frame_crop_top_offset; /* ue(v) */
- uint frame_crop_bottom_offset; /* ue(v) */
- uint vui_parameters_present_flag; /* u(1) */
-// uint nal_hrd_parameters_present_flag;
-// uint vcl_hrd_parameters_present_flag;
-// AVCHRDParams *nal_hrd_parameters;
-// AVCHRDParams *vcl_hrd_parameters;
- AVCVUIParams vui_parameters; /* AVCVUIParam */
-} AVCSeqParamSet;
-
-/**
-This structure contains information in a picture parameter set NAL.
-Some variables may be removed from the structure if they are found to be useless to store.
-@publishedAll
-*/
-typedef struct tagPicParamSet
-{
- uint pic_parameter_set_id; /* ue(v), range 0..255 */
- uint seq_parameter_set_id; /* ue(v), range 0..31 */
- uint entropy_coding_mode_flag; /* u(1) */
- uint pic_order_present_flag; /* u(1) */
- uint num_slice_groups_minus1; /* ue(v), range in Annex A */
- /* if( num_slice_groups_minus1 > 0) */
- uint slice_group_map_type; /* ue(v), range 0..6 */
- /* if( slice_group_map_type = = 0 ) */
- /* for(0:1:num_slice_groups_minus1) */
- uint run_length_minus1[MAX_NUM_SLICE_GROUP]; /* ue(v) */
- /* else if( slice_group_map_type = = 2 ) */
- /* for(0:1:num_slice_groups_minus1-1) */
- uint top_left[MAX_NUM_SLICE_GROUP-1]; /* ue(v) */
- uint bottom_right[MAX_NUM_SLICE_GROUP-1]; /* ue(v) */
- /* else if( slice_group_map_type = = 3 || 4 || 5 */
- uint slice_group_change_direction_flag; /* u(1) */
- uint slice_group_change_rate_minus1; /* ue(v) */
- /* else if( slice_group_map_type = = 6 ) */
- uint pic_size_in_map_units_minus1; /* ue(v) */
- /* for(0:1:pic_size_in_map_units_minus1) */
- uint *slice_group_id; /* complete MBAmap u(v) */
- uint num_ref_idx_l0_active_minus1; /* ue(v), range 0..31 */
- uint num_ref_idx_l1_active_minus1; /* ue(v), range 0..31 */
- uint weighted_pred_flag; /* u(1) */
- uint weighted_bipred_idc; /* u(2), range 0..2 */
- int pic_init_qp_minus26; /* se(v), range -26..25 */
- int pic_init_qs_minus26; /* se(v), range -26..25 */
- int chroma_qp_index_offset; /* se(v), range -12..12 */
- uint deblocking_filter_control_present_flag; /* u(1) */
- uint constrained_intra_pred_flag; /* u(1) */
- uint redundant_pic_cnt_present_flag; /* u(1) */
-} AVCPicParamSet;
-
-
-/**
-This structure contains slice header information.
-Some variables may be removed from the structure if they are found to be useless to store.
-@publishedAll
-*/
-typedef struct tagSliceHeader
-{
- uint first_mb_in_slice; /* ue(v) */
- AVCSliceType slice_type; /* ue(v), Table 7-3, range 0..9 */
- uint pic_parameter_set_id; /* ue(v), range 0..255 */
- uint frame_num; /* u(v), see log2max_frame_num_minus4 */
- /* if( !frame_mbs_only_flag) */
- uint field_pic_flag; /* u(1) */
- /* if(field_pic_flag) */
- uint bottom_field_flag; /* u(1) */
- /* if(nal_unit_type == 5) */
- uint idr_pic_id; /* ue(v), range 0..65535 */
- /* if(pic_order_cnt_type==0) */
- uint pic_order_cnt_lsb; /* u(v), range 0..MaxPicOrderCntLsb-1 */
- /* if(pic_order_present_flag && !field_pic_flag) */
- int32 delta_pic_order_cnt_bottom; /* se(v) */
- /* if(pic_order_cnt_type==1 && !delta_pic_order_always_zero_flag) */
- /* if(pic_order_present_flag && !field_pic_flag) */
- int32 delta_pic_order_cnt[2];
- /* if(redundant_pic_cnt_present_flag) */
- uint redundant_pic_cnt; /* ue(v), range 0..127 */
- /* if(slice_type == B) */
- uint direct_spatial_mv_pred_flag; /* u(1) */
- /* if(slice_type == P || slice_type==SP || slice_type==B) */
- uint num_ref_idx_active_override_flag; /* u(1) */
- /* if(num_ref_idx_active_override_flag) */
- uint num_ref_idx_l0_active_minus1; /* ue(v) */
- /* if(slice_type == B) */
- uint num_ref_idx_l1_active_minus1; /* ue(v) */
-
- /* ref_pic_list_reordering() */
- uint ref_pic_list_reordering_flag_l0; /* u(1) */
- uint reordering_of_pic_nums_idc_l0[MAX_REF_PIC_LIST_REORDERING]; /* ue(v), range 0..3 */
- uint abs_diff_pic_num_minus1_l0[MAX_REF_PIC_LIST_REORDERING]; /* ue(v) */
- uint long_term_pic_num_l0[MAX_REF_PIC_LIST_REORDERING]; /* ue(v) */
- uint ref_pic_list_reordering_flag_l1; /* u(1) */
- uint reordering_of_pic_nums_idc_l1[MAX_REF_PIC_LIST_REORDERING]; /* ue(v), range 0..3 */
- uint abs_diff_pic_num_minus1_l1[MAX_REF_PIC_LIST_REORDERING]; /* ue(v) */
- uint long_term_pic_num_l1[MAX_REF_PIC_LIST_REORDERING]; /* ue(v) */
-
- /* end ref_pic_list_reordering() */
- /* if(nal_ref_idc!=0) */
- /* dec_ref_pic_marking() */
- uint no_output_of_prior_pics_flag; /* u(1) */
- uint long_term_reference_flag; /* u(1) */
- uint adaptive_ref_pic_marking_mode_flag; /* u(1) */
- uint memory_management_control_operation[MAX_DEC_REF_PIC_MARKING]; /* ue(v), range 0..6 */
- uint difference_of_pic_nums_minus1[MAX_DEC_REF_PIC_MARKING]; /* ue(v) */
- uint long_term_pic_num[MAX_DEC_REF_PIC_MARKING]; /* ue(v) */
- uint long_term_frame_idx[MAX_DEC_REF_PIC_MARKING]; /* ue(v) */
- uint max_long_term_frame_idx_plus1[MAX_DEC_REF_PIC_MARKING]; /* ue(v) */
- /* end dec_ref_pic_marking() */
- /* if(entropy_coding_mode_flag && slice_type!=I && slice_type!=SI) */
- uint cabac_init_idc; /* ue(v), range 0..2 */
- int slice_qp_delta; /* se(v), range 0..51 */
- /* if(slice_type==SP || slice_type==SI) */
- /* if(slice_type==SP) */
- uint sp_for_switch_flag; /* u(1) */
- int slice_qs_delta; /* se(v) */
-
- /* if(deblocking_filter_control_present_flag)*/
- uint disable_deblocking_filter_idc; /* ue(v), range 0..2 */
- /* if(disable_deblocking_filter_idc!=1) */
- int slice_alpha_c0_offset_div2; /* se(v), range -6..6, default 0 */
- int slice_beta_offset_div_2; /* se(v), range -6..6, default 0 */
- /* if(num_slice_groups_minus1>0 && slice_group_map_type>=3 && slice_group_map_type<=5)*/
- uint slice_group_change_cycle; /* u(v), use ceil(log2(PicSizeInMapUnits/SliceGroupChangeRate + 1)) bits*/
-
-} AVCSliceHeader;
-
-/**
-This struct contains information about the neighboring pixel.
-@publishedAll
-*/
-typedef struct tagPixPos
-{
- int available;
- int mb_addr; /* macroblock address of the current pixel, see below */
- int x; /* x,y positions of current pixel relative to the macroblock mb_addr */
- int y;
- int pos_x; /* x,y positions of current pixel relative to the picture. */
- int pos_y;
-} AVCPixelPos;
-
-typedef struct tagNeighborAvailability
-{
- int left;
- int top; /* macroblock address of the current pixel, see below */
- int top_right; /* x,y positions of current pixel relative to the macroblock mb_addr */
-} AVCNeighborAvailability;
-
-
-/**
-This structure contains picture data and related information necessary to be used as
-reference frame.
-@publishedAll
-*/
-typedef struct tagPictureData
-{
- uint16 RefIdx; /* index used for reference frame */
- uint8 *Sl; /* derived from base_dpb in AVCFrameStore */
- uint8 *Scb; /* for complementary fields, YUV are interlaced */
- uint8 *Scr; /* Sl of top_field and bottom_fields will be one line apart and the
- stride will be 2 times the width. */
- /* For non-complementary field, the above still applies. A special
- output formatting is required. */
-
- /* Then, necessary variables that need to be stored */
- AVCPictureType picType; /* frame, top-field or bot-field */
- /*bool*/
- uint isReference;
- /*bool*/
- uint isLongTerm;
- int PicOrderCnt;
- int PicNum;
- int LongTermPicNum;
-
- int width; /* how many pixel per line */
- int height;/* how many line */
- int pitch; /* how many pixel between the line */
-
- uint padded; /* flag for being padded */
-
-} AVCPictureData;
-
-/**
-This structure contains information for frame storage.
-@publishedAll
-*/
-typedef struct tagFrameStore
-{
- uint8 *base_dpb; /* base pointer for the YCbCr */
-
- int IsReference; /* 0=not used for ref; 1=top used; 2=bottom used; 3=both fields (or frame) used */
- int IsLongTerm; /* 0=not used for ref; 1=top used; 2=bottom used; 3=both fields (or frame) used */
- /* if IsLongTerm is true, IsReference can be ignored. */
- /* if IsReference is true, IsLongterm will be checked for short-term or long-term. */
- /* IsUsed must be true to enable the validity of IsReference and IsLongTerm */
-
- int IsOutputted; /* whether it has been output via the AVCDecGetOutput API; if so, don't output
- it again until it is returned. */
- AVCPictureData frame;
-
- int FrameNum;
- int FrameNumWrap;
- int LongTermFrameIdx;
- int PicOrderCnt; /* of the frame, smaller of the 2 fields */
-
-} AVCFrameStore;
-
-/**
-This structure maintains the actual memory for the decoded picture buffer (DPB) which is
-allocated at the beginning according to profile/level.
-Once decoded_picture_buffer is allocated, Sl,Scb,Scr in
-AVCPictureData structure just point to the address in decoded_picture_buffer.
-used_size maintains the used space.
-NOTE:: In order to maintain contiguous memory space, memory equal to a single frame is
-assigned at a time. Two opposite fields reside in the same frame memory.
-
- |-------|---|---|---|xxx|-------|xxx|---|-------| decoded_picture_buffer
- frame top bot top frame bot frame
- 0 1 1 2 3 4 5
-
- bot 2 and top 4 do not exist, the memory is not used.
-
-@publishedAll
-*/
-typedef struct tagDecPicBuffer
-{
- uint8 *decoded_picture_buffer; /* actual memory */
- uint32 dpb_size; /* size of dpb in bytes */
- uint32 used_size; /* used size */
- struct tagFrameStore *fs[MAX_FS]; /* list of frame stored, actual buffer */
- int num_fs; /* size of fs */
-
-} AVCDecPicBuffer;
-
-
-/**
-This structure contains macroblock related variables.
-@publishedAll
-*/
-typedef struct tagMacroblock
-{
- AVCIntraChromaPredMode intra_chroma_pred_mode; /* ue(v) */
-
- int32 mvL0[16]; /* motion vectors, 16 bit packed (x,y) per element */
- int32 mvL1[16];
- int16 ref_idx_L0[4];
- int16 ref_idx_L1[4];
- uint16 RefIdx[4]; /* ref index, has value of AVCPictureData->RefIdx */
- /* stored data */
- /*bool*/
- uint mb_intra; /* intra flag */
- /*bool*/
- uint mb_bottom_field;
-
- AVCMBMode mbMode; /* type of MB prediction */
- AVCSubMBMode subMbMode[4]; /* for each 8x8 partition */
-
- uint CBP; /* CodeBlockPattern */
- AVCIntra16x16PredMode i16Mode; /* Intra16x16PredMode */
- AVCIntra4x4PredMode i4Mode[16]; /* Intra4x4PredMode, in raster scan order */
- int NumMbPart; /* number of partition */
- AVCPredMode MBPartPredMode[4][4]; /* prediction mode [MBPartIndx][subMBPartIndx] */
- int MbPartWidth;
- int MbPartHeight;
- int NumSubMbPart[4]; /* for each 8x8 partition */
- int SubMbPartWidth[4]; /* for each 8x8 partition */
- int SubMbPartHeight[4]; /* for each 8x8 partition */
-
- uint8 nz_coeff[NUM_BLKS_IN_MB]; /* [blk_y][blk_x], Chroma is [4..5][0...3], see predict_nnz() function */
-
- int QPy; /* Luma QP */
- int QPc; /* Chroma QP */
- int QSc; /* Chroma QP S-picture */
-
- int slice_id; // MC slice
-} AVCMacroblock;
-
-
-/**
-This structure contains common internal variables between the encoder and decoder
-such that some functions can be shared among them.
-@publishedAll
-*/
-typedef struct tagCommonObj
-{
- /* put these 2 up here to make sure they are word-aligned */
- int16 block[NUM_PIXELS_IN_MB]; /* for transformed residue coefficient */
- uint8 *pred_block; /* pointer to prediction block, could point to a frame */
-#ifdef USE_PRED_BLOCK
- uint8 pred[688]; /* for prediction */
- /* Luma [0-399], Cb [400-543], Cr[544-687] */
-#endif
- int pred_pitch; /* either equal to 20 or to frame pitch */
-
- /* temporary buffers for intra prediction */
- /* these variables should remain inside fast RAM */
-#ifdef MB_BASED_DEBLOCK
- uint8 *intra_pred_top; /* a row of pixel for intra prediction */
- uint8 intra_pred_left[17]; /* a column of pixel for intra prediction */
- uint8 *intra_pred_top_cb;
- uint8 intra_pred_left_cb[9];
- uint8 *intra_pred_top_cr;
- uint8 intra_pred_left_cr[9];
-#endif
- /* pointer to the prediction area for intra prediction */
- uint8 *pintra_pred_top; /* pointer to the top intra prediction value */
- uint8 *pintra_pred_left; /* pointer to the left intra prediction value */
- uint8 intra_pred_topleft; /* the [-1,-1] neighboring pixel */
- uint8 *pintra_pred_top_cb;
- uint8 *pintra_pred_left_cb;
- uint8 intra_pred_topleft_cb;
- uint8 *pintra_pred_top_cr;
- uint8 *pintra_pred_left_cr;
- uint8 intra_pred_topleft_cr;
-
- int QPy;
- int QPc;
- int QPy_div_6;
- int QPy_mod_6;
- int QPc_div_6;
- int QPc_mod_6;
- /**** nal_unit ******/
- /* previously in AVCNALUnit format */
- uint NumBytesInRBSP;
- int forbidden_bit;
- int nal_ref_idc;
- AVCNalUnitType nal_unit_type;
- AVCNalUnitType prev_nal_unit_type;
- /*bool*/
- uint slice_data_partitioning; /* flag when nal_unit_type is between 2 and 4 */
- /**** ******** ******/
- AVCSliceType slice_type;
- AVCDecPicBuffer *decPicBuf; /* decoded picture buffer */
-
- AVCSeqParamSet *currSeqParams; /* the currently used one */
-
- AVCPicParamSet *currPicParams; /* the currently used one */
- uint seq_parameter_set_id;
- /* slice header */
- AVCSliceHeader *sliceHdr; /* slice header param syntax variables */
-
- AVCPictureData *currPic; /* pointer to current picture */
- AVCFrameStore *currFS; /* pointer to current frame store */
- AVCPictureType currPicType; /* frame, top-field or bot-field */
- /*bool*/
- uint newPic; /* flag for new picture */
- uint newSlice; /* flag for new slice */
- AVCPictureData *prevRefPic; /* pointer to previous picture */
-
- AVCMacroblock *mblock; /* array of macroblocks covering entire picture */
- AVCMacroblock *currMB; /* pointer to current macroblock */
- uint mbNum; /* number of current MB */
- int mb_x; /* x-coordinate of the current mbNum */
- int mb_y; /* y-coordinate of the current mbNum */
-
- /* For internal operation, scratch memory for MV, prediction, transform, etc.*/
- uint32 cbp4x4; /* each bit represent nonzero 4x4 block in reverse raster scan order */
- /* starting from luma, Cb and Cr, lsb toward msb */
- int mvd_l0[4][4][2]; /* [mbPartIdx][subMbPartIdx][compIdx], se(v) */
- int mvd_l1[4][4][2]; /* [mbPartIdx][subMbPartIdx][compIdx], se(v) */
-
- int mbAddrA, mbAddrB, mbAddrC, mbAddrD; /* address of neighboring MBs */
- /*bool*/
- uint mbAvailA, mbAvailB, mbAvailC, mbAvailD; /* availability */
- /*bool*/
- uint intraAvailA, intraAvailB, intraAvailC, intraAvailD; /* for intra mode */
- /***********************************************/
- /* The following variables are defined in the draft. */
- /* They may need to be stored in PictureData structure and used for reference. */
- /* In that case, just move or copy it to AVCDecPictureData structure. */
-
- int padded_size; /* size of extra padding to a frame */
-
- uint MaxFrameNum; /*2^(log2_max_frame_num_minus4+4), range 0.. 2^16-1 */
- uint MaxPicOrderCntLsb; /*2^(log2_max_pic_order_cnt_lsb_minus4+4), 0..2^16-1 */
- uint PicWidthInMbs; /*pic_width_in_mbs_minus1+1 */
- uint PicWidthInSamplesL; /* PicWidthInMbs*16 */
- uint PicWidthInSamplesC; /* PicWIdthInMbs*8 */
- uint PicHeightInMapUnits; /* pic_height_in_map_units_minus1+1 */
- uint PicSizeInMapUnits; /* PicWidthInMbs*PicHeightInMapUnits */
- uint FrameHeightInMbs; /*(2-frame_mbs_only_flag)*PicHeightInMapUnits */
-
- uint SliceGroupChangeRate; /* slice_group_change_rate_minus1 + 1 */
-
- /* access unit */
- uint primary_pic_type; /* u(3), Table 7-2, kinda informative only */
-
- /* slice data partition */
- uint slice_id; /* ue(v) */
-
- uint UnusedShortTermFrameNum;
- uint PrevRefFrameNum;
- uint MbaffFrameFlag; /* (mb_adaptive_frame_field_flag && !field_pic_flag) */
- uint PicHeightInMbs; /* FrameHeightInMbs/(1+field_pic_flag) */
- int PicHeightInSamplesL; /* PicHeightInMbs*16 */
- int PicHeightInSamplesC; /* PicHeightInMbs*8 */
- uint PicSizeInMbs; /* PicWidthInMbs*PicHeightInMbs */
- uint level_idc;
- int numMBs;
- uint MaxPicNum;
- uint CurrPicNum;
- int QSy; /* 26+pic_init_qp_minus26+slice_qs_delta */
- int FilterOffsetA;
- int FilterOffsetB;
- uint MapUnitsInSliceGroup0; /* Min(slice_group_change_cycle*SliceGroupChangeRate,PicSizeInMapUnits) */
- /* dec_ref_pic_marking */
- int MaxLongTermFrameIdx;
- int LongTermFrameIdx;
-
- /* POC related variables */
- /*bool*/
- uint mem_mgr_ctrl_eq_5; /* if memory_management_control_operation equal to 5 flag */
- int PicOrderCnt;
- int BottomFieldOrderCnt, TopFieldOrderCnt;
- /* POC mode 0 */
- int prevPicOrderCntMsb;
- uint prevPicOrderCntLsb;
- int PicOrderCntMsb;
- /* POC mode 1 */
- int prevFrameNumOffset, FrameNumOffset;
- uint prevFrameNum;
- int absFrameNum;
- int picOrderCntCycleCnt, frameNumInPicOrderCntCycle;
- int expectedDeltaPerPicOrderCntCycle;
- int expectedPicOrderCnt;
-
- /* FMO */
- int *MbToSliceGroupMap; /* to be re-calculate at the beginning */
-
- /* ref pic list */
- AVCPictureData *RefPicList0[MAX_REF_PIC_LIST]; /* list 0 */
- AVCPictureData *RefPicList1[MAX_REF_PIC_LIST]; /* list 1 */
- AVCFrameStore *refFrameList0ShortTerm[32];
- AVCFrameStore *refFrameList1ShortTerm[32];
- AVCFrameStore *refFrameListLongTerm[32];
- int refList0Size;
- int refList1Size;
-
- /* slice data semantics*/
- int mb_skip_run; /* ue(v) */
- /*uint mb_skip_flag;*/ /* ae(v) */
- /* uint end_of_slice_flag;*//* ae(v) */
- /***********************************************/
-
- /* function pointers */
- int (*is_short_ref)(AVCPictureData *s);
- int (*is_long_ref)(AVCPictureData *s);
-
-} AVCCommonObj;
-
-/**
-Commonly used constant arrays.
-@publishedAll
-*/
-/**
-Zigzag scan from 1-D to 2-D. */
-const static uint8 ZZ_SCAN[16] = {0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15};
-/* Zigzag scan from 1-D to 2-D output to block[24][16]. */
-const static uint8 ZZ_SCAN_BLOCK[16] = {0, 1, 16, 32, 17, 2, 3, 18, 33, 48, 49, 34, 19, 35, 50, 51};
-
-/**
-From zigzag to raster for luma DC value */
-const static uint8 ZIGZAG2RASTERDC[16] = {0, 4, 64, 128, 68, 8, 12, 72, 132, 192, 196, 136, 76, 140, 200, 204};
-
-
-/**
-Mapping from coding scan block indx to raster scan block index */
-const static int blkIdx2blkX[16] = {0, 1, 0, 1, 2, 3, 2, 3, 0, 1, 0, 1, 2, 3, 2, 3};
-const static int blkIdx2blkY[16] = {0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3};
-/** from [blk8indx][blk4indx] to raster scan index */
-const static int blkIdx2blkXY[4][4] = {{0, 1, 4, 5}, {2, 3, 6, 7}, {8, 9, 12, 13}, {10, 11, 14, 15}};
-
-/*
-Availability of the neighboring top-right block relative to the current block. */
-const static int BlkTopRight[16] = {2, 2, 2, 3, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0};
-
-/**
-Table 8-13 Specification of QPc as a function of qPI. */
-const static uint8 mapQPi2QPc[52] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 24, 25, 26, 27, 28, 29, 29, 30, 31, 32, 32, 33, 34, 34, 35, 35, 36, 36,
- 37, 37, 37, 38, 38, 38, 39, 39, 39, 39
- };
-
-/**
-See 8.5.5 equation (8-252 and 8-253) the definition of v matrix. */
-/* in zigzag scan */
-const static int dequant_coefres[6][16] =
-{
- {10, 13, 13, 10, 16, 10, 13, 13, 13, 13, 16, 10, 16, 13, 13, 16},
- {11, 14, 14, 11, 18, 11, 14, 14, 14, 14, 18, 11, 18, 14, 14, 18},
- {13, 16, 16, 13, 20, 13, 16, 16, 16, 16, 20, 13, 20, 16, 16, 20},
- {14, 18, 18, 14, 23, 14, 18, 18, 18, 18, 23, 14, 23, 18, 18, 23},
- {16, 20, 20, 16, 25, 16, 20, 20, 20, 20, 25, 16, 25, 20, 20, 25},
- {18, 23, 23, 18, 29, 18, 23, 23, 23, 23, 29, 18, 29, 23, 23, 29}
-};
-
-/**
-From jm7.6 block.c. (in zigzag scan) */
-const static int quant_coef[6][16] =
-{
- {13107, 8066, 8066, 13107, 5243, 13107, 8066, 8066, 8066, 8066, 5243, 13107, 5243, 8066, 8066, 5243},
- {11916, 7490, 7490, 11916, 4660, 11916, 7490, 7490, 7490, 7490, 4660, 11916, 4660, 7490, 7490, 4660},
- {10082, 6554, 6554, 10082, 4194, 10082, 6554, 6554, 6554, 6554, 4194, 10082, 4194, 6554, 6554, 4194},
- {9362, 5825, 5825, 9362, 3647, 9362, 5825, 5825, 5825, 5825, 3647, 9362, 3647, 5825, 5825, 3647},
- {8192, 5243, 5243, 8192, 3355, 8192, 5243, 5243, 5243, 5243, 3355, 8192, 3355, 5243, 5243, 3355},
- {7282, 4559, 4559, 7282, 2893, 7282, 4559, 4559, 4559, 4559, 2893, 7282, 2893, 4559, 4559, 2893}
-};
-
-/**
-Convert scan from raster scan order to block decoding order and
-from block decoding order to raster scan order. Same table!!!
-*/
-const static uint8 ras2dec[16] = {0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15};
-
-/* mapping from level_idc to index map */
-const static uint8 mapLev2Idx[61] = {255, 255, 255, 255, 255, 255, 255, 255, 255, 1,
- 0, 1, 2, 3, 255, 255, 255, 255, 255, 255,
- 4, 5, 6, 255, 255, 255, 255, 255, 255, 255,
- 7, 8, 9, 255, 255, 255, 255, 255, 255, 255,
- 10, 11, 12, 255, 255, 255, 255, 255, 255, 255,
- 13, 14, 255, 255, 255, 255, 255, 255, 255, 255
- };
-/* map back from index to Level IDC */
-const static uint8 mapIdx2Lev[MAX_LEVEL_IDX] = {10, 11, 12, 13, 20, 21, 22, 30, 31, 32, 40, 41, 42, 50, 51};
-
-/**
-from the index map to the MaxDPB value times 2 */
-const static int32 MaxDPBX2[MAX_LEVEL_IDX] = {297, 675, 1782, 1782, 1782, 3564, 6075, 6075,
- 13500, 15360, 24576, 24576, 24576, 82620, 138240
- };
-
-/* map index to the max frame size */
-const static int MaxFS[MAX_LEVEL_IDX] = {99, 396, 396, 396, 396, 792, 1620, 1620, 3600, 5120,
- 8192, 8192, 8192, 22080, 36864
- };
-
-/* map index to max MB processing rate */
-const static int32 MaxMBPS[MAX_LEVEL_IDX] = {1485, 3000, 6000, 11880, 11880, 19800, 20250, 40500,
- 108000, 216000, 245760, 245760, 491520, 589824, 983040
- };
-
-/* map index to max video bit rate */
-const static uint32 MaxBR[MAX_LEVEL_IDX] = {64, 192, 384, 768, 2000, 4000, 4000, 10000, 14000, 20000,
- 20000, 50000, 50000, 135000, 240000
- };
-
-/* map index to max CPB size */
-const static uint32 MaxCPB[MAX_LEVEL_IDX] = {175, 500, 1000, 2000, 2000, 4000, 4000, 10000, 14000,
- 20000, 25000, 62500, 62500, 135000, 240000
- };
-
-/* map index to max vertical MV range */
-const static int MaxVmvR[MAX_LEVEL_IDX] = {64, 128, 128, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512, 512};
-
-#endif /* _AVCINT_COMMON_H_ */
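
For context only (not part of this change): the AVC_CLIP3 macro and the Table 8-13 lookup (mapQPi2QPc) removed above are used to derive the chroma QP from the luma QP plus the PPS chroma_qp_index_offset. A self-contained, illustrative sketch, with the constants copied from the header above:

#include <stdio.h>
#include <stdint.h>

#define AVC_MAX(x,y) ((x)>(y)? (x):(y))
#define AVC_MIN(x,y) ((x)<(y)? (x):(y))
#define AVC_CLIP3(a,b,x) (AVC_MAX(a,AVC_MIN(x,b)))   /* clip x between a and b */

/* Table 8-13: QPc as a function of qPI, copied from avcint_common.h above. */
static const uint8_t mapQPi2QPc[52] = {
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17,
    18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 29, 30, 31, 32, 32, 33,
    34, 34, 35, 35, 36, 36, 37, 37, 37, 38, 38, 38, 39, 39, 39, 39
};

int main(void)
{
    int QPy = 40;                        /* luma QP of the current macroblock */
    int chroma_qp_index_offset = 2;      /* se(v) from the picture parameter set, range -12..12 */
    int qPI = AVC_CLIP3(0, 51, QPy + chroma_qp_index_offset);
    int QPc = mapQPi2QPc[qPI];           /* Table 8-13 lookup: 42 -> 37 */
    printf("QPy=%d, offset=%d -> QPc=%d\n", QPy, chroma_qp_index_offset, QPc);
    return 0;
}
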
diff --git a/media/libstagefright/codecs/avc/common/include/avclib_common.h b/media/libstagefright/codecs/avc/common/include/avclib_common.h
deleted file mode 100644
index cbbf0c6..0000000
--- a/media/libstagefright/codecs/avc/common/include/avclib_common.h
+++ /dev/null
@@ -1,557 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
-This file contains declarations of internal functions for common encoder/decoder library.
-@publishedAll
-*/
-#ifndef AVCCOMMON_LIB_H_INCLUDED
-#define AVCCOMMON_LIB_H_INCLUDED
-
-#include <stdlib.h>
-
-#ifndef AVCINT_COMMON_H_INCLUDED
-#include "avcint_common.h"
-#endif
-
-/*----------- deblock.c --------------*/
-/**
-This function performs conditional deblocking on a complete picture.
-\param "video" "Pointer to AVCCommonObj."
-\return "AVC_SUCCESS for success and AVC_FAIL otherwise."
-*/
-OSCL_IMPORT_REF AVCStatus DeblockPicture(AVCCommonObj *video);
-
-/**
-This function performs MB-based deblocking when MB_BASED_DEBLOCK
-is defined at compile time.
-\param "video" "Pointer to AVCCommonObj."
-\return "AVC_SUCCESS for success and AVC_FAIL otherwise."
-*/
-void MBInLoopDeblock(AVCCommonObj *video);
-
-
-/*---------- dpb.c --------------------*/
-/**
-This function is called every time a new sequence is detected.
-\param "avcHandle" "Pointer to AVCHandle."
-\param "video" "Pointer to AVCCommonObj."
-\param "padding" "Flag specifying whether padding in luma component is needed (used for encoding)."
-\return "AVC_SUCCESS or AVC_FAIL."
-*/
-OSCL_IMPORT_REF AVCStatus AVCConfigureSequence(AVCHandle *avcHandle, AVCCommonObj *video, bool padding);
-
-/**
-This function allocates and initializes the decoded picture buffer structure based on
-the profile and level for the first sequence parameter set. Currently,
-it does not allow changing the profile/level for subsequent SPS.
-\param "avcHandle" "Pointer to AVCHandle."
-\param "video" "Pointer to AVCCommonObj."
-\param "FrameHeightInMbs" "Height of the frame in the unit of MBs."
-\param "PicWidthInMbs" "Width of the picture in the unit of MBs."
-\param "padding" "Flag specifying whether padding in luma component is needed (used for encoding)."
-\return "AVC_SUCCESS or AVC_FAIL."
-*/
-AVCStatus InitDPB(AVCHandle *avcHandle, AVCCommonObj *video, int FrameHeightInMbs, int PicWidthInMbs, bool padding);
-
-/**
-This function frees the DPB memory.
-\param "avcHandle" "Pointer to AVCHandle."
-\param "video" "Pointer to AVCCommonObj."
-\return "AVC_SUCCESS or AVC_FAIL."
-*/
-OSCL_IMPORT_REF AVCStatus CleanUpDPB(AVCHandle *avcHandle, AVCCommonObj *video);
-
-/**
-This function finds an empty frame in the decoded picture buffer to be used for the
-current picture, initializes the corresponding picture structure with Sl, Scb, Scr,
-width, height and pitch.
-\param "avcHandle" "Pointer to the main handle object."
-\param "video" "Pointer to AVCCommonObj."
-\return "AVC_SUCCESS or AVC_FAIL."
-*/
-OSCL_IMPORT_REF AVCStatus DPBInitBuffer(AVCHandle *avcHandle, AVCCommonObj *video);
-/**
-This function finds an empty frame in the decoded picture buffer to be used for the
-current picture, initializes the corresponding picture structure with Sl, Scb, Scr,
-width, height and pitch.
-\param "video" "Pointer to AVCCommonObj."
-\param "CurrPicNum" "Current picture number (only used in decoder)."
-\return "AVC_SUCCESS or AVC_FAIL."
-*/
-
-OSCL_IMPORT_REF void DPBInitPic(AVCCommonObj *video, int CurrPicNum);
-
-/**
-This function releases the current frame back to the available pool for a skipped frame after encoding.
-\param "avcHandle" "Pointer to the main handle object."
-\param "video" "Pointer to the AVCCommonObj."
-\return "void."
-*/
-OSCL_IMPORT_REF void DPBReleaseCurrentFrame(AVCHandle *avcHandle, AVCCommonObj *video);
-
-/**
-This function performs decoded reference picture marking process and store the current picture to the
-corresponding frame storage in the decoded picture buffer.
-\param "avcHandle" "Pointer to the main handle object."
-\param "video" "Pointer to the AVCCommonObj."
-\return "AVC_SUCCESS or AVC_FAIL."
-*/
-OSCL_IMPORT_REF AVCStatus StorePictureInDPB(AVCHandle *avcHandle, AVCCommonObj *video);
-
-/**
-This function performs the sliding window operation on the reference picture lists, see subclause 8.2.5.3.
-It removes short-term ref frames with smallest FrameNumWrap from the reference list.
-\param "avcHandle" "Pointer to the main handle object."
-\param "video" "Pointer to the AVCCommonObj."
-\param "dpb" "Pointer to the AVCDecPicBuffer."
-\return "AVC_SUCCESS or AVC_FAIL (contradicting values or scenario as in the Note in the draft)."
-*/
-AVCStatus sliding_window_process(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb);
-
-
-/**
-This function performs the adaptive memory marking operation on the reference picture lists,
-see subclause 8.2.5.4. It calls other functions for specific operations.
-\param "video" "Pointer to the AVCCommonObj."
-\param "dpb" "Pointer to the AVCDecPicBuffer."
-\param "sliceHdr" "Pointer to the AVCSliceHeader."
-\return "AVC_SUCCESS or AVC_FAIL (contradicting values or scenario as in the Note in the draft)."
-*/
-AVCStatus adaptive_memory_marking(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, AVCSliceHeader *sliceHdr);
-
-/**
-This function performs memory management control operation 1, marking a short-term picture
-as unused for reference. See subclause 8.2.5.4.1.
-\param "video" "Pointer to the AVCCommonObj."
-\param "dpb" "Pointer to the AVCDecPicBuffer."
-\param "difference_of_pic_nums_minus1" "From the syntax in dec_ref_pic_marking()."
-*/
-void MemMgrCtrlOp1(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, int difference_of_pic_nums_minus1);
-
-/**
-This function performs memory management control operation 2, marking a long-term picture
-as unused for reference. See subclause 8.2.5.4.2.
-\param "dpb" "Pointer to the AVCDecPicBuffer."
-\param "field_pic_flag" "Flag whether the current picture is field or not."
-\param "long_term_pic_num" "From the syntax in dec_ref_pic_marking()."
-*/
-void MemMgrCtrlOp2(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, int long_term_pic_num);
-
-/**
-This function performs memory management control operation 3, assigning a LongTermFrameIdx to
-a short-term reference picture. See subclause 8.2.5.4.3.
-\param "video" "Pointer to the AVCCommonObj."
-\param "dpb" "Pointer to the AVCDecPicBuffer."
-\param "difference_of_pic_nums_minus1" "From the syntax in dec_ref_pic_marking()."
-\param "long_term_pic_num" "From the syntax in dec_ref_pic_marking()."
-*/
-void MemMgrCtrlOp3(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint difference_of_pic_nums_minus1,
- uint long_term_frame_idx);
-
-/**
-This function performs memory management control operation 4, getting new MaxLongTermFrameIdx.
- See subclause 8.2.5.4.4.
-\param "video" "Pointer to the AVCCommonObj."
-\param "dpb" "Pointer to the AVCDecPicBuffer."
-\param "max_long_term_frame_idx_plus1" "From the syntax in dec_ref_pic_marking()."
-*/
-void MemMgrCtrlOp4(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint max_long_term_frame_idx_plus1);
-
-/**
-This function performs memory management control operation 5, marking all reference pictures
-as unused for reference and setting MaxLongTermFrameIdx to indicate no long-term frame indices.
- See subclause 8.2.5.4.5.
-\param "video" "Pointer to the AVCCommonObj."
-\param "dpb" "Pointer to the AVCDecPicBuffer."
-*/
-void MemMgrCtrlOp5(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb);
-
-/**
-This function performs memory management control operation 6, assigning a long-term frame index
-to the current picture. See subclause 8.2.5.4.6.
-\param "video" "Pointer to the AVCCommonObj."
-\param "dpb" "Pointer to the AVCDecPicBuffer."
-\param "long_term_frame_idx" "From the syntax in dec_ref_pic_marking()."
-*/
-void MemMgrCtrlOp6(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint long_term_frame_idx);
-
-/**
-This function marks a long-term ref frame with a specific frame index as unused for reference.
-\param "dpb" "Pointer to the AVCDecPicBuffer."
-\param "long_term_frame_idx" "To look for"
-*/
-void unmark_long_term_frame_for_reference_by_frame_idx(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, uint long_term_frame_idx);
-
-/**
-This function marks a long-term ref field with a specific frame index as unused for reference except
-a frame that contains a picture with picNumX.
-\param "dpb" "Pointer to the AVCDecPicBuffer."
-\param "long_term_frame_idx" "To look for."
-\param "picNumX" "To look for."
-*/
-void unmark_long_term_field_for_reference_by_frame_idx(AVCCommonObj *video, AVCDecPicBuffer *dpb, uint long_term_frame_indx, int picNumX);
-
-/**
-This function marks a frame as unused for reference.
-\param "fs" "Pointer to AVCFrameStore to be unmarked."
-*/
-void unmark_for_reference(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, uint idx);
-
-void update_ref_list(AVCDecPicBuffer *dpb);
-
-
-/*---------- fmo.c --------------*/
-/**
-This function initializes flexible macroblock reordering.
-\param "video" "Pointer to AVCCommonObj."
-\return "AVC_SUCCESS for success and AVC_FAIL otherwise."
-*/
-OSCL_IMPORT_REF AVCStatus FMOInit(AVCCommonObj *video);
-
-/**
-This function fills up an array that maps Map unit to the slice group
-following the interleaved slice group map type.
-\param "mapUnitToSliceGroupMap" "Array of slice group mapping."
-\param "run_length_minus1" "Array of the run-length."
-\param "num_slice_groups_minus_1" "Number of slice group minus 1."
-\param "PicSizeInMapUnit" "Size of the picture in number Map units."
-\return "Void."
-*/
-void FmoGenerateType0MapUnitMap(int *mapUnitToSliceGroupMap, uint *run_length_minus1, uint num_slice_groups_minus1, uint PicSizeInMapUnits);
-
-/**
-This function fills up an array that maps Map unit to the slice group
-following the dispersed slice group map type.
-\param "mapUnitToSliceGroupMap" "Array of slice group mapping."
-\param "PicWidthInMbs" "Width of the luma picture in macroblock unit."
-\param "num_slice_groups_minus_1" "Number of slice group minus 1."
-\param "PicSizeInMapUnit" "Size of the picture in number Map units."
-\return "Void."
-*/
-void FmoGenerateType1MapUnitMap(int *mapUnitToSliceGroupMap, int PicWidthInMbs, uint num_slice_groups_minus1, uint PicSizeInMapUnits);
-
-/**
-This function fills up an array that maps Map unit to the slice group
-following the foreground with left-over slice group map type.
-\param "pps" "Pointer to AVCPicParamSets structure."
-\param "mapUnitToSliceGroupMap" "Array of slice group mapping."
-\param "PicWidthInMbs" "Width of the luma picture in macroblock unit."
-\param "num_slice_groups_minus_1" "Number of slice group minus 1."
-\param "PicSizeInMapUnit" "Size of the picture in number Map units."
-\return "Void."
-*/
-void FmoGenerateType2MapUnitMap(AVCPicParamSet *pps, int *mapUnitToSliceGroupMap, int PicWidthInMbs,
- uint num_slice_groups_minus1, uint PicSizeInMapUnits);
-
-/**
-This function fills up an array that maps Map unit to the slice group
-following the box-out slice group map type.
-\param "pps" "Pointer to AVCPicParamSets structure."
-\param "mapUnitToSliceGroupMap" "Array of slice group mapping."
-\param "PicWidthInMbs" "Width of the luma picture in macroblock unit."
-\return "Void."
-*/
-void FmoGenerateType3MapUnitMap(AVCCommonObj *video, AVCPicParamSet* pps, int *mapUnitToSliceGroupMap,
- int PicWidthInMbs);
-
-/**
-This function fills up an array that maps Map unit to the slice group
-following the raster scan slice group map type.
-\param "mapUnitToSliceGroupMap" "Array of slice group mapping."
-\param "MapUnitsInSliceGroup0" "Derived in subclause 7.4.3."
-\param "slice_group_change_direction_flag" "A value from the slice header."
-\param "PicSizeInMapUnit" "Size of the picture in number Map units."
-\return "void"
-*/
-void FmoGenerateType4MapUnitMap(int *mapUnitToSliceGroupMap, int MapUnitsInSliceGroup0,
- int slice_group_change_direction_flag, uint PicSizeInMapUnits);
-
-/**
-This function fills up an array that maps Map unit to the slice group
-following the wipe slice group map type.
-\param "mapUnitToSliceGroupMap" "Array of slice group mapping."
-\param "video" "Pointer to AVCCommonObj structure."
-\param "slice_group_change_direction_flag" "A value from the slice header."
-\param "PicSizeInMapUnit" "Size of the picture in number Map units."
-\return "void"
-*/
-void FmoGenerateType5MapUnitMap(int *mapUnitsToSliceGroupMap, AVCCommonObj *video,
- int slice_group_change_direction_flag, uint PicSizeInMapUnits);
-
-/**
-This function fills up an array that maps Map unit to the slice group
-following the explicit slice group map type.
-\param "mapUnitToSliceGroupMap" "Array of slice group mapping."
-\param "slice_group_id" "Array of slice_group_id from AVCPicParamSet structure."
-\param "PicSizeInMapUnit" "Size of the picture in number Map units."
-\return "void"
-*/
-void FmoGenerateType6MapUnitMap(int *mapUnitsToSliceGroupMap, int *slice_group_id, uint PicSizeInMapUnits);
-
-/*------------- itrans.c --------------*/
-/**
-This function performs transformation of the Intra16x16DC value according to
-subclause 8.5.6.
-\param "block" "Pointer to the video->block[0][0][0]."
-\param "QPy" "Quantization parameter."
-\return "void."
-*/
-void Intra16DCTrans(int16 *block, int Qq, int Rq);
-
-/**
-This function performs transformation of a 4x4 block according to
-subclause 8.5.8.
-\param "block" "Pointer to the origin of transform coefficient area."
-\param "pred" "Pointer to the origin of predicted area."
-\param "cur" "Pointer to the origin of the output area."
-\param "width" "Pitch of cur."
-\return "void."
-*/
-void itrans(int16 *block, uint8 *pred, uint8 *cur, int width);
-
-/*
-This function is the same one as itrans except for chroma.
-\param "block" "Pointer to the origin of transform coefficient area."
-\param "pred" "Pointer to the origin of predicted area."
-\param "cur" "Pointer to the origin of the output area."
-\param "width" "Pitch of cur."
-\return "void."
-*/
-void ictrans(int16 *block, uint8 *pred, uint8 *cur, int width);
-
-/**
-This function performs transformation of the DCChroma value according to
-subclause 8.5.7.
-\param "block" "Pointer to the video->block[0][0][0]."
-\param "QPc" "Quantization parameter."
-\return "void."
-*/
-void ChromaDCTrans(int16 *block, int Qq, int Rq);
-
-/**
-This function copies a block from pred to cur.
-\param "pred" "Pointer to prediction block."
-\param "cur" "Pointer to the current YUV block."
-\param "width" "Pitch of cur memory."
-\param "pred_pitch" "Pitch for pred memory.
-\return "void."
-*/
-void copy_block(uint8 *pred, uint8 *cur, int width, int pred_pitch);
-
-/*--------- mb_access.c ----------------*/
-/**
-This function initializes the neighboring information before start macroblock decoding.
-\param "video" "Pointer to AVCCommonObj."
-\param "mbNum" "The current macroblock index."
-\param "currMB" "Pointer to the current AVCMacroblock structure."
-\return "void"
-*/
-OSCL_IMPORT_REF void InitNeighborAvailability(AVCCommonObj *video, int mbNum);
-
-/**
-This function checks whether the requested neighboring macroblock is available.
-\param "MbToSliceGroupMap" "Array containing the slice group ID mapping to MB index."
-\param "PicSizeInMbs" "Size of the picture in number of MBs."
-\param "mbAddr" "Neighboring macroblock index to check."
-\param "currMbAddr" "Current macroblock index."
-\return "TRUE if the neighboring MB is available, FALSE otherwise."
-*/
-bool mb_is_available(AVCMacroblock *mblock, uint PicSizeInMbs, int mbAddr, int currMbAddr);
-
-/**
-This function performs prediction of the nonzero coefficient for a luma block (i,j).
-\param "video" "Pointer to AVCCommonObj."
-\param "i" "Block index, horizontal."
-\param "j" "Block index, vertical."
-\return "Predicted number of nonzero coefficient."
-*/
-OSCL_IMPORT_REF int predict_nnz(AVCCommonObj *video, int i, int j);
-
-/**
-This function performs prediction of the nonzero coefficient for a chroma block (i,j).
-\param "video" "Pointer to AVCCommonObj."
-\param "i" "Block index, horizontal."
-\param "j" "Block index, vertical."
-\return "Predicted number of nonzero coefficient."
-*/
-OSCL_IMPORT_REF int predict_nnz_chroma(AVCCommonObj *video, int i, int j);
-
-/**
-This function calculates the predicted motion vectors for the current macroblock.
-\param "video" "Pointer to AVCCommonObj."
-\param "encFlag" "Boolean whether this function is used by encoder or decoder."
-\return "void."
-*/
-OSCL_IMPORT_REF void GetMotionVectorPredictor(AVCCommonObj *video, int encFlag);
-
-/*---------- reflist.c -----------------*/
-/**
-This function initializes reference picture list used in INTER prediction
-at the beginning of each slice decoding. See subclause 8.2.4.
-\param "video" "Pointer to AVCCommonObj."
-\return "void"
-Output is video->RefPicList0, video->RefPicList1, video->refList0Size and video->refList1Size.
-*/
-OSCL_IMPORT_REF void RefListInit(AVCCommonObj *video);
-
-/**
-This function generates a picture list from the frame list. Used when the current picture is a field.
-see subclause 8.2.4.2.5.
-\param "video" "Pointer to AVCCommonObj."
-\param "IsL1" "Is L1 list?"
-\param "long_term" "Is long-term prediction?"
-\return "void"
-*/
-void GenPicListFromFrameList(AVCCommonObj *video, int IsL1, int long_term);
-
-/**
-This function performs reference picture list reordering according to the
-ref_pic_list_reordering() syntax. See subclause 8.2.4.3.
-\param "video" "Pointer to AVCCommonObj."
-\return "AVC_SUCCESS or AVC_FAIL"
-Output is video->RefPicList0, video->RefPicList1, video->refList0Size and video->refList1Size.
-*/
-OSCL_IMPORT_REF AVCStatus ReOrderList(AVCCommonObj *video);
-
-/**
-This function performs reference picture list reordering according to the
-ref_pic_list_reordering() syntax regardless of list 0 or list 1. See subclause 8.2.4.3.
-\param "video" "Pointer to AVCCommonObj."
-\param "isL1" "Is list 1 or not."
-\return "AVC_SUCCESS or AVC_FAIL"
-Output is video->RefPicList0 and video->refList0Size or video->RefPicList1 and video->refList1Size.
-*/
-AVCStatus ReorderRefPicList(AVCCommonObj *video, int isL1);
-
-/**
-This function performs reordering process of reference picture list for short-term pictures.
-See subclause 8.2.4.3.1.
-\param "video" "Pointer to AVCCommonObj."
-\param "picNumLX" "picNumLX of an entry in the reference list."
-\param "refIdxLX" "Pointer to the current entry index in the reference."
-\param "isL1" "Is list 1 or not."
-\return "AVC_SUCCESS or AVC_FAIL"
-*/
-AVCStatus ReorderShortTerm(AVCCommonObj *video, int picNumLX, int *refIdxLX, int isL1);
-
-/**
-This function performs reordering process of reference picture list for long-term pictures.
-See subclause 8.2.4.3.2.
-\param "video" "Pointer to AVCCommonObj."
-\param "LongTermPicNum" "LongTermPicNum of an entry in the reference list."
-\param "refIdxLX" "Pointer to the current entry index in the reference."
-\param "isL1" "Is list 1 or not."
-\return "AVC_SUCCESS or AVC_FAIL"
-*/
-AVCStatus ReorderLongTerm(AVCCommonObj *video, int LongTermPicNum, int *refIdxLX, int isL1);
-
-/**
-This function gets the picture in the DPB with the given PicNum.
-\param "video" "Pointer to AVCCommonObj."
-\param "picNum" "PicNum of the picture we are looking for."
-\return "Pointer to the AVCPictureData or NULL if not found"
-*/
-AVCPictureData* GetShortTermPic(AVCCommonObj *video, int picNum);
-
-/**
-This function gets the picture in the DPB with the given LongtermPicNum.
-\param "video" "Pointer to AVCCommonObj."
-\param "LongtermPicNum" "LongtermPicNum of the picture we are looking for."
-\return "Pointer to the AVCPictureData."
-*/
-AVCPictureData* GetLongTermPic(AVCCommonObj *video, int LongtermPicNum);
-
-/**
-This function indicates whether the picture is used for short-term reference or not.
-\param "s" "Pointer to AVCPictureData."
-\return "1 if it is used for short-term, 0 otherwise."
-*/
-int is_short_ref(AVCPictureData *s);
-
-/**
-This function indicates whether the picture is used for long-term reference or not.
-\param "s" "Pointer to AVCPictureData."
-\return "1 if it is used for long-term, 0 otherwise."
-*/
-int is_long_ref(AVCPictureData *s);
-
-/**
-This function sorts an array of pointers to AVCPictureData in descending order of
-the PicNum value.
-\param "data" "Array of pointers to AVCPictureData."
-\param "num" "Size of the array."
-\return "void"
-*/
-void SortPicByPicNum(AVCPictureData *data[], int num);
-
-/**
-This function sorts an array of pointers to AVCPictureData in ascending order of
-the PicNum value.
-\param "data" "Array of pointers to AVCPictureData."
-\param "num" "Size of the array."
-\return "void"
-*/
-void SortPicByPicNumLongTerm(AVCPictureData *data[], int num);
-
-/**
-This function sorts an array of pointers to AVCFrameStore in descending order of
-the FrameNumWrap value.
-\param "data" "Array of pointers to AVCFrameStore."
-\param "num" "Size of the array."
-\return "void"
-*/
-void SortFrameByFrameNumWrap(AVCFrameStore *data[], int num);
-
-/**
-This function sorts an array of pointers to AVCFrameStore in ascending order of
-the LongTermFrameIdx value.
-\param "data" "Array of pointers to AVCFrameStore."
-\param "num" "Size of the array."
-\return "void"
-*/
-void SortFrameByLTFrameIdx(AVCFrameStore *data[], int num);
-
-/**
-This function sorts an array of pointers to AVCPictureData in descending or
-ascending order of the PicOrderCnt value, depending on the descending flag.
-\param "data" "Array of pointers to AVCPictureData."
-\param "num" "Size of the array."
-\param "descending" "Nonzero to sort in descending order, zero for ascending."
-\return "void"
-*/
-void SortPicByPOC(AVCPictureData *data[], int num, int descending);
-
-/**
-This function sorts an array of pointers to AVCPictureData in ascending order of
-the LongTermPicNum value.
-\param "data" "Array of pointers to AVCPictureData."
-\param "num" "Size of the array."
-\return "void"
-*/
-void SortPicByLTPicNum(AVCPictureData *data[], int num);
-
-/**
-This function sorts an array of pointers to AVCFrameStore in descending or
-ascending order of the PicOrderCnt value, depending on the descending flag.
-\param "data" "Array of pointers to AVCFrameStore."
-\param "num" "Size of the array."
-\param "descending" "Nonzero to sort in descending order, zero for ascending."
-\return "void"
-*/
-void SortFrameByPOC(AVCFrameStore *data[], int num, int descending);
-
-
-#endif /* _AVCCOMMON_LIB_H_ */
diff --git a/media/libstagefright/codecs/avc/common/src/deblock.cpp b/media/libstagefright/codecs/avc/common/src/deblock.cpp
deleted file mode 100644
index 5f8b693..0000000
--- a/media/libstagefright/codecs/avc/common/src/deblock.cpp
+++ /dev/null
@@ -1,1667 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-
-#include <string.h>
-
-#include "avclib_common.h"
-
-#define MAX_QP 51
-#define MB_BLOCK_SIZE 16
-
-// NOTE: this table is for the function GetStrength() only
-const static int ININT_STRENGTH[4] = {0x04040404, 0x03030303, 0x03030303, 0x03030303};
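-// Each entry packs four per-sample strength bytes into one word: 0x04040404 loads
-// bS = 4 for the macroblock-boundary edge of an intra MB, 0x03030303 loads bS = 3
-// for the internal edges of an intra MB (see the GetStrength_* functions below).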
-
-
-// NOTE: these 3 tables are for the function EdgeLoop() only
-// NOTE: the tables below must be changed if, for instance, the QP doubling step is changed from 6 to 8 values
-
-const static int ALPHA_TABLE[52] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 5, 6, 7, 8, 9, 10, 12, 13, 15, 17, 20, 22, 25, 28, 32, 36, 40, 45, 50, 56, 63, 71, 80, 90, 101, 113, 127, 144, 162, 182, 203, 226, 255, 255} ;
-const static int BETA_TABLE[52] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18} ;
-const static int CLIP_TAB[52][5] =
-{
- { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0},
- { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0},
- { 0, 0, 0, 0, 0}, { 0, 0, 0, 1, 1}, { 0, 0, 0, 1, 1}, { 0, 0, 0, 1, 1}, { 0, 0, 0, 1, 1}, { 0, 0, 1, 1, 1}, { 0, 0, 1, 1, 1}, { 0, 1, 1, 1, 1},
- { 0, 1, 1, 1, 1}, { 0, 1, 1, 1, 1}, { 0, 1, 1, 1, 1}, { 0, 1, 1, 2, 2}, { 0, 1, 1, 2, 2}, { 0, 1, 1, 2, 2}, { 0, 1, 1, 2, 2}, { 0, 1, 2, 3, 3},
- { 0, 1, 2, 3, 3}, { 0, 2, 2, 3, 3}, { 0, 2, 2, 4, 4}, { 0, 2, 3, 4, 4}, { 0, 2, 3, 4, 4}, { 0, 3, 3, 5, 5}, { 0, 3, 4, 6, 6}, { 0, 3, 4, 6, 6},
- { 0, 4, 5, 7, 7}, { 0, 4, 5, 8, 8}, { 0, 4, 6, 9, 9}, { 0, 5, 7, 10, 10}, { 0, 6, 8, 11, 11}, { 0, 6, 8, 13, 13}, { 0, 7, 10, 14, 14}, { 0, 8, 11, 16, 16},
- { 0, 9, 12, 18, 18}, { 0, 10, 13, 20, 20}, { 0, 11, 15, 23, 23}, { 0, 13, 17, 25, 25}
-};
-
-// NOTE: this table is only for QP clipping, index = QP + video->FilterOffsetA/B, clipped to [0, 51]
-// video->FilterOffsetA/B is in [-12, 12]
-const static int QP_CLIP_TAB[76] =
-{
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // [-12, 0]
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 31, 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // [1, 51]
- 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51 // [52,63]
-};
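-// The filter code indexes this table through a pointer biased by +12
-// (qp_clip_tab = QP_CLIP_TAB + 12), so an index of QP + FilterOffsetA/B in the
-// range [-12, 63] is clipped to a valid QP in [0, 51] without any branches.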
-
-static void DeblockMb(AVCCommonObj *video, int mb_x, int mb_y, uint8 *SrcY, uint8 *SrcU, uint8 *SrcV);
-//static void GetStrength(AVCCommonObj *video, uint8 *Strength, AVCMacroblock* MbP, AVCMacroblock* MbQ, int dir, int edge);
-static void GetStrength_Edge0(uint8 *Strength, AVCMacroblock* MbP, AVCMacroblock* MbQ, int dir);
-static void GetStrength_VerticalEdges(uint8 *Strength, AVCMacroblock* MbQ);
-static void GetStrength_HorizontalEdges(uint8 Strength[12], AVCMacroblock* MbQ);
-static void EdgeLoop_Luma_vertical(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch);
-static void EdgeLoop_Luma_horizontal(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch);
-static void EdgeLoop_Chroma_vertical(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch);
-static void EdgeLoop_Chroma_horizontal(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch);
-
-/*
- *****************************************************************************************
- * \brief Filter all macroblocks in order of increasing macroblock address.
- *****************************************************************************************
-*/
-
-OSCL_EXPORT_REF AVCStatus DeblockPicture(AVCCommonObj *video)
-{
- uint i, j;
- int pitch = video->currPic->pitch, pitch_c, width;
- uint8 *SrcY, *SrcU, *SrcV;
-
- SrcY = video->currPic->Sl; // pointers to source
- SrcU = video->currPic->Scb;
- SrcV = video->currPic->Scr;
- pitch_c = pitch >> 1;
- width = video->currPic->width;
-
- for (i = 0; i < video->PicHeightInMbs; i++)
- {
- for (j = 0; j < video->PicWidthInMbs; j++)
- {
- DeblockMb(video, j, i, SrcY, SrcU, SrcV);
- // update SrcY, SrcU, SrcV
- SrcY += MB_BLOCK_SIZE;
- SrcU += (MB_BLOCK_SIZE >> 1);
- SrcV += (MB_BLOCK_SIZE >> 1);
- }
-
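- // After one row of macroblocks the pointers have advanced by "width" samples;
- // step down 16 luma lines (8 chroma lines) minus that width to reach the first
- // macroblock of the next row.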
- SrcY += ((pitch << 4) - width);
- SrcU += ((pitch_c << 3) - (width >> 1));
- SrcV += ((pitch_c << 3) - (width >> 1));
- }
-
- return AVC_SUCCESS;
-}
-
-#ifdef MB_BASED_DEBLOCK
-/*
- *****************************************************************************************
- * \brief Filter one macroblock in the fast macroblock memory and copy it to the frame
- *****************************************************************************************
-*/
-void MBInLoopDeblock(AVCCommonObj *video)
-{
- AVCPictureData *currPic = video->currPic;
-#ifdef USE_PRED_BLOCK
- uint8 *predCb, *predCr, *pred_block;
- int i, j, dst_width, dst_height, dst_widthc, dst_heightc;
-#endif
- int pitch = currPic->pitch;
- int x_pos = video->mb_x;
- int y_pos = video->mb_y;
- uint8 *curL, *curCb, *curCr;
- int offset;
-
- offset = (y_pos << 4) * pitch;
-
- curL = currPic->Sl + offset + (x_pos << 4);
-
- offset >>= 2;
- offset += (x_pos << 3);
-
- curCb = currPic->Scb + offset;
- curCr = currPic->Scr + offset;
-
-#ifdef USE_PRED_BLOCK
- pred_block = video->pred;
-
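- // The scratch buffer video->pred appears to be laid out as a 20x20 luma tile
- // (400 bytes) followed by two 12x12 chroma tiles (144 bytes each); the 4-sample
- // top/left border holds neighboring pixels, so the current macroblock starts at
- // offsets 84 (= 4*20 + 4), 452 (= 400 + 4*12 + 4) and 596 (= 452 + 144).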
- /* 1. copy neighboring pixels from frame to the video->pred_block */
- if (y_pos) /* not the 0th row */
- {
- /* copy to the top 4 lines of the macroblock */
- curL -= (pitch << 2); /* go back 4 lines */
-
- memcpy(pred_block + 4, curL, 16);
- curL += pitch;
- memcpy(pred_block + 24, curL, 16);
- curL += pitch;
- memcpy(pred_block + 44, curL, 16);
- curL += pitch;
- memcpy(pred_block + 64, curL, 16);
- curL += pitch;
-
- curCb -= (pitch << 1); /* go back 4 lines chroma */
- curCr -= (pitch << 1);
-
- pred_block += 400;
-
- memcpy(pred_block + 4, curCb, 8);
- curCb += (pitch >> 1);
- memcpy(pred_block + 16, curCb, 8);
- curCb += (pitch >> 1);
- memcpy(pred_block + 28, curCb, 8);
- curCb += (pitch >> 1);
- memcpy(pred_block + 40, curCb, 8);
- curCb += (pitch >> 1);
-
- pred_block += 144;
- memcpy(pred_block + 4, curCr, 8);
- curCr += (pitch >> 1);
- memcpy(pred_block + 16, curCr, 8);
- curCr += (pitch >> 1);
- memcpy(pred_block + 28, curCr, 8);
- curCr += (pitch >> 1);
- memcpy(pred_block + 40, curCr, 8);
- curCr += (pitch >> 1);
-
- pred_block = video->pred;
- }
-
- /* 2. perform deblocking. */
- DeblockMb(video, x_pos, y_pos, pred_block + 84, pred_block + 452, pred_block + 596);
-
- /* 3. copy it back to the frame and update pred_block */
- predCb = pred_block + 400;
- predCr = predCb + 144;
-
- /* find the range of the block inside pred_block to be copied back */
- if (y_pos) /* not the 0th row */
- {
- curL -= (pitch << 2);
- curCb -= (pitch << 1);
- curCr -= (pitch << 1);
-
- dst_height = 20;
- dst_heightc = 12;
- }
- else
- {
- pred_block += 80;
- predCb += 48;
- predCr += 48;
- dst_height = 16;
- dst_heightc = 8;
- }
-
- if (x_pos) /* find the width */
- {
- curL -= 4;
- curCb -= 4;
- curCr -= 4;
- if (x_pos == (int)(video->PicWidthInMbs - 1))
- {
- dst_width = 20;
- dst_widthc = 12;
- }
- else
- {
- dst_width = 16;
- dst_widthc = 8;
- }
- }
- else
- {
- pred_block += 4;
- predCb += 4;
- predCr += 4;
- dst_width = 12;
- dst_widthc = 4;
- }
-
- /* perform copy */
- for (j = 0; j < dst_height; j++)
- {
- memcpy(curL, pred_block, dst_width);
- curL += pitch;
- pred_block += 20;
- }
- for (j = 0; j < dst_heightc; j++)
- {
- memcpy(curCb, predCb, dst_widthc);
- memcpy(curCr, predCr, dst_widthc);
- curCb += (pitch >> 1);
- curCr += (pitch >> 1);
- predCb += 12;
- predCr += 12;
- }
-
- if (x_pos != (int)(video->PicWidthInMbs - 1)) /* now copy from the right-most 4 columns to the left-most 4 columns */
- {
- pred_block = video->pred;
- for (i = 0; i < 20; i += 4)
- {
- *((uint32*)pred_block) = *((uint32*)(pred_block + 16));
- pred_block += 20;
- *((uint32*)pred_block) = *((uint32*)(pred_block + 16));
- pred_block += 20;
- *((uint32*)pred_block) = *((uint32*)(pred_block + 16));
- pred_block += 20;
- *((uint32*)pred_block) = *((uint32*)(pred_block + 16));
- pred_block += 20;
- }
-
- for (i = 0; i < 24; i += 4)
- {
- *((uint32*)pred_block) = *((uint32*)(pred_block + 8));
- pred_block += 12;
- *((uint32*)pred_block) = *((uint32*)(pred_block + 8));
- pred_block += 12;
- *((uint32*)pred_block) = *((uint32*)(pred_block + 8));
- pred_block += 12;
- *((uint32*)pred_block) = *((uint32*)(pred_block + 8));
- pred_block += 12;
- }
-
- }
-#else
- DeblockMb(video, x_pos, y_pos, curL, curCb, curCr);
-#endif
-
- return ;
-}
-#endif
-
-/*
- *****************************************************************************************
- * \brief Deblocking filter for one macroblock.
- *****************************************************************************************
- */
-
-void DeblockMb(AVCCommonObj *video, int mb_x, int mb_y, uint8 *SrcY, uint8 *SrcU, uint8 *SrcV)
-{
- AVCMacroblock *MbP, *MbQ;
- int edge, QP, QPC;
- int filterLeftMbEdgeFlag = (mb_x != 0);
- int filterTopMbEdgeFlag = (mb_y != 0);
- int pitch = video->currPic->pitch;
- int indexA, indexB;
- int *tmp;
- int Alpha, Beta, Alpha_c, Beta_c;
- int mbNum = mb_y * video->PicWidthInMbs + mb_x;
- int *clipTable, *clipTable_c, *qp_clip_tab;
- uint8 Strength[16];
- void* str;
-
- MbQ = &(video->mblock[mbNum]); // current Mb
-
-
- // If filter is disabled, return
- if (video->sliceHdr->disable_deblocking_filter_idc == 1) return;
-
- if (video->sliceHdr->disable_deblocking_filter_idc == 2)
- {
- // don't filter at slice boundaries
- filterLeftMbEdgeFlag = mb_is_available(video->mblock, video->PicSizeInMbs, mbNum - 1, mbNum);
- filterTopMbEdgeFlag = mb_is_available(video->mblock, video->PicSizeInMbs, mbNum - video->PicWidthInMbs, mbNum);
- }
-
- /* NOTE: edge=0 and edge=1~3 are separate cases because of the difference of MbP, index A and indexB calculation */
- /* for edge = 1~3, MbP, indexA and indexB remain the same, and thus there is no need to re-calculate them for each edge */
-
- qp_clip_tab = (int *)QP_CLIP_TAB + 12;
-
- /* 1.VERTICAL EDGE + MB BOUNDARY (edge = 0) */
- if (filterLeftMbEdgeFlag)
- {
- MbP = MbQ - 1;
- //GetStrength(video, Strength, MbP, MbQ, 0, 0); // Strength for 4 blks in 1 stripe, 0 => vertical edge
- GetStrength_Edge0(Strength, MbP, MbQ, 0);
-
- str = (void*)Strength; //de-ref type-punned pointer fix
- if (*((uint32*)str)) // only if one of the 4 Strength bytes is != 0
- {
- QP = (MbP->QPy + MbQ->QPy + 1) >> 1; // Average QP of the two blocks;
- indexA = QP + video->FilterOffsetA;
- indexB = QP + video->FilterOffsetB;
- indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA)
- indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB)
-
- Alpha = ALPHA_TABLE[indexA];
- Beta = BETA_TABLE[indexB];
- clipTable = (int *) CLIP_TAB[indexA];
-
- if (Alpha > 0 && Beta > 0)
-#ifdef USE_PRED_BLOCK
- EdgeLoop_Luma_vertical(SrcY, Strength, Alpha, Beta, clipTable, 20);
-#else
- EdgeLoop_Luma_vertical(SrcY, Strength, Alpha, Beta, clipTable, pitch);
-#endif
-
- QPC = (MbP->QPc + MbQ->QPc + 1) >> 1;
- indexA = QPC + video->FilterOffsetA;
- indexB = QPC + video->FilterOffsetB;
- indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA)
- indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB)
-
- Alpha = ALPHA_TABLE[indexA];
- Beta = BETA_TABLE[indexB];
- clipTable = (int *) CLIP_TAB[indexA];
- if (Alpha > 0 && Beta > 0)
- {
-#ifdef USE_PRED_BLOCK
- EdgeLoop_Chroma_vertical(SrcU, Strength, Alpha, Beta, clipTable, 12);
- EdgeLoop_Chroma_vertical(SrcV, Strength, Alpha, Beta, clipTable, 12);
-#else
- EdgeLoop_Chroma_vertical(SrcU, Strength, Alpha, Beta, clipTable, pitch >> 1);
- EdgeLoop_Chroma_vertical(SrcV, Strength, Alpha, Beta, clipTable, pitch >> 1);
-#endif
- }
- }
-
- } /* end of: if(filterLeftMbEdgeFlag) */
-
- /* 2.VERTICAL EDGE (no boundary), the edges are all inside a MB */
- /* First calculate the necessary parameters all at once, outside the loop */
- MbP = MbQ;
-
- indexA = MbQ->QPy + video->FilterOffsetA;
- indexB = MbQ->QPy + video->FilterOffsetB;
- // index
- indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA)
- indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB)
-
- Alpha = ALPHA_TABLE[indexA];
- Beta = BETA_TABLE[indexB];
- clipTable = (int *)CLIP_TAB[indexA];
-
- /* Save Alpha, Beta and clipTable for future use, in the no-longer-needed variables filterLeftMbEdgeFlag, mbNum and tmp */
- filterLeftMbEdgeFlag = Alpha;
- mbNum = Beta;
- tmp = clipTable;
-
- indexA = MbQ->QPc + video->FilterOffsetA;
- indexB = MbQ->QPc + video->FilterOffsetB;
- indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA)
- indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB)
-
- Alpha_c = ALPHA_TABLE[indexA];
- Beta_c = BETA_TABLE[indexB];
- clipTable_c = (int *)CLIP_TAB[indexA];
-
- GetStrength_VerticalEdges(Strength + 4, MbQ); // Strength for 4 blks in 1 stripe, 0 => vertical edge
-
- for (edge = 1; edge < 4; edge++) // 4 vertical strips of 16 pel
- {
- //GetStrength_VerticalEdges(video, Strength, MbP, MbQ, 0, edge); // Strength for 4 blks in 1 stripe, 0 => vertical edge
- if (*((int*)(Strength + (edge << 2)))) // only if one of the 4 Strength bytes is != 0
- {
- if (Alpha > 0 && Beta > 0)
-#ifdef USE_PRED_BLOCK
- EdgeLoop_Luma_vertical(SrcY + (edge << 2), Strength + (edge << 2), Alpha, Beta, clipTable, 20);
-#else
- EdgeLoop_Luma_vertical(SrcY + (edge << 2), Strength + (edge << 2), Alpha, Beta, clipTable, pitch);
-#endif
-
- if (!(edge & 1) && Alpha_c > 0 && Beta_c > 0)
- {
-#ifdef USE_PRED_BLOCK
- EdgeLoop_Chroma_vertical(SrcU + (edge << 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, 12);
- EdgeLoop_Chroma_vertical(SrcV + (edge << 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, 12);
-#else
- EdgeLoop_Chroma_vertical(SrcU + (edge << 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, pitch >> 1);
- EdgeLoop_Chroma_vertical(SrcV + (edge << 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, pitch >> 1);
-#endif
- }
- }
-
- } //end edge
-
-
-
- /* 3.HORIZONTAL EDGE + MB BOUNDARY (edge = 0) */
- if (filterTopMbEdgeFlag)
- {
- MbP = MbQ - video->PicWidthInMbs;
- //GetStrength(video, Strength, MbP, MbQ, 1, 0); // Strength for 4 blks in 1 stripe, 1 => horizontal edge
- GetStrength_Edge0(Strength, MbP, MbQ, 1);
- str = (void*)Strength; //de-ref type-punned pointer fix
- if (*((uint32*)str)) // only if one of the 4 Strength bytes is != 0
- {
- QP = (MbP->QPy + MbQ->QPy + 1) >> 1; // Average QP of the two blocks;
- indexA = QP + video->FilterOffsetA;
- indexB = QP + video->FilterOffsetB;
- indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA)
- indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB)
-
- Alpha = ALPHA_TABLE[indexA];
- Beta = BETA_TABLE[indexB];
- clipTable = (int *)CLIP_TAB[indexA];
-
- if (Alpha > 0 && Beta > 0)
- {
-#ifdef USE_PRED_BLOCK
- EdgeLoop_Luma_horizontal(SrcY, Strength, Alpha, Beta, clipTable, 20);
-#else
- EdgeLoop_Luma_horizontal(SrcY, Strength, Alpha, Beta, clipTable, pitch);
-#endif
- }
-
- QPC = (MbP->QPc + MbQ->QPc + 1) >> 1;
- indexA = QPC + video->FilterOffsetA;
- indexB = QPC + video->FilterOffsetB;
- indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA)
- indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB)
-
- Alpha = ALPHA_TABLE[indexA];
- Beta = BETA_TABLE[indexB];
- clipTable = (int *)CLIP_TAB[indexA];
- if (Alpha > 0 && Beta > 0)
- {
-#ifdef USE_PRED_BLOCK
- EdgeLoop_Chroma_horizontal(SrcU, Strength, Alpha, Beta, clipTable, 12);
- EdgeLoop_Chroma_horizontal(SrcV, Strength, Alpha, Beta, clipTable, 12);
-#else
- EdgeLoop_Chroma_horizontal(SrcU, Strength, Alpha, Beta, clipTable, pitch >> 1);
- EdgeLoop_Chroma_horizontal(SrcV, Strength, Alpha, Beta, clipTable, pitch >> 1);
-#endif
- }
- }
-
- } /* end of: if(filterTopMbEdgeFlag) */
-
-
- /* 4.HORIZONTAL EDGE (no boundary), the edges are inside a MB */
- MbP = MbQ;
-
- /* Recover Alpha, Beta and clipTable for edge!=0 with the variables filterLeftMbEdgeFlag, mbNum and tmp */
- /* Note that Alpha_c, Beta_c and clipTable_c for chroma is already calculated */
- Alpha = filterLeftMbEdgeFlag;
- Beta = mbNum;
- clipTable = tmp;
-
- GetStrength_HorizontalEdges(Strength + 4, MbQ); // Strength for 4 blks in 1 stripe, horizontal edges
-
- for (edge = 1; edge < 4; edge++) // 4 horizontal strips of 16 pel
- {
- //GetStrength(video, Strength, MbP, MbQ, 1, edge); // Strength for 4 blks in 1 stripe 1 => horizontal edge
- if (*((int*)(Strength + (edge << 2)))) // only if one of the 4 Strength bytes is != 0
- {
- if (Alpha > 0 && Beta > 0)
- {
-#ifdef USE_PRED_BLOCK
- EdgeLoop_Luma_horizontal(SrcY + (edge << 2)*20, Strength + (edge << 2), Alpha, Beta, clipTable, 20);
-#else
- EdgeLoop_Luma_horizontal(SrcY + (edge << 2)*pitch, Strength + (edge << 2), Alpha, Beta, clipTable, pitch);
-#endif
- }
-
- if (!(edge & 1) && Alpha_c > 0 && Beta_c > 0)
- {
-#ifdef USE_PRED_BLOCK
- EdgeLoop_Chroma_horizontal(SrcU + (edge << 1)*12, Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, 12);
- EdgeLoop_Chroma_horizontal(SrcV + (edge << 1)*12, Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, 12);
-#else
- EdgeLoop_Chroma_horizontal(SrcU + (edge << 1)*(pitch >> 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, pitch >> 1);
- EdgeLoop_Chroma_horizontal(SrcV + (edge << 1)*(pitch >> 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, pitch >> 1);
-#endif
- }
- }
-
- } //end edge
-
- return;
-}
-
-/*
- *****************************************************************************************************
- * \brief returns a buffer of 4 Strength values for one stripe in a mb (for different Frame types)
- *****************************************************************************************************
-*/
-
-void GetStrength_Edge0(uint8 *Strength, AVCMacroblock* MbP, AVCMacroblock* MbQ, int dir)
-{
- int tmp;
- int16 *ptrQ, *ptrP;
- void* vptr;
- uint8 *pStrength;
- void* refIdx;
-
- if (MbP->mbMode == AVC_I4 || MbP->mbMode == AVC_I16 ||
- MbQ->mbMode == AVC_I4 || MbQ->mbMode == AVC_I16)
- {
-
- *((int*)Strength) = ININT_STRENGTH[0]; // Start with Strength=3. or Strength=4 for Mb-edge
-
- }
- else // if not intra or SP-frame
- {
- *((int*)Strength) = 0;
-
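- // For inter macroblocks the strength per 4x4 position along the edge is:
- // 2 if either side has nonzero coefficients, otherwise 1 if the reference
- // indices differ or any mv component differs by 4 or more (one integer pel),
- // otherwise 0. Intra macroblocks were already handled above with bS = 4.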
- if (dir == 0) // Vertical Edge 0
- {
-
- //1. Check the ref_frame_id
- refIdx = (void*) MbQ->RefIdx; //de-ref type-punned pointer fix
- ptrQ = (int16*)refIdx;
- refIdx = (void*)MbP->RefIdx; //de-ref type-punned pointer fix
- ptrP = (int16*)refIdx;
- pStrength = Strength;
- if (ptrQ[0] != ptrP[1]) pStrength[0] = 1;
- if (ptrQ[2] != ptrP[3]) pStrength[2] = 1;
- pStrength[1] = pStrength[0];
- pStrength[3] = pStrength[2];
-
- //2. Check the non-zero coeff blocks (4x4)
- if (MbQ->nz_coeff[0] != 0 || MbP->nz_coeff[3] != 0) pStrength[0] = 2;
- if (MbQ->nz_coeff[4] != 0 || MbP->nz_coeff[7] != 0) pStrength[1] = 2;
- if (MbQ->nz_coeff[8] != 0 || MbP->nz_coeff[11] != 0) pStrength[2] = 2;
- if (MbQ->nz_coeff[12] != 0 || MbP->nz_coeff[15] != 0) pStrength[3] = 2;
-
- //3. Only need to check the mv difference
- vptr = (void*)MbQ->mvL0; // for deref type-punned pointer
- ptrQ = (int16*)vptr;
- ptrP = (int16*)(MbP->mvL0 + 3); // points to 4x4 block #3 (the 4th column)
-
- // 1st blk
- if (*pStrength == 0)
- {
- // check |mv difference| >= 4
- tmp = *ptrQ++ - *ptrP++;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
-
- tmp = *ptrQ-- - *ptrP--;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
- }
-
- pStrength++;
- ptrQ += 8;
- ptrP += 8;
-
- // 2nd blk
- if (*pStrength == 0)
- {
- // check |mv difference| >= 4
- tmp = *ptrQ++ - *ptrP++;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
-
- tmp = *ptrQ-- - *ptrP--;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
- }
-
- pStrength++;
- ptrQ += 8;
- ptrP += 8;
-
- // 3rd blk
- if (*pStrength == 0)
- {
- // check |mv difference| >= 4
- tmp = *ptrQ++ - *ptrP++;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
-
- tmp = *ptrQ-- - *ptrP--;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
- }
-
- pStrength++;
- ptrQ += 8;
- ptrP += 8;
-
- // 4th blk
- if (*pStrength == 0)
- {
- // check |mv difference| >= 4
- tmp = *ptrQ++ - *ptrP++;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
-
- tmp = *ptrQ-- - *ptrP--;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
- }
- }
- else // Horizontal Edge 0
- {
-
- //1. Check the ref_frame_id
- refIdx = (void*)MbQ->RefIdx; //de-ref type-punned pointer
- ptrQ = (int16*)refIdx;
- refIdx = (void*)MbP->RefIdx; //de-ref type-punned pointer
- ptrP = (int16*)refIdx;
- pStrength = Strength;
- if (ptrQ[0] != ptrP[2]) pStrength[0] = 1;
- if (ptrQ[1] != ptrP[3]) pStrength[2] = 1;
- pStrength[1] = pStrength[0];
- pStrength[3] = pStrength[2];
-
- //2. Check the non-zero coeff blocks (4x4)
- if (MbQ->nz_coeff[0] != 0 || MbP->nz_coeff[12] != 0) pStrength[0] = 2;
- if (MbQ->nz_coeff[1] != 0 || MbP->nz_coeff[13] != 0) pStrength[1] = 2;
- if (MbQ->nz_coeff[2] != 0 || MbP->nz_coeff[14] != 0) pStrength[2] = 2;
- if (MbQ->nz_coeff[3] != 0 || MbP->nz_coeff[15] != 0) pStrength[3] = 2;
-
- //3. Only need to check the mv difference
- vptr = (void*)MbQ->mvL0;
- ptrQ = (int16*)vptr;
- ptrP = (int16*)(MbP->mvL0 + 12); // points to 4x4 block #12 (the 4th row)
-
- // 1st blk
- if (*pStrength == 0)
- {
- // check |mv difference| >= 4
- tmp = *ptrQ++ - *ptrP++;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
-
- tmp = *ptrQ-- - *ptrP--;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
- }
-
- pStrength++;
- ptrQ += 2;
- ptrP += 2;
-
- // 2nd blk
- if (*pStrength == 0)
- {
- // check |mv difference| >= 4
- tmp = *ptrQ++ - *ptrP++;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
-
- tmp = *ptrQ-- - *ptrP--;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
- }
-
- pStrength++;
- ptrQ += 2;
- ptrP += 2;
-
- // 3rd blk
- if (*pStrength == 0)
- {
- // check |mv difference| >= 4
- tmp = *ptrQ++ - *ptrP++;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
-
- tmp = *ptrQ-- - *ptrP--;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
- }
-
- pStrength++;
- ptrQ += 2;
- ptrP += 2;
-
- // 4th blk
- if (*pStrength == 0)
- {
- // check |mv difference| >= 4
- tmp = *ptrQ++ - *ptrP++;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
-
- tmp = *ptrQ-- - *ptrP--;
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
- }
-
- } /* end of: else if(dir == 0) */
-
- } /* end of: if( !(MbP->mbMode == AVC_I4 ...) */
-}
-
-
-void GetStrength_VerticalEdges(uint8 *Strength, AVCMacroblock* MbQ)
-{
- int idx, tmp;
- int16 *ptr, *pmvx, *pmvy;
- uint8 *pnz;
- uint8 *pStrength, *pStr;
- void* refIdx;
-
- if (MbQ->mbMode == AVC_I4 || MbQ->mbMode == AVC_I16)
- {
- *((int*)Strength) = ININT_STRENGTH[1]; // Start with Strength=3. or Strength=4 for Mb-edge
- *((int*)(Strength + 4)) = ININT_STRENGTH[2];
- *((int*)(Strength + 8)) = ININT_STRENGTH[3];
- }
- else // Not intra or SP-frame
- {
-
- *((int*)Strength) = 0; // for non-intra MB, strength = 0, 1 or 2.
- *((int*)(Strength + 4)) = 0;
- *((int*)(Strength + 8)) = 0;
-
- //1. Check the ref_frame_id
- refIdx = (void*)MbQ->RefIdx; //de-ref type-punned pointer fix
- ptr = (int16*)refIdx;
- pStrength = Strength;
- if (ptr[0] != ptr[1]) pStrength[4] = 1;
- if (ptr[2] != ptr[3]) pStrength[6] = 1;
- pStrength[5] = pStrength[4];
- pStrength[7] = pStrength[6];
-
- //2. Check the nz_coeff block and mv difference
- pmvx = (int16*)(MbQ->mvL0 + 1); // points to 4x4 block #1,not #0
- pmvy = pmvx + 1;
- for (idx = 0; idx < 4; idx += 2) // unroll the loop, make 4 iterations to 2
- {
- // first/third row : 1,2,3 or 9,10,12
- // Strength = 2 for a whole row
- pnz = MbQ->nz_coeff + (idx << 2);
- if (*pnz++ != 0) *pStrength = 2;
- if (*pnz++ != 0)
- {
- *pStrength = 2;
- *(pStrength + 4) = 2;
- }
- if (*pnz++ != 0)
- {
- *(pStrength + 4) = 2;
- *(pStrength + 8) = 2;
- }
- if (*pnz != 0) *(pStrength + 8) = 2;
-
- // Then Strength = 1
- if (*pStrength == 0)
- {
- //within the same 8x8 block, no need to check the reference id
- //only need to check the |mv difference| >= 4
- tmp = *pmvx - *(pmvx - 2);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
-
- tmp = *pmvy - *(pmvy - 2);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
- }
-
- pmvx += 2;
- pmvy += 2;
- pStr = pStrength + 4;
-
- if (*pStr == 0)
- {
- //check the |mv difference| >= 4
- tmp = *pmvx - *(pmvx - 2);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
-
- tmp = *pmvy - *(pmvy - 2);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
- }
-
- pmvx += 2;
- pmvy += 2;
- pStr = pStrength + 8;
-
- if (*pStr == 0)
- {
- //within the same 8x8 block, no need to check the reference id
- //only need to check the |mv difference| >= 4
- tmp = *pmvx - *(pmvx - 2);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
-
- tmp = *pmvy - *(pmvy - 2);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
- }
-
- // Second/fourth row: 5,6,7 or 14,15,16
- // Strength = 2 for a whole row
- pnz = MbQ->nz_coeff + ((idx + 1) << 2);
- if (*pnz++ != 0) *(pStrength + 1) = 2;
- if (*pnz++ != 0)
- {
- *(pStrength + 1) = 2;
- *(pStrength + 5) = 2;
- }
- if (*pnz++ != 0)
- {
- *(pStrength + 5) = 2;
- *(pStrength + 9) = 2;
- }
- if (*pnz != 0) *(pStrength + 9) = 2;
-
- // Then Strength = 1
- pmvx += 4;
- pmvy += 4;
- pStr = pStrength + 1;
- if (*pStr == 0)
- {
- //within the same 8x8 block, no need to check the reference id
- //only need to check the |mv difference| >= 4
- tmp = *pmvx - *(pmvx - 2);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
-
- tmp = *pmvy - *(pmvy - 2);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
- }
-
- pmvx += 2;
- pmvy += 2;
- pStr = pStrength + 5;
-
- if (*pStr == 0)
- {
- //check the |mv difference| >= 4
- tmp = *pmvx - *(pmvx - 2);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
-
- tmp = *pmvy - *(pmvy - 2);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
- }
-
- pmvx += 2;
- pmvy += 2;
- pStr = pStrength + 9;
-
- if (*pStr == 0)
- {
- //within the same 8x8 block, no need to check the reference id
- //only need to check the |mv difference| >= 4
- tmp = *pmvx - *(pmvx - 2);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
-
- tmp = *pmvy - *(pmvy - 2);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
- }
-
- // update some variables for the next two rows
- pmvx += 4;
- pmvy += 4;
- pStrength += 2;
-
- } /* end of: for(idx=0; idx<2; idx++) */
-
- } /* end of: else if( MbQ->mbMode == AVC_I4 ...) */
-}
-
-
-void GetStrength_HorizontalEdges(uint8 Strength[12], AVCMacroblock* MbQ)
-{
- int idx, tmp;
- int16 *ptr, *pmvx, *pmvy;
- uint8 *pStrength, *pStr;
- void* refIdx;
-
- if (MbQ->mbMode == AVC_I4 || MbQ->mbMode == AVC_I16)
- {
- *((int*)Strength) = ININT_STRENGTH[1]; // Start with Strength=3. or Strength=4 for Mb-edge
- *((int*)(Strength + 4)) = ININT_STRENGTH[2];
- *((int*)(Strength + 8)) = ININT_STRENGTH[3];
- }
- else // Not intra or SP-frame
- {
-
- *((int*)Strength) = 0; // for non-intra MB, strength = 0, 1 or 2.
- *((int*)(Strength + 4)) = 0; // for non-intra MB, strength = 0, 1 or 2.
- *((int*)(Strength + 8)) = 0; // for non-intra MB, strength = 0, 1 or 2.
-
-
- //1. Check the ref_frame_id
- refIdx = (void*) MbQ->RefIdx; // de-ref type-punned fix
- ptr = (int16*) refIdx;
- pStrength = Strength;
- if (ptr[0] != ptr[2]) pStrength[4] = 1;
- if (ptr[1] != ptr[3]) pStrength[6] = 1;
- pStrength[5] = pStrength[4];
- pStrength[7] = pStrength[6];
-
- //2. Check the nz_coeff block and mv difference
- pmvx = (int16*)(MbQ->mvL0 + 4); // points to 4x4 block #4,not #0
- pmvy = pmvx + 1;
- for (idx = 0; idx < 4; idx += 2) // unroll the loop, make 4 iterations to 2
- {
- // first/third row : 1,2,3 or 9,10,12
- // Strength = 2 for a whole row
- if (MbQ->nz_coeff[idx] != 0) *pStrength = 2;
- if (MbQ->nz_coeff[4+idx] != 0)
- {
- *pStrength = 2;
- *(pStrength + 4) = 2;
- }
- if (MbQ->nz_coeff[8+idx] != 0)
- {
- *(pStrength + 4) = 2;
- *(pStrength + 8) = 2;
- }
- if (MbQ->nz_coeff[12+idx] != 0) *(pStrength + 8) = 2;
-
- // Then Strength = 1
- if (*pStrength == 0)
- {
- //within the same 8x8 block, no need to check the reference id
- //only need to check the |mv difference| >= 4
- tmp = *pmvx - *(pmvx - 8);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
-
- tmp = *pmvy - *(pmvy - 8);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStrength = 1;
- }
-
- pmvx += 8;
- pmvy += 8;
- pStr = pStrength + 4;
-
- if (*pStr == 0)
- {
- //check the |mv difference| >= 4
- tmp = *pmvx - *(pmvx - 8);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
-
- tmp = *pmvy - *(pmvy - 8);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
- }
-
- pmvx += 8;
- pmvy += 8;
- pStr = pStrength + 8;
-
- if (*pStr == 0)
- {
- //within the same 8x8 block, no need to check the reference id
- //only need to check the |mv difference| >= 4
- tmp = *pmvx - *(pmvx - 8);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
-
- tmp = *pmvy - *(pmvy - 8);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
- }
-
- // Second/fourth row: 5,6,7 or 14,15,16
- // Strength = 2 for a whole row
- if (MbQ->nz_coeff[idx+1] != 0) *(pStrength + 1) = 2;
- if (MbQ->nz_coeff[4+idx+1] != 0)
- {
- *(pStrength + 1) = 2;
- *(pStrength + 5) = 2;
- }
- if (MbQ->nz_coeff[8+idx+1] != 0)
- {
- *(pStrength + 5) = 2;
- *(pStrength + 9) = 2;
- }
- if (MbQ->nz_coeff[12+idx+1] != 0) *(pStrength + 9) = 2;
-
- // Then Strength = 1
- pmvx -= 14;
- pmvy -= 14; // -14 = -16 + 2
- pStr = pStrength + 1;
- if (*pStr == 0)
- {
- //within the same 8x8 block, no need to check the reference id
- //only need to check the |mv difference| >= 4
- tmp = *pmvx - *(pmvx - 8);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
-
- tmp = *pmvy - *(pmvy - 8);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
- }
-
- pmvx += 8;
- pmvy += 8;
- pStr = pStrength + 5;
-
- if (*pStr == 0)
- {
- //check the |mv difference| >= 4
- tmp = *pmvx - *(pmvx - 8);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
-
- tmp = *pmvy - *(pmvy - 8);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
- }
-
- pmvx += 8;
- pmvy += 8;
- pStr = pStrength + 9;
-
- if (*pStr == 0)
- {
- //within the same 8x8 block, no need to check the reference id
- //only need to check the |mv difference| >= 4
- tmp = *pmvx - *(pmvx - 8);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
-
- tmp = *pmvy - *(pmvy - 8);
- if (tmp < 0) tmp = -tmp;
- if (tmp >= 4) *pStr = 1;
- }
-
- // update some variables for the next two rows
- pmvx -= 14;
- pmvy -= 14; // -14 = -16 + 2
- pStrength += 2;
-
- } /* end of: for(idx=0; idx<2; idx++) */
-
- } /* end of: else if( MbQ->mbMode == AVC_I4 ...) */
-}
-
-/*
- *****************************************************************************************
- * \brief Filters one edge of 16 (luma) or 8 (chroma) pel
- *****************************************************************************************
-*/
-
-void EdgeLoop_Luma_horizontal(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch)
-{
- int pel, ap = 0, aq = 0, Strng;
- int C0, c0, dif, AbsDelta, tmp, tmp1;
- int L2 = 0, L1, L0, R0, R1, R2 = 0, RL0;
-
-
- if (Strength[0] == 4) /* INTRA strong filtering */
- {
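- // bS == 4 selects the strong filter: up to three pixels on each side of the
- // edge are rewritten with the long-tap averages, but only where the extra
- // |p0 - p2| < Beta and |p0 - q0| < (Alpha >> 2) + 2 conditions hold (the
- // ap/aq tests below); otherwise a single pixel per side is filtered.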
- for (pel = 0; pel < 16; pel++)
- {
- R0 = SrcPtr[0];
- R1 = SrcPtr[pitch];
- L0 = SrcPtr[-pitch];
- L1 = SrcPtr[-(pitch<<1)];
-
- // |R0 - R1| < Beta
- tmp1 = R0 - R1;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp = (tmp1 - Beta);
-
- //|L0 - L1| < Beta
- tmp1 = L0 - L1;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp &= (tmp1 - Beta);
-
- //|R0 - L0| < Alpha
- AbsDelta = R0 - L0;
- if (AbsDelta < 0) AbsDelta = -AbsDelta;
- tmp &= (AbsDelta - Alpha);
-
- if (tmp < 0)
- {
- AbsDelta -= ((Alpha >> 2) + 2);
- R2 = SrcPtr[pitch<<1]; //inc2
- L2 = SrcPtr[-(pitch+(pitch<<1))]; // -inc3
-
- // |R0 - R2| < Beta && |R0 - L0| < (Alpha/4 + 2)
- tmp = R0 - R2;
- if (tmp < 0) tmp = -tmp;
- aq = AbsDelta & (tmp - Beta);
-
- // |L0 - L2| < Beta && |R0 - L0| < (Alpha/4 + 2)
- tmp = L0 - L2;
- if (tmp < 0) tmp = -tmp;
- ap = AbsDelta & (tmp - Beta);
-
- if (aq < 0)
- {
- tmp = R1 + R0 + L0;
- SrcPtr[0] = (L1 + (tmp << 1) + R2 + 4) >> 3;
- tmp += R2;
- SrcPtr[pitch] = (tmp + 2) >> 2;
- SrcPtr[pitch<<1] = (((SrcPtr[(pitch+(pitch<<1))] + R2) << 1) + tmp + 4) >> 3;
- }
- else
- SrcPtr[0] = ((R1 << 1) + R0 + L1 + 2) >> 2;
-
- if (ap < 0)
- {
- tmp = L1 + R0 + L0;
- SrcPtr[-pitch] = (R1 + (tmp << 1) + L2 + 4) >> 3;
- tmp += L2;
- SrcPtr[-(pitch<<1)] = (tmp + 2) >> 2;
- SrcPtr[-(pitch+(pitch<<1))] = (((SrcPtr[-(pitch<<2)] + L2) << 1) + tmp + 4) >> 3;
- }
- else
- SrcPtr[-pitch] = ((L1 << 1) + L0 + R1 + 2) >> 2;
-
- } /* if(tmp < 0) */
-
- SrcPtr ++; // Increment to next set of pixel
-
- } /* end of: for(pel=0; pel<16; pel++) */
-
- } /* if(Strength[0] == 4) */
-
- else /* Normal filtering */
- {
- for (pel = 0; pel < 16; pel++)
- {
- Strng = Strength[pel >> 2];
- if (Strng)
- {
- R0 = SrcPtr[0];
- R1 = SrcPtr[pitch];
- L0 = SrcPtr[-pitch];
- L1 = SrcPtr[-(pitch<<1)]; // inc2
-
- //|R0 - L0| < Alpha
- tmp1 = R0 - L0;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp = (tmp1 - Alpha);
-
- // |R0 - R1| < Beta
- tmp1 = R0 - R1;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp &= (tmp1 - Beta);
-
- //|L0 - L1| < Beta
- tmp1 = L0 - L1;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp &= (tmp1 - Beta);
-
- if (tmp < 0)
- {
- R2 = SrcPtr[pitch<<1]; //inc2
- L2 = SrcPtr[-(pitch+(pitch<<1))]; // -inc3
-
- // |R0 - R2| < Beta
- tmp = R0 - R2;
- if (tmp < 0) tmp = -tmp;
- aq = tmp - Beta;
-
- // |L0 - L2| < Beta
- tmp = L0 - L2;
- if (tmp < 0) tmp = -tmp;
- ap = tmp - Beta;
-
-
- c0 = C0 = clipTable[Strng];
- if (ap < 0) c0++;
- if (aq < 0) c0++;
-
- //dif = IClip(-c0, c0, ((Delta << 2) + (L1 - R1) + 4) >> 3);
- dif = (((R0 - L0) << 2) + (L1 - R1) + 4) >> 3;
- tmp = dif + c0;
- if ((uint)tmp > (uint)c0 << 1)
- {
- tmp = ~(tmp >> 31);
- dif = (tmp & (c0 << 1)) - c0;
- }
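- // Branch-free clip of dif to [-c0, c0]: when dif + c0 falls outside
- // [0, 2*c0], the sign mask built from tmp >> 31 selects either -c0
- // (underflow) or +c0 (overflow).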
-
- //SrcPtr[0] = (uint8)IClip(0, 255, R0 - dif);
- //SrcPtr[-inc] = (uint8)IClip(0, 255, L0 + dif);
- RL0 = R0 + L0;
- R0 -= dif;
- L0 += dif;
- if ((uint)R0 > 255)
- {
- tmp = ~(R0 >> 31);
- R0 = tmp & 255;
- }
- if ((uint)L0 > 255)
- {
- tmp = ~(L0 >> 31);
- L0 = tmp & 255;
- }
- SrcPtr[-pitch] = L0;
- SrcPtr[0] = R0;
-
- if (C0 != 0) /* Multiple zeros in the clip tables */
- {
- if (aq < 0) // SrcPtr[inc] += IClip(-C0, C0,(R2 + ((RL0 + 1) >> 1) - (R1<<1)) >> 1);
- {
- R2 = (R2 + ((RL0 + 1) >> 1) - (R1 << 1)) >> 1;
- tmp = R2 + C0;
- if ((uint)tmp > (uint)C0 << 1)
- {
- tmp = ~(tmp >> 31);
- R2 = (tmp & (C0 << 1)) - C0;
- }
- SrcPtr[pitch] += R2;
- }
-
- if (ap < 0) //SrcPtr[-inc2] += IClip(-C0, C0,(L2 + ((RL0 + 1) >> 1) - (L1<<1)) >> 1);
- {
- L2 = (L2 + ((RL0 + 1) >> 1) - (L1 << 1)) >> 1;
- tmp = L2 + C0;
- if ((uint)tmp > (uint)C0 << 1)
- {
- tmp = ~(tmp >> 31);
- L2 = (tmp & (C0 << 1)) - C0;
- }
- SrcPtr[-(pitch<<1)] += L2;
- }
- }
-
- } /* if(tmp < 0) */
-
- } /* end of: if((Strng = Strength[pel >> 2])) */
-
- SrcPtr ++; // Increment to next set of pixel
-
- } /* for(pel=0; pel<16; pel++) */
-
- } /* else if(Strength[0] == 4) */
-}
-
-void EdgeLoop_Luma_vertical(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch)
-{
- int pel, ap = 1, aq = 1;
- int C0, c0, dif, AbsDelta, Strng, tmp, tmp1;
- int L2 = 0, L1, L0, R0, R1, R2 = 0;
- uint8 *ptr, *ptr1;
- uint R_in, L_in;
- uint R_out, L_out;
-
-
- if (Strength[0] == 4) /* INTRA strong filtering */
- {
-
- for (pel = 0; pel < 16; pel++)
- {
-
- // Read 8 pels
- R_in = *((uint *)SrcPtr); // R_in = {R3, R2, R1, R0}
- L_in = *((uint *)(SrcPtr - 4)); // L_in = {L0, L1, L2, L3}
- R1 = (R_in >> 8) & 0xff;
- R0 = R_in & 0xff;
- L0 = L_in >> 24;
- L1 = (L_in >> 16) & 0xff;
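- // Note: these 32-bit loads fetch 4 pels at a time and the byte extraction
- // assumes a little-endian target; SrcPtr may also not be word aligned,
- // so this relies on the platform tolerating unaligned 32-bit accesses.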
-
- // |R0 - R1| < Beta
- tmp1 = (R_in & 0xff) - R1;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp = (tmp1 - Beta);
-
-
- //|L0 - L1| < Beta
- tmp1 = (L_in >> 24) - L1;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp &= (tmp1 - Beta);
-
- //|R0 - L0| < Alpha
- AbsDelta = (R_in & 0xff) - (L_in >> 24);
- if (AbsDelta < 0) AbsDelta = -AbsDelta;
- tmp &= (AbsDelta - Alpha);
-
- if (tmp < 0)
- {
- AbsDelta -= ((Alpha >> 2) + 2);
- R2 = (R_in >> 16) & 0xff;
- L2 = (L_in >> 8) & 0xff;
-
- // |R0 - R2| < Beta && |R0 - L0| < (Alpha/4 + 2)
- tmp1 = (R_in & 0xff) - R2;
- if (tmp1 < 0) tmp1 = -tmp1;
- aq = AbsDelta & (tmp1 - Beta);
-
- // |L0 - L2| < Beta && |R0 - L0| < (Alpha/4 + 2)
- tmp1 = (L_in >> 24) - L2;
- if (tmp1 < 0) tmp1 = -tmp1;
- ap = AbsDelta & (tmp1 - Beta);
-
-
- ptr = SrcPtr;
- if (aq < 0)
- {
- R_out = (R_in >> 24) << 24; // Keep R3 at the fourth byte
-
- tmp = R0 + L0 + R1;
- R_out |= (((tmp << 1) + L1 + R2 + 4) >> 3);
- tmp += R2;
- R_out |= (((tmp + 2) >> 2) << 8);
- tmp1 = ((R_in >> 24) + R2) << 1;
- R_out |= (((tmp1 + tmp + 4) >> 3) << 16);
-
- *((uint *)SrcPtr) = R_out;
- }
- else
- *ptr = ((R1 << 1) + R0 + L1 + 2) >> 2;
-
-
- if (ap < 0)
- {
- L_out = (L_in << 24) >> 24; // Keep L3 at the first byte
-
- tmp = R0 + L0 + L1;
- L_out |= ((((tmp << 1) + R1 + L2 + 4) >> 3) << 24);
- tmp += L2;
- L_out |= (((tmp + 2) >> 2) << 16);
- tmp1 = ((L_in & 0xff) + L2) << 1;
- L_out |= (((tmp1 + tmp + 4) >> 3) << 8);
-
- *((uint *)(SrcPtr - 4)) = L_out;
- }
- else
- *(--ptr) = ((L1 << 1) + L0 + R1 + 2) >> 2;
-
- } /* if(tmp < 0) */
-
- SrcPtr += pitch; // Increment to next set of pixel
-
- } /* end of: for(pel=0; pel<16; pel++) */
-
- } /* if(Strength[0] == 4) */
-
- else /* Normal filtering */
- {
-
- for (pel = 0; pel < 16; pel++)
- {
- Strng = Strength[pel >> 2];
- if (Strng)
- {
- // Read 8 pels
- R_in = *((uint *)SrcPtr); // R_in = {R3, R2, R1, R0}
- L_in = *((uint *)(SrcPtr - 4)); // L_in = {L0, L1, L2, L3}
- R1 = (R_in >> 8) & 0xff;
- R0 = R_in & 0xff;
- L0 = L_in >> 24;
- L1 = (L_in >> 16) & 0xff;
-
- //|R0 - L0| < Alpha
- tmp = R0 - L0;
- if (tmp < 0) tmp = -tmp;
- tmp -= Alpha;
-
- // |R0 - R1| < Beta
- tmp1 = R0 - R1;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp &= (tmp1 - Beta);
-
- //|L0 - L1| < Beta
- tmp1 = L0 - L1;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp &= (tmp1 - Beta);
-
- if (tmp < 0)
- {
- L2 = SrcPtr[-3];
- R2 = SrcPtr[2];
-
- // |R0 - R2| < Beta
- tmp = R0 - R2;
- if (tmp < 0) tmp = -tmp;
- aq = tmp - Beta;
-
- // |L0 - L2| < Beta
- tmp = L0 - L2;
- if (tmp < 0) tmp = -tmp;
- ap = tmp - Beta;
-
-
- c0 = C0 = clipTable[Strng];
- if (ap < 0) c0++;
- if (aq < 0) c0++;
-
- //dif = IClip(-c0, c0, ((Delta << 2) + (L1 - R1) + 4) >> 3);
- dif = (((R0 - L0) << 2) + (L1 - R1) + 4) >> 3;
- tmp = dif + c0;
- if ((uint)tmp > (uint)c0 << 1)
- {
- tmp = ~(tmp >> 31);
- dif = (tmp & (c0 << 1)) - c0;
- }
-
- ptr = SrcPtr;
- ptr1 = SrcPtr - 1;
- //SrcPtr[0] = (uint8)IClip(0, 255, R0 - dif);
- //SrcPtr[-inc] = (uint8)IClip(0, 255, L0 + dif);
- R_in = R0 - dif;
- L_in = L0 + dif; /* cannot re-use R0 and L0 here */
- if ((uint)R_in > 255)
- {
- tmp = ~((int)R_in >> 31);
- R_in = tmp & 255;
- }
- if ((uint)L_in > 255)
- {
- tmp = ~((int)L_in >> 31);
- L_in = tmp & 255;
- }
- *ptr1-- = L_in;
- *ptr++ = R_in;
-
- if (C0 != 0) // Multiple zeros in the clip tables
- {
- if (ap < 0) //SrcPtr[-inc2] += IClip(-C0, C0,(L2 + ((RL0 + 1) >> 1) - (L1<<1)) >> 1);
- {
- L2 = (L2 + ((R0 + L0 + 1) >> 1) - (L1 << 1)) >> 1;
- tmp = L2 + C0;
- if ((uint)tmp > (uint)C0 << 1)
- {
- tmp = ~(tmp >> 31);
- L2 = (tmp & (C0 << 1)) - C0;
- }
- *ptr1 += L2;
- }
-
- if (aq < 0) // SrcPtr[inc] += IClip(-C0, C0,(R2 + ((RL0 + 1) >> 1) - (R1<<1)) >> 1);
- {
- R2 = (R2 + ((R0 + L0 + 1) >> 1) - (R1 << 1)) >> 1;
- tmp = R2 + C0;
- if ((uint)tmp > (uint)C0 << 1)
- {
- tmp = ~(tmp >> 31);
- R2 = (tmp & (C0 << 1)) - C0;
- }
- *ptr += R2;
- }
- }
-
- } /* if(tmp < 0) */
-
- } /* end of: if((Strng = Strength[pel >> 2])) */
-
- SrcPtr += pitch; // Increment to next set of pixel
-
- } /* for(pel=0; pel<16; pel++) */
-
- } /* else if(Strength[0] == 4) */
-
-}
-
-void EdgeLoop_Chroma_vertical(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch)
-{
- int pel, Strng;
- int c0, dif;
- int L1, L0, R0, R1, tmp, tmp1;
- uint8 *ptr;
- uint R_in, L_in;
-
-
- for (pel = 0; pel < 16; pel++)
- {
- Strng = Strength[pel>>2];
- if (Strng)
- {
- // Read 8 pels
- R_in = *((uint *)SrcPtr); // R_in = {R3, R2, R1, R0}
- L_in = *((uint *)(SrcPtr - 4)); // L_in = {L0, L1, L2, L3}
- R1 = (R_in >> 8) & 0xff;
- R0 = R_in & 0xff;
- L0 = L_in >> 24;
- L1 = (L_in >> 16) & 0xff;
-
- // |R0 - R1| < Beta
- tmp1 = R0 - R1;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp = (tmp1 - Beta);
-
- //|L0 - L1| < Beta
- tmp1 = L0 - L1;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp &= (tmp1 - Beta);
-
- //|R0 - L0| < Alpha
- tmp1 = R0 - L0;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp &= (tmp1 - Alpha);
-
- if (tmp < 0)
- {
- ptr = SrcPtr;
- if (Strng == 4) /* INTRA strong filtering */
- {
- *ptr-- = ((R1 << 1) + R0 + L1 + 2) >> 2;
- *ptr = ((L1 << 1) + L0 + R1 + 2) >> 2;
- }
- else /* normal filtering */
- {
- c0 = clipTable[Strng] + 1;
- //dif = IClip(-c0, c0, ((Delta << 2) + (L1 - R1) + 4) >> 3);
- dif = (((R0 - L0) << 2) + (L1 - R1) + 4) >> 3;
- tmp = dif + c0;
- if ((uint)tmp > (uint)c0 << 1)
- {
- tmp = ~(tmp >> 31);
- dif = (tmp & (c0 << 1)) - c0;
- }
-
- //SrcPtr[0] = (uint8)IClip(0, 255, R0 - dif);
- //SrcPtr[-inc] = (uint8)IClip(0, 255, L0 + dif);
- L0 += dif;
- R0 -= dif;
- if ((uint)L0 > 255)
- {
- tmp = ~(L0 >> 31);
- L0 = tmp & 255;
- }
- if ((uint)R0 > 255)
- {
- tmp = ~(R0 >> 31);
- R0 = tmp & 255;
- }
-
- *ptr-- = R0;
- *ptr = L0;
- }
- }
- pel ++;
- SrcPtr += pitch; // Increment to next set of pixel
-
- } /* end of: if((Strng = Strength[pel >> 2])) */
- else
- {
- pel += 3;
- SrcPtr += (pitch << 1); //PtrInc << 1;
- }
-
- } /* end of: for(pel=0; pel<16; pel++) */
-}
-
-
-void EdgeLoop_Chroma_horizontal(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch)
-{
- int pel, Strng;
- int c0, dif;
- int L1, L0, R0, R1, tmp, tmp1;
-
- for (pel = 0; pel < 16; pel++)
- {
- Strng = Strength[pel>>2];
- if (Strng)
- {
- R0 = SrcPtr[0];
- L0 = SrcPtr[-pitch];
- L1 = SrcPtr[-(pitch<<1)]; //inc2
- R1 = SrcPtr[pitch];
-
- // |R0 - R1| < Beta
- tmp1 = R0 - R1;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp = (tmp1 - Beta);
-
- //|L0 - L1| < Beta
- tmp1 = L0 - L1;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp &= (tmp1 - Beta);
-
- //|R0 - L0| < Alpha
- tmp1 = R0 - L0;
- if (tmp1 < 0) tmp1 = -tmp1;
- tmp &= (tmp1 - Alpha);
-
- if (tmp < 0)
- {
- if (Strng == 4) /* INTRA strong filtering */
- {
- SrcPtr[0] = ((R1 << 1) + R0 + L1 + 2) >> 2;
- SrcPtr[-pitch] = ((L1 << 1) + L0 + R1 + 2) >> 2;
- }
- else /* normal filtering */
- {
- c0 = clipTable[Strng] + 1;
- //dif = IClip(-c0, c0, ((Delta << 2) + (L1 - R1) + 4) >> 3);
- dif = (((R0 - L0) << 2) + (L1 - R1) + 4) >> 3;
- tmp = dif + c0;
- if ((uint)tmp > (uint)c0 << 1)
- {
- tmp = ~(tmp >> 31);
- dif = (tmp & (c0 << 1)) - c0;
- }
-
- //SrcPtr[-inc] = (uint8)IClip(0, 255, L0 + dif);
- //SrcPtr[0] = (uint8)IClip(0, 255, R0 - dif);
- L0 += dif;
- R0 -= dif;
- if ((uint)L0 > 255)
- {
- tmp = ~(L0 >> 31);
- L0 = tmp & 255;
- }
- if ((uint)R0 > 255)
- {
- tmp = ~(R0 >> 31);
- R0 = tmp & 255;
- }
- SrcPtr[0] = R0;
- SrcPtr[-pitch] = L0;
- }
- }
-
- pel ++;
- SrcPtr ++; // Increment to next set of pixel
-
- } /* end of: if((Strng = Strength[pel >> 2])) */
- else
- {
- pel += 3;
- SrcPtr += 2;
- }
-
- } /* end of: for(pel=0; pel<16; pel++) */
-}
-
-
-
-
diff --git a/media/libstagefright/codecs/avc/common/src/dpb.cpp b/media/libstagefright/codecs/avc/common/src/dpb.cpp
deleted file mode 100644
index b5d0dfe..0000000
--- a/media/libstagefright/codecs/avc/common/src/dpb.cpp
+++ /dev/null
@@ -1,724 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avclib_common.h"
-
-#define DPB_MEM_ATTR 0
-
-AVCStatus InitDPB(AVCHandle *avcHandle, AVCCommonObj *video, int FrameHeightInMbs, int PicWidthInMbs, bool padding)
-{
- AVCDecPicBuffer *dpb = video->decPicBuf;
- int level, framesize, num_fs;
- void *userData = avcHandle->userData;
-#ifndef PV_MEMORY_POOL
- uint32 addr;
-#endif
- uint16 refIdx = 0;
- level = video->currSeqParams->level_idc;
-
- for (num_fs = 0; num_fs < MAX_FS; num_fs++)
- {
- dpb->fs[num_fs] = NULL;
- }
-
- framesize = (int)(((FrameHeightInMbs * PicWidthInMbs) << 7) * 3);
- if (padding)
- {
- video->padded_size = (int)((((FrameHeightInMbs + 2) * (PicWidthInMbs + 2)) << 7) * 3) - framesize;
- }
- else
- {
- video->padded_size = 0;
- }
-
-#ifndef PV_MEMORY_POOL
- if (dpb->decoded_picture_buffer)
- {
- avcHandle->CBAVC_Free(userData, (int)dpb->decoded_picture_buffer);
- dpb->decoded_picture_buffer = NULL;
- }
-#endif
- /* need to allocate one extra frame for the current frame; the DPB size only accounts for reference frames */
-
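- // MaxDPBX2 appears to hold the level's maximum DPB size in 512-byte units;
- // one 4:2:0 frame occupies FrameHeightInMbs * PicWidthInMbs * 384 bytes, so
- // (MaxDPBX2 << 2) / (3 * MBs) == (MaxDPBX2 * 512) / (MBs * 384) frames fit,
- // plus one extra slot for the frame currently being decoded.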
- dpb->num_fs = (uint32)(MaxDPBX2[mapLev2Idx[level]] << 2) / (3 * FrameHeightInMbs * PicWidthInMbs) + 1;
- if (dpb->num_fs > MAX_FS)
- {
- dpb->num_fs = MAX_FS;
- }
-
- if (video->currSeqParams->num_ref_frames + 1 > (uint32)dpb->num_fs)
- {
- dpb->num_fs = video->currSeqParams->num_ref_frames + 1;
- }
-
- dpb->dpb_size = dpb->num_fs * (framesize + video->padded_size);
-// dpb->dpb_size = (uint32)MaxDPBX2[mapLev2Idx[level]]*512 + framesize;
-
-#ifndef PV_MEMORY_POOL
- dpb->decoded_picture_buffer = (uint8*) avcHandle->CBAVC_Malloc(userData, dpb->dpb_size, 100/*DPB_MEM_ATTR*/);
-
- if (dpb->decoded_picture_buffer == NULL || ((intptr_t)dpb->decoded_picture_buffer & 0x3)) // not word aligned
- return AVC_MEMORY_FAIL;
-#endif
- dpb->used_size = 0;
- num_fs = 0;
-
- while (num_fs < dpb->num_fs)
- {
- /* fs is an array pointers to AVCDecPicture */
- dpb->fs[num_fs] = (AVCFrameStore*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCFrameStore), 101/*DEFAULT_ATTR*/);
- if (dpb->fs[num_fs] == NULL)
- {
- return AVC_MEMORY_FAIL;
- }
-#ifndef PV_MEMORY_POOL
- /* assign the actual memory for Sl, Scb, Scr */
- dpb->fs[num_fs]->base_dpb = dpb->decoded_picture_buffer + dpb->used_size;
-#endif
- dpb->fs[num_fs]->IsReference = 0;
- dpb->fs[num_fs]->IsLongTerm = 0;
- dpb->fs[num_fs]->IsOutputted = 3;
- dpb->fs[num_fs]->frame.RefIdx = refIdx++; /* this value will remain unchanged throughout the encoding session */
- dpb->fs[num_fs]->frame.picType = AVC_FRAME;
- dpb->fs[num_fs]->frame.isLongTerm = 0;
- dpb->fs[num_fs]->frame.isReference = 0;
- video->RefPicList0[num_fs] = &(dpb->fs[num_fs]->frame);
- dpb->fs[num_fs]->frame.padded = 0;
- dpb->used_size += (framesize + video->padded_size);
- num_fs++;
- }
-
- return AVC_SUCCESS;
-}
-
-OSCL_EXPORT_REF AVCStatus AVCConfigureSequence(AVCHandle *avcHandle, AVCCommonObj *video, bool padding)
-{
- void *userData = avcHandle->userData;
- AVCDecPicBuffer *dpb = video->decPicBuf;
- int framesize, ii; /* size of one frame */
- uint PicWidthInMbs, PicHeightInMapUnits, FrameHeightInMbs, PicSizeInMapUnits;
- uint num_fs;
- /* derived variables from SPS */
- PicWidthInMbs = video->currSeqParams->pic_width_in_mbs_minus1 + 1;
- PicHeightInMapUnits = video->currSeqParams->pic_height_in_map_units_minus1 + 1 ;
- FrameHeightInMbs = (2 - video->currSeqParams->frame_mbs_only_flag) * PicHeightInMapUnits ;
- PicSizeInMapUnits = PicWidthInMbs * PicHeightInMapUnits ;
-
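- // Re-allocate the DPB and the macroblock structures only when the picture size
- // or the level changes; otherwise the buffers from the previous sequence are reused.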
- if (video->PicSizeInMapUnits != PicSizeInMapUnits || video->currSeqParams->level_idc != video->level_idc)
- {
- /* make sure you mark all the frames as unused for reference for flushing*/
- for (ii = 0; ii < dpb->num_fs; ii++)
- {
- dpb->fs[ii]->IsReference = 0;
- dpb->fs[ii]->IsOutputted |= 0x02;
- }
-
- num_fs = (uint32)(MaxDPBX2[(uint32)mapLev2Idx[video->currSeqParams->level_idc]] << 2) / (3 * PicSizeInMapUnits) + 1;
- if (num_fs >= MAX_FS)
- {
- num_fs = MAX_FS;
- }
-#ifdef PV_MEMORY_POOL
- if (padding)
- {
- avcHandle->CBAVC_DPBAlloc(avcHandle->userData,
- PicSizeInMapUnits + ((PicWidthInMbs + 2) << 1) + (PicHeightInMapUnits << 1), num_fs);
- }
- else
- {
- avcHandle->CBAVC_DPBAlloc(avcHandle->userData, PicSizeInMapUnits, num_fs);
- }
-#endif
- CleanUpDPB(avcHandle, video);
- if (InitDPB(avcHandle, video, FrameHeightInMbs, PicWidthInMbs, padding) != AVC_SUCCESS)
- {
- return AVC_FAIL;
- }
- /* Allocate video->mblock up to PicSizeInMbs and populate the structure, such as the neighboring MB pointers. */
- framesize = (FrameHeightInMbs * PicWidthInMbs);
- if (video->mblock)
- {
- avcHandle->CBAVC_Free(userData, video->mblock);
- video->mblock = NULL;
- }
- video->mblock = (AVCMacroblock*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCMacroblock) * framesize, DEFAULT_ATTR);
- if (video->mblock == NULL)
- {
- return AVC_FAIL;
- }
- for (ii = 0; ii < framesize; ii++)
- {
- video->mblock[ii].slice_id = -1;
- }
- /* Allocate memory for intra prediction */
-#ifdef MB_BASED_DEBLOCK
- video->intra_pred_top = (uint8*) avcHandle->CBAVC_Malloc(userData, PicWidthInMbs << 4, FAST_MEM_ATTR);
- if (video->intra_pred_top == NULL)
- {
- return AVC_FAIL;
- }
- video->intra_pred_top_cb = (uint8*) avcHandle->CBAVC_Malloc(userData, PicWidthInMbs << 3, FAST_MEM_ATTR);
- if (video->intra_pred_top_cb == NULL)
- {
- return AVC_FAIL;
- }
- video->intra_pred_top_cr = (uint8*) avcHandle->CBAVC_Malloc(userData, PicWidthInMbs << 3, FAST_MEM_ATTR);
- if (video->intra_pred_top_cr == NULL)
- {
- return AVC_FAIL;
- }
-
-#endif
- /* Allocate the slice group map */
-
- if (video->MbToSliceGroupMap)
- {
- avcHandle->CBAVC_Free(userData, video->MbToSliceGroupMap);
- video->MbToSliceGroupMap = NULL;
- }
- video->MbToSliceGroupMap = (int*) avcHandle->CBAVC_Malloc(userData, sizeof(uint) * PicSizeInMapUnits * 2, 7/*DEFAULT_ATTR*/);
- if (video->MbToSliceGroupMap == NULL)
- {
- return AVC_FAIL;
- }
- video->PicSizeInMapUnits = PicSizeInMapUnits;
- video->level_idc = video->currSeqParams->level_idc;
-
- }
- return AVC_SUCCESS;
-}
-
-OSCL_EXPORT_REF AVCStatus CleanUpDPB(AVCHandle *avcHandle, AVCCommonObj *video)
-{
- AVCDecPicBuffer *dpb = video->decPicBuf;
- int ii;
- void *userData = avcHandle->userData;
-
- for (ii = 0; ii < MAX_FS; ii++)
- {
- if (dpb->fs[ii] != NULL)
- {
- avcHandle->CBAVC_Free(userData, dpb->fs[ii]);
- dpb->fs[ii] = NULL;
- }
- }
-#ifndef PV_MEMORY_POOL
- if (dpb->decoded_picture_buffer)
- {
- avcHandle->CBAVC_Free(userData, dpb->decoded_picture_buffer);
- dpb->decoded_picture_buffer = NULL;
- }
-#endif
- dpb->used_size = 0;
- dpb->dpb_size = 0;
-
- return AVC_SUCCESS;
-}
-
-OSCL_EXPORT_REF AVCStatus DPBInitBuffer(AVCHandle *avcHandle, AVCCommonObj *video)
-{
- AVCDecPicBuffer *dpb = video->decPicBuf;
- int ii, status;
-
- /* Before doing any decoding, check if there's a frame memory available */
- /* look for next unused dpb->fs, or complementary field pair */
- /* video->currPic is assigned to this */
-
- /* There's also restriction on the frame_num, see page 59 of JVT-I1010.doc. */
-
- for (ii = 0; ii < dpb->num_fs; ii++)
- {
- /* looking for one that is not used for reference and has already been outputted */
- if (dpb->fs[ii]->IsReference == 0 && dpb->fs[ii]->IsOutputted == 3)
- {
- video->currFS = dpb->fs[ii];
-#ifdef PV_MEMORY_POOL
- status = avcHandle->CBAVC_FrameBind(avcHandle->userData, ii, &(video->currFS->base_dpb));
- if (status == AVC_FAIL)
- {
- return AVC_NO_BUFFER; /* this should not happen */
- }
-#endif
- break;
- }
- }
- if (ii == dpb->num_fs)
- {
- return AVC_PICTURE_OUTPUT_READY; /* no empty frame available */
- }
- return AVC_SUCCESS;
-}
-
-OSCL_EXPORT_REF void DPBInitPic(AVCCommonObj *video, int CurrPicNum)
-{
- int offset = 0;
- int offsetc = 0;
- int luma_framesize;
- /* this part has to be set here, assuming that slice header and POC have been decoded. */
- /* used in GetOutput API */
- video->currFS->PicOrderCnt = video->PicOrderCnt;
- video->currFS->FrameNum = video->sliceHdr->frame_num;
- video->currFS->FrameNumWrap = CurrPicNum; // MC_FIX
- /* initialize everything to zero */
- video->currFS->IsOutputted = 0;
- video->currFS->IsReference = 0;
- video->currFS->IsLongTerm = 0;
- video->currFS->frame.isReference = FALSE;
- video->currFS->frame.isLongTerm = FALSE;
-
- /* initialize the pixel pointer to NULL */
- video->currFS->frame.Sl = video->currFS->frame.Scb = video->currFS->frame.Scr = NULL;
-
- /* determine video->currPic */
- /* assign dbp->base_dpb to fs[i]->frame.Sl, Scb, Scr .*/
- /* For PicSizeInMbs, see DecodeSliceHeader() */
-
- video->currPic = &(video->currFS->frame);
-
- video->currPic->padded = 0; // reset this flag to not-padded
-
- if (video->padded_size)
- {
- offset = ((video->PicWidthInSamplesL + 32) << 4) + 16; // offset to the origin
- offsetc = (offset >> 2) + 4;
- luma_framesize = (int)((((video->FrameHeightInMbs + 2) * (video->PicWidthInMbs + 2)) << 8));
- }
- else
- luma_framesize = video->PicSizeInMbs << 8;
-
-
- video->currPic->Sl = video->currFS->base_dpb + offset;
- video->currPic->Scb = video->currFS->base_dpb + luma_framesize + offsetc;
- video->currPic->Scr = video->currPic->Scb + (luma_framesize >> 2);
- video->currPic->pitch = video->PicWidthInSamplesL + (video->padded_size == 0 ? 0 : 32);
-
-
- video->currPic->height = video->PicHeightInSamplesL;
- video->currPic->width = video->PicWidthInSamplesL;
- video->currPic->PicNum = CurrPicNum;
-}
-
-/* to release skipped frame after encoding */
-OSCL_EXPORT_REF void DPBReleaseCurrentFrame(AVCHandle *avcHandle, AVCCommonObj *video)
-{
- AVCDecPicBuffer *dpb = video->decPicBuf;
- int ii;
-
- video->currFS->IsOutputted = 3; // return this buffer.
-
-#ifdef PV_MEMORY_POOL /* for non-memory pool, no need to do anything */
-
- /* search for current frame index */
- ii = dpb->num_fs;
- while (ii--)
- {
- if (dpb->fs[ii] == video->currFS)
- {
- avcHandle->CBAVC_FrameUnbind(avcHandle->userData, ii);
- break;
- }
- }
-#endif
-
- return ;
-}
-
-/* see subclause 8.2.5.1 */
-OSCL_EXPORT_REF AVCStatus StorePictureInDPB(AVCHandle *avcHandle, AVCCommonObj *video)
-{
- AVCStatus status;
- AVCDecPicBuffer *dpb = video->decPicBuf;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- int ii, num_ref;
-
- /* number 1 of 8.2.5.1, we handle gaps in frame_num differently without using the memory */
- /* to be done!!!! */
-
- /* number 3 of 8.2.5.1 */
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- for (ii = 0; ii < dpb->num_fs; ii++)
- {
- if (dpb->fs[ii] != video->currFS) /* not current frame */
- {
- dpb->fs[ii]->IsReference = 0; /* mark as unused for reference */
- dpb->fs[ii]->IsLongTerm = 0; /* but still used until output */
- dpb->fs[ii]->IsOutputted |= 0x02;
-#ifdef PV_MEMORY_POOL
- if (dpb->fs[ii]->IsOutputted == 3)
- {
- avcHandle->CBAVC_FrameUnbind(avcHandle->userData, ii);
- }
-#endif
- }
- }
-
- video->currPic->isReference = TRUE;
- video->currFS->IsReference = 3;
-
- if (sliceHdr->long_term_reference_flag == 0)
- {
- video->currPic->isLongTerm = FALSE;
- video->currFS->IsLongTerm = 0;
- video->MaxLongTermFrameIdx = -1;
- }
- else
- {
- video->currPic->isLongTerm = TRUE;
- video->currFS->IsLongTerm = 3;
- video->currFS->LongTermFrameIdx = 0;
- video->MaxLongTermFrameIdx = 0;
- }
- if (sliceHdr->no_output_of_prior_pics_flag)
- {
- for (ii = 0; ii < dpb->num_fs; ii++)
- {
- if (dpb->fs[ii] != video->currFS) /* not current frame */
- {
- dpb->fs[ii]->IsOutputted = 3;
-#ifdef PV_MEMORY_POOL
- avcHandle->CBAVC_FrameUnbind(avcHandle->userData, ii);
-#endif
- }
- }
- }
- video->mem_mgr_ctrl_eq_5 = TRUE; /* flush reference frames MC_FIX */
- }
- else
- {
- if (video->currPic->isReference == TRUE)
- {
- if (sliceHdr->adaptive_ref_pic_marking_mode_flag == 0)
- {
- status = sliding_window_process(avcHandle, video, dpb); /* we may have to do this after adaptive_memory_marking */
- }
- else
- {
- status = adaptive_memory_marking(avcHandle, video, dpb, sliceHdr);
- }
- if (status != AVC_SUCCESS)
- {
- return status;
- }
- }
- }
- /* number 4 of 8.2.5.1 */
- /* This basically says every frame must be at least used for short-term ref. */
- /* Need to be revisited!!! */
- /* look at insert_picture_in_dpb() */
-
-
-
- if (video->nal_unit_type != AVC_NALTYPE_IDR && video->currPic->isLongTerm == FALSE)
- {
- if (video->currPic->isReference)
- {
- video->currFS->IsReference = 3;
- }
- else
- {
- video->currFS->IsReference = 0;
- }
- video->currFS->IsLongTerm = 0;
- }
-
- /* check if number of reference frames doesn't exceed num_ref_frames */
- num_ref = 0;
- for (ii = 0; ii < dpb->num_fs; ii++)
- {
- if (dpb->fs[ii]->IsReference)
- {
- num_ref++;
- }
- }
-
- if (num_ref > (int)video->currSeqParams->num_ref_frames)
- {
- return AVC_FAIL; /* out of range */
- }
-
- return AVC_SUCCESS;
-}
-
-
-AVCStatus sliding_window_process(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb)
-{
- int ii, numShortTerm, numLongTerm;
- int32 MinFrameNumWrap;
- int MinIdx;
-
-
- numShortTerm = 0;
- numLongTerm = 0;
- for (ii = 0; ii < dpb->num_fs; ii++)
- {
- if (dpb->fs[ii] != video->currFS) /* do not count the current frame */
- {
- if (dpb->fs[ii]->IsLongTerm)
- {
- numLongTerm++;
- }
- else if (dpb->fs[ii]->IsReference)
- {
- numShortTerm++;
- }
- }
- }
-
- while (numShortTerm + numLongTerm >= (int)video->currSeqParams->num_ref_frames)
- {
- /* get short-term ref frame with smallest PicOrderCnt */
- /* this doesn't work for an all-I-slice clip since PicOrderCnt will not be initialized */
-
- MinFrameNumWrap = 0x7FFFFFFF;
- MinIdx = -1;
- for (ii = 0; ii < dpb->num_fs; ii++)
- {
- if (dpb->fs[ii]->IsReference && !dpb->fs[ii]->IsLongTerm)
- {
- if (dpb->fs[ii]->FrameNumWrap < MinFrameNumWrap)
- {
- MinFrameNumWrap = dpb->fs[ii]->FrameNumWrap;
- MinIdx = ii;
- }
- }
- }
- if (MinIdx < 0) /* something wrong, impossible */
- {
- return AVC_FAIL;
- }
-
- /* mark the frame with smallest PicOrderCnt to be unused for reference */
- dpb->fs[MinIdx]->IsReference = 0;
- dpb->fs[MinIdx]->IsLongTerm = 0;
- dpb->fs[MinIdx]->frame.isReference = FALSE;
- dpb->fs[MinIdx]->frame.isLongTerm = FALSE;
- dpb->fs[MinIdx]->IsOutputted |= 0x02;
-#ifdef PV_MEMORY_POOL
- if (dpb->fs[MinIdx]->IsOutputted == 3)
- {
- avcHandle->CBAVC_FrameUnbind(avcHandle->userData, MinIdx);
- }
-#endif
- numShortTerm--;
- }
- return AVC_SUCCESS;
-}
-
-/* see subclause 8.2.5.4 */
-AVCStatus adaptive_memory_marking(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, AVCSliceHeader *sliceHdr)
-{
- int ii;
-
- ii = 0;
- while (ii < MAX_DEC_REF_PIC_MARKING && sliceHdr->memory_management_control_operation[ii] != 0)
- {
- switch (sliceHdr->memory_management_control_operation[ii])
- {
- case 1:
- MemMgrCtrlOp1(avcHandle, video, dpb, sliceHdr->difference_of_pic_nums_minus1[ii]);
- // update_ref_list(dpb);
- break;
- case 2:
- MemMgrCtrlOp2(avcHandle, dpb, sliceHdr->long_term_pic_num[ii]);
- break;
- case 3:
- MemMgrCtrlOp3(avcHandle, video, dpb, sliceHdr->difference_of_pic_nums_minus1[ii], sliceHdr->long_term_frame_idx[ii]);
- break;
- case 4:
- MemMgrCtrlOp4(avcHandle, video, dpb, sliceHdr->max_long_term_frame_idx_plus1[ii]);
- break;
- case 5:
- MemMgrCtrlOp5(avcHandle, video, dpb);
- video->currFS->FrameNum = 0; //
- video->currFS->PicOrderCnt = 0;
- break;
- case 6:
- MemMgrCtrlOp6(avcHandle, video, dpb, sliceHdr->long_term_frame_idx[ii]);
- break;
- }
- ii++;
- }
-
- if (ii == MAX_DEC_REF_PIC_MARKING)
- {
- return AVC_FAIL; /* exceed the limit */
- }
-
- return AVC_SUCCESS;
-}
-
-
-/* see subclause 8.2.5.4.1, mark short-term picture as "unused for reference" */
-void MemMgrCtrlOp1(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, int difference_of_pic_nums_minus1)
-{
- int picNumX, ii;
-
- picNumX = video->CurrPicNum - (difference_of_pic_nums_minus1 + 1);
-
- for (ii = 0; ii < dpb->num_fs; ii++)
- {
- if (dpb->fs[ii]->IsReference == 3 && dpb->fs[ii]->IsLongTerm == 0)
- {
- if (dpb->fs[ii]->frame.PicNum == picNumX)
- {
- unmark_for_reference(avcHandle, dpb, ii);
- return ;
- }
- }
- }
-
- return ;
-}
-
-/* see subclause 8.2.5.4.2 mark long-term picture as "unused for reference" */
-void MemMgrCtrlOp2(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, int long_term_pic_num)
-{
- int ii;
-
- for (ii = 0; ii < dpb->num_fs; ii++)
- {
- if (dpb->fs[ii]->IsLongTerm == 3)
- {
- if (dpb->fs[ii]->frame.LongTermPicNum == long_term_pic_num)
- {
- unmark_for_reference(avcHandle, dpb, ii);
- }
- }
- }
-}
-
-/* see subclause 8.2.5.4.3 assign LongTermFrameIdx to a short-term ref picture */
-void MemMgrCtrlOp3(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint difference_of_pic_nums_minus1,
- uint long_term_frame_idx)
-{
- int picNumX, ii;
-
- picNumX = video->CurrPicNum - (difference_of_pic_nums_minus1 + 1);
-
- /* look for fs[i] with long_term_frame_idx */
-
- unmark_long_term_frame_for_reference_by_frame_idx(avcHandle, dpb, long_term_frame_idx);
-
-
- /* now mark the picture with picNumX to long term frame idx */
-
- for (ii = 0; ii < dpb->num_fs; ii++)
- {
- if (dpb->fs[ii]->IsReference == 3)
- {
- if ((dpb->fs[ii]->frame.isLongTerm == FALSE) && (dpb->fs[ii]->frame.PicNum == picNumX))
- {
- dpb->fs[ii]->LongTermFrameIdx = long_term_frame_idx;
- dpb->fs[ii]->frame.LongTermPicNum = long_term_frame_idx;
-
- dpb->fs[ii]->frame.isLongTerm = TRUE;
-
- dpb->fs[ii]->IsLongTerm = 3;
- return;
- }
- }
- }
-
-}
-
-/* see subclause 8.2.5.4.4, MaxLongTermFrameIdx */
-void MemMgrCtrlOp4(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint max_long_term_frame_idx_plus1)
-{
- int ii;
-
- video->MaxLongTermFrameIdx = max_long_term_frame_idx_plus1 - 1;
-
- /* then mark long term frame with exceeding LongTermFrameIdx to unused for reference. */
- for (ii = 0; ii < dpb->num_fs; ii++)
- {
- if (dpb->fs[ii]->IsLongTerm && dpb->fs[ii] != video->currFS)
- {
- if (dpb->fs[ii]->LongTermFrameIdx > video->MaxLongTermFrameIdx)
- {
- unmark_for_reference(avcHandle, dpb, ii);
- }
- }
- }
-}
-
-/* see subclause 8.2.5.4.5 mark all reference picture as "unused for reference" and setting
-MaxLongTermFrameIdx to "no long-term frame indices" */
-void MemMgrCtrlOp5(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb)
-{
- int ii;
-
- video->MaxLongTermFrameIdx = -1;
- for (ii = 0; ii < dpb->num_fs; ii++) /* including the current frame ??????*/
- {
- if (dpb->fs[ii] != video->currFS) // MC_FIX
- {
- unmark_for_reference(avcHandle, dpb, ii);
- }
- }
-
- video->mem_mgr_ctrl_eq_5 = TRUE;
-}
-
-/* see subclause 8.2.5.4.6 assign long-term frame index to the current picture */
-void MemMgrCtrlOp6(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint long_term_frame_idx)
-{
-
- unmark_long_term_frame_for_reference_by_frame_idx(avcHandle, dpb, long_term_frame_idx);
- video->currFS->IsLongTerm = 3;
- video->currFS->IsReference = 3;
-
- video->currPic->isLongTerm = TRUE;
- video->currPic->isReference = TRUE;
- video->currFS->LongTermFrameIdx = long_term_frame_idx;
-}
-
-
-void unmark_for_reference(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, uint idx)
-{
-
- AVCFrameStore *fs = dpb->fs[idx];
- fs->frame.isReference = FALSE;
- fs->frame.isLongTerm = FALSE;
-
- fs->IsLongTerm = 0;
- fs->IsReference = 0;
- fs->IsOutputted |= 0x02;
-#ifdef PV_MEMORY_POOL
- if (fs->IsOutputted == 3)
- {
- avcHandle->CBAVC_FrameUnbind(avcHandle->userData, idx);
- }
-#endif
- return ;
-}
-
-void unmark_long_term_frame_for_reference_by_frame_idx(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, uint long_term_frame_idx)
-{
- int ii;
- for (ii = 0; ii < dpb->num_fs; ii++)
- {
-
- if (dpb->fs[ii]->IsLongTerm && (dpb->fs[ii]->LongTermFrameIdx == (int)long_term_frame_idx))
- {
- unmark_for_reference(avcHandle, dpb, ii);
- }
-
- }
-}
-
-
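For readers auditing this removal: the sliding_window_process() deleted above reduces to one rule, namely evict the short-term reference frame with the smallest FrameNumWrap once the count of short-term plus long-term references reaches num_ref_frames. The minimal C++ sketch below shows only that eviction rule; FrameSlot and evictOldestShortTerm are illustrative names, not the deleted AVCFrameStore/dpb API, and the PV_MEMORY_POOL unbinding callback is intentionally omitted.

#include <climits>
#include <vector>

struct FrameSlot {
    bool isReference  = false;  // still used for reference
    bool isLongTerm   = false;  // long-term reference
    int  frameNumWrap = 0;      // wrapped frame number (can be negative)
};

// Returns the index of the evicted slot, or -1 if there was still room.
int evictOldestShortTerm(std::vector<FrameSlot>& dpb, int numRefFrames) {
    int shortTerm = 0, longTerm = 0;
    for (const FrameSlot& fs : dpb) {
        if (fs.isLongTerm)       ++longTerm;
        else if (fs.isReference) ++shortTerm;
    }
    if (shortTerm + longTerm < numRefFrames) return -1;  // nothing to evict yet

    // Find the short-term reference with the smallest FrameNumWrap.
    int minIdx = -1, minWrap = INT_MAX;
    for (int i = 0; i < (int)dpb.size(); ++i) {
        if (dpb[i].isReference && !dpb[i].isLongTerm && dpb[i].frameNumWrap < minWrap) {
            minWrap = dpb[i].frameNumWrap;
            minIdx  = i;
        }
    }
    if (minIdx >= 0) {
        dpb[minIdx].isReference = false;  // mark "unused for reference"
        dpb[minIdx].isLongTerm  = false;
    }
    return minIdx;
}

The real function also flags the slot as outputted and may unbind its frame buffer; the sketch keeps only the marking decision.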
diff --git a/media/libstagefright/codecs/avc/common/src/fmo.cpp b/media/libstagefright/codecs/avc/common/src/fmo.cpp
deleted file mode 100644
index d66eba3..0000000
--- a/media/libstagefright/codecs/avc/common/src/fmo.cpp
+++ /dev/null
@@ -1,249 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include <string.h>
-
-#include "avclib_common.h"
-
-/* see subclause 8.2.2 Decoding process for macroblock to slice group map */
-OSCL_EXPORT_REF AVCStatus FMOInit(AVCCommonObj *video)
-{
- AVCPicParamSet *currPPS = video->currPicParams;
- int *MbToSliceGroupMap = video->MbToSliceGroupMap;
- int PicSizeInMapUnits = video->PicSizeInMapUnits;
- int PicWidthInMbs = video->PicWidthInMbs;
-
- if (currPPS->num_slice_groups_minus1 == 0)
- {
- memset(video->MbToSliceGroupMap, 0, video->PicSizeInMapUnits*sizeof(uint));
- }
- else
- {
- switch (currPPS->slice_group_map_type)
- {
- case 0:
- FmoGenerateType0MapUnitMap(MbToSliceGroupMap, currPPS->run_length_minus1, currPPS->num_slice_groups_minus1, PicSizeInMapUnits);
- break;
- case 1:
- FmoGenerateType1MapUnitMap(MbToSliceGroupMap, PicWidthInMbs, currPPS->num_slice_groups_minus1, PicSizeInMapUnits);
- break;
- case 2:
- FmoGenerateType2MapUnitMap(currPPS, MbToSliceGroupMap, PicWidthInMbs, currPPS->num_slice_groups_minus1, PicSizeInMapUnits);
- break;
- case 3:
- FmoGenerateType3MapUnitMap(video, currPPS, MbToSliceGroupMap, PicWidthInMbs);
- break;
- case 4:
- FmoGenerateType4MapUnitMap(MbToSliceGroupMap, video->MapUnitsInSliceGroup0, currPPS->slice_group_change_direction_flag, PicSizeInMapUnits);
- break;
- case 5:
- FmoGenerateType5MapUnitMap(MbToSliceGroupMap, video, currPPS->slice_group_change_direction_flag, PicSizeInMapUnits);
- break;
- case 6:
- FmoGenerateType6MapUnitMap(MbToSliceGroupMap, (int*)currPPS->slice_group_id, PicSizeInMapUnits);
- break;
- default:
- return AVC_FAIL; /* out of range, shouldn't come this far */
- }
- }
-
- return AVC_SUCCESS;
-}
-
-/* see subclause 8.2.2.1 interleaved slice group map type*/
-void FmoGenerateType0MapUnitMap(int *mapUnitToSliceGroupMap, uint *run_length_minus1, uint num_slice_groups_minus1, uint PicSizeInMapUnits)
-{
- uint iGroup, j;
- uint i = 0;
- do
- {
- for (iGroup = 0;
- (iGroup <= num_slice_groups_minus1) && (i < PicSizeInMapUnits);
- i += run_length_minus1[iGroup++] + 1)
- {
- for (j = 0; j <= run_length_minus1[ iGroup ] && i + j < PicSizeInMapUnits; j++)
- mapUnitToSliceGroupMap[i+j] = iGroup;
- }
- }
- while (i < PicSizeInMapUnits);
-}
-
-/* see subclause 8.2.2.2 dispersed slice group map type*/
-void FmoGenerateType1MapUnitMap(int *mapUnitToSliceGroupMap, int PicWidthInMbs, uint num_slice_groups_minus1, uint PicSizeInMapUnits)
-{
- uint i;
- for (i = 0; i < PicSizeInMapUnits; i++)
- {
- mapUnitToSliceGroupMap[i] = ((i % PicWidthInMbs) + (((i / PicWidthInMbs) * (num_slice_groups_minus1 + 1)) / 2))
- % (num_slice_groups_minus1 + 1);
- }
-}
-
-/* see subclause 8.2.2.3 foreground with left-over slice group map type */
-void FmoGenerateType2MapUnitMap(AVCPicParamSet *pps, int *mapUnitToSliceGroupMap, int PicWidthInMbs,
- uint num_slice_groups_minus1, uint PicSizeInMapUnits)
-{
- int iGroup;
- uint i, x, y;
- uint yTopLeft, xTopLeft, yBottomRight, xBottomRight;
-
- for (i = 0; i < PicSizeInMapUnits; i++)
- {
- mapUnitToSliceGroupMap[ i ] = num_slice_groups_minus1;
- }
-
- for (iGroup = num_slice_groups_minus1 - 1 ; iGroup >= 0; iGroup--)
- {
- yTopLeft = pps->top_left[ iGroup ] / PicWidthInMbs;
- xTopLeft = pps->top_left[ iGroup ] % PicWidthInMbs;
- yBottomRight = pps->bottom_right[ iGroup ] / PicWidthInMbs;
- xBottomRight = pps->bottom_right[ iGroup ] % PicWidthInMbs;
- for (y = yTopLeft; y <= yBottomRight; y++)
- {
- for (x = xTopLeft; x <= xBottomRight; x++)
- {
- mapUnitToSliceGroupMap[ y * PicWidthInMbs + x ] = iGroup;
- }
- }
- }
-}
-
-
-/* see subclause 8.2.2.4 box-out slice group map type */
-/* follow the text rather than the JM, it's quite different. */
-void FmoGenerateType3MapUnitMap(AVCCommonObj *video, AVCPicParamSet* pps, int *mapUnitToSliceGroupMap,
- int PicWidthInMbs)
-{
- uint i, k;
- int leftBound, topBound, rightBound, bottomBound;
- int x, y, xDir, yDir;
- int mapUnitVacant;
- uint PicSizeInMapUnits = video->PicSizeInMapUnits;
- uint MapUnitsInSliceGroup0 = video->MapUnitsInSliceGroup0;
-
- for (i = 0; i < PicSizeInMapUnits; i++)
- {
- mapUnitToSliceGroupMap[ i ] = 1;
- }
-
- x = (PicWidthInMbs - pps->slice_group_change_direction_flag) / 2;
- y = (video->PicHeightInMapUnits - pps->slice_group_change_direction_flag) / 2;
-
- leftBound = x;
- topBound = y;
- rightBound = x;
- bottomBound = y;
-
- xDir = pps->slice_group_change_direction_flag - 1;
- yDir = pps->slice_group_change_direction_flag;
-
- for (k = 0; k < MapUnitsInSliceGroup0; k += mapUnitVacant)
- {
- mapUnitVacant = (mapUnitToSliceGroupMap[ y * PicWidthInMbs + x ] == 1);
- if (mapUnitVacant)
- {
- mapUnitToSliceGroupMap[ y * PicWidthInMbs + x ] = 0;
- }
-
- if (xDir == -1 && x == leftBound)
- {
- leftBound = AVC_MAX(leftBound - 1, 0);
- x = leftBound;
- xDir = 0;
- yDir = 2 * pps->slice_group_change_direction_flag - 1;
- }
- else if (xDir == 1 && x == rightBound)
- {
- rightBound = AVC_MIN(rightBound + 1, (int)PicWidthInMbs - 1);
- x = rightBound;
- xDir = 0;
- yDir = 1 - 2 * pps->slice_group_change_direction_flag;
- }
- else if (yDir == -1 && y == topBound)
- {
- topBound = AVC_MAX(topBound - 1, 0);
- y = topBound;
- xDir = 1 - 2 * pps->slice_group_change_direction_flag;
- yDir = 0;
- }
- else if (yDir == 1 && y == bottomBound)
- {
- bottomBound = AVC_MIN(bottomBound + 1, (int)video->PicHeightInMapUnits - 1);
- y = bottomBound;
- xDir = 2 * pps->slice_group_change_direction_flag - 1;
- yDir = 0;
- }
- else
- {
- x = x + xDir;
- y = y + yDir;
- }
- }
-}
-
-/* see subclause 8.2.2.5 raster scan slice group map types */
-void FmoGenerateType4MapUnitMap(int *mapUnitToSliceGroupMap, int MapUnitsInSliceGroup0, int slice_group_change_direction_flag, uint PicSizeInMapUnits)
-{
- uint sizeOfUpperLeftGroup = slice_group_change_direction_flag ? (PicSizeInMapUnits - MapUnitsInSliceGroup0) : MapUnitsInSliceGroup0;
-
- uint i;
-
- for (i = 0; i < PicSizeInMapUnits; i++)
- if (i < sizeOfUpperLeftGroup)
- mapUnitToSliceGroupMap[ i ] = 1 - slice_group_change_direction_flag;
- else
- mapUnitToSliceGroupMap[ i ] = slice_group_change_direction_flag;
-
-}
-
-/* see subclause 8.2.2.6, wipe slice group map type. */
-void FmoGenerateType5MapUnitMap(int *mapUnitToSliceGroupMap, AVCCommonObj *video,
- int slice_group_change_direction_flag, uint PicSizeInMapUnits)
-{
- int PicWidthInMbs = video->PicWidthInMbs;
- int PicHeightInMapUnits = video->PicHeightInMapUnits;
- int MapUnitsInSliceGroup0 = video->MapUnitsInSliceGroup0;
- int sizeOfUpperLeftGroup = slice_group_change_direction_flag ? (PicSizeInMapUnits - MapUnitsInSliceGroup0) : MapUnitsInSliceGroup0;
- int i, j, k = 0;
-
- for (j = 0; j < PicWidthInMbs; j++)
- {
- for (i = 0; i < PicHeightInMapUnits; i++)
- {
- if (k++ < sizeOfUpperLeftGroup)
- {
- mapUnitToSliceGroupMap[ i * PicWidthInMbs + j ] = 1 - slice_group_change_direction_flag;
- }
- else
- {
- mapUnitToSliceGroupMap[ i * PicWidthInMbs + j ] = slice_group_change_direction_flag;
- }
- }
- }
-}
-
-/* see subclause 8.2.2.7, explicit slice group map */
-void FmoGenerateType6MapUnitMap(int *mapUnitToSliceGroupMap, int *slice_group_id, uint PicSizeInMapUnits)
-{
- uint i;
- for (i = 0; i < PicSizeInMapUnits; i++)
- {
- mapUnitToSliceGroupMap[i] = slice_group_id[i];
- }
-}
-
-
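The deleted FmoGenerateType1MapUnitMap() above is a single formula applied per map unit. The standalone sketch below reproduces that dispersed (type 1) assignment and prints the pattern for a toy 4x3 picture with two slice groups; the helper name dispersedMap and the small driver are illustrative, not part of the removed file.

#include <cstdio>
#include <vector>

// mapUnit[i] = ((i % W) + ((i / W) * G) / 2) % G, with G = num_slice_groups.
std::vector<int> dispersedMap(unsigned picWidthInMbs,
                              unsigned picSizeInMapUnits,
                              unsigned numSliceGroupsMinus1) {
    std::vector<int> map(picSizeInMapUnits);
    for (unsigned i = 0; i < picSizeInMapUnits; ++i) {
        map[i] = ((i % picWidthInMbs) +
                  (((i / picWidthInMbs) * (numSliceGroupsMinus1 + 1)) / 2))
                 % (numSliceGroupsMinus1 + 1);
    }
    return map;
}

int main() {
    // 4x3 picture in macroblocks, two slice groups: prints the dispersed
    // (checkerboard-like) pattern row by row.
    const unsigned w = 4, h = 3;
    std::vector<int> map = dispersedMap(w, w * h, 1);
    for (unsigned y = 0; y < h; ++y) {
        for (unsigned x = 0; x < w; ++x) std::printf("%d ", map[y * w + x]);
        std::printf("\n");
    }
    return 0;
}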
diff --git a/media/libstagefright/codecs/avc/common/src/mb_access.cpp b/media/libstagefright/codecs/avc/common/src/mb_access.cpp
deleted file mode 100644
index 414b8f7..0000000
--- a/media/libstagefright/codecs/avc/common/src/mb_access.cpp
+++ /dev/null
@@ -1,471 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include <string.h>
-
-#include "avclib_common.h"
-
-OSCL_EXPORT_REF void InitNeighborAvailability(AVCCommonObj *video, int mbNum)
-{
- int PicWidthInMbs = video->PicWidthInMbs;
-
- // do frame-only and postpone intraAvail calculation
- video->mbAddrA = mbNum - 1;
- video->mbAddrB = mbNum - PicWidthInMbs;
- video->mbAddrC = mbNum - PicWidthInMbs + 1;
- video->mbAddrD = mbNum - PicWidthInMbs - 1;
-
- video->mbAvailA = video->mbAvailB = video->mbAvailC = video->mbAvailD = 0;
- if (video->mb_x)
- {
- video->mbAvailA = (video->mblock[video->mbAddrA].slice_id == video->currMB->slice_id);
- if (video->mb_y)
- {
- video->mbAvailD = (video->mblock[video->mbAddrD].slice_id == video->currMB->slice_id);
- }
- }
-
- if (video->mb_y)
- {
- video->mbAvailB = (video->mblock[video->mbAddrB].slice_id == video->currMB->slice_id);
- if (video->mb_x < (PicWidthInMbs - 1))
- {
- video->mbAvailC = (video->mblock[video->mbAddrC].slice_id == video->currMB->slice_id);
- }
- }
- return ;
-}
-
-bool mb_is_available(AVCMacroblock *mblock, uint PicSizeInMbs, int mbAddr, int currMbAddr)
-{
- if (mbAddr < 0 || mbAddr >= (int)PicSizeInMbs)
- {
- return FALSE;
- }
-
- if (mblock[mbAddr].slice_id != mblock[currMbAddr].slice_id)
- {
- return FALSE;
- }
-
- return TRUE;
-}
-
-OSCL_EXPORT_REF int predict_nnz(AVCCommonObj *video, int i, int j)
-{
- int pred_nnz = 0;
- int cnt = 1;
- AVCMacroblock *tempMB;
-
- /* left block */
- /*getLuma4x4Neighbour(video, mb_nr, i, j, -1, 0, &pix);
- leftMB = video->mblock + pix.mb_addr; */
- /* replace the above with below (won't work for field decoding), 1/19/04 */
-
- if (i)
- {
- pred_nnz = video->currMB->nz_coeff[(j<<2)+i-1];
- }
- else
- {
- if (video->mbAvailA)
- {
- tempMB = video->mblock + video->mbAddrA;
- pred_nnz = tempMB->nz_coeff[(j<<2)+3];
- }
- else
- {
- cnt = 0;
- }
- }
-
-
- /* top block */
- /*getLuma4x4Neighbour(video, mb_nr, i, j, 0, -1, &pix);
- topMB = video->mblock + pix.mb_addr;*/
- /* replace the above with below (won't work for field decoding), 1/19/04 */
-
- if (j)
- {
- pred_nnz += video->currMB->nz_coeff[((j-1)<<2)+i];
- cnt++;
- }
- else
- {
- if (video->mbAvailB)
- {
- tempMB = video->mblock + video->mbAddrB;
- pred_nnz += tempMB->nz_coeff[12+i];
- cnt++;
- }
- }
-
-
- if (cnt == 2)
- {
- pred_nnz = (pred_nnz + 1) >> 1;
- }
-
- return pred_nnz;
-
-}
-
-
-OSCL_EXPORT_REF int predict_nnz_chroma(AVCCommonObj *video, int i, int j)
-{
- int pred_nnz = 0;
- int cnt = 1;
- AVCMacroblock *tempMB;
-
- /* left block */
- /*getChroma4x4Neighbour(video, mb_nr, i%2, j-4, -1, 0, &pix);
- leftMB = video->mblock + pix.mb_addr;*/
- /* replace the above with below (won't work for field decoding), 1/19/04 */
- if (i&1)
- {
- pred_nnz = video->currMB->nz_coeff[(j<<2)+i-1];
-
- }
- else
- {
- if (video->mbAvailA)
- {
- tempMB = video->mblock + video->mbAddrA;
- pred_nnz = tempMB->nz_coeff[(j<<2)+i+1];
- }
- else
- {
- cnt = 0;
- }
- }
-
-
- /* top block */
- /*getChroma4x4Neighbour(video, mb_nr, i%2, j-4, 0, -1, &pix);
- topMB = video->mblock + pix.mb_addr;*/
- /* replace the above with below (won't work for field decoding), 1/19/04 */
-
- if (j&1)
- {
- pred_nnz += video->currMB->nz_coeff[((j-1)<<2)+i];
- cnt++;
- }
- else
- {
- if (video->mbAvailB)
- {
- tempMB = video->mblock + video->mbAddrB;
- pred_nnz += tempMB->nz_coeff[20+i];
- cnt++;
- }
-
- }
-
- if (cnt == 2)
- {
- pred_nnz = (pred_nnz + 1) >> 1;
- }
-
- return pred_nnz;
-}
-
-OSCL_EXPORT_REF void GetMotionVectorPredictor(AVCCommonObj *video, int encFlag)
-{
- AVCMacroblock *currMB = video->currMB;
- AVCMacroblock *MB_A, *MB_B, *MB_C, *MB_D;
- int block_x, block_y, block_x_1, block_y_1, new_block_x;
- int mbPartIdx, subMbPartIdx, offset_indx;
- int16 *mv, pmv_x, pmv_y;
- int nmSubMbHeight, nmSubMbWidth, mbPartIdx_X, mbPartIdx_Y;
- int avail_a, avail_b, avail_c;
- const static uint32 C = 0x5750;
- int i, j, offset_MbPart_indx, refIdxLXA, refIdxLXB, refIdxLXC = 0, curr_ref_idx;
- int pmv_A_x, pmv_B_x, pmv_C_x = 0, pmv_A_y, pmv_B_y, pmv_C_y = 0;
-
- /* we have to take care of Intra/skip blocks somewhere, i.e. set MV to 0 and set ref to -1! */
- /* we have to populate refIdx as well */
-
-
- MB_A = &video->mblock[video->mbAddrA];
- MB_B = &video->mblock[video->mbAddrB];
-
-
- if (currMB->mbMode == AVC_SKIP /* && !encFlag */) /* only for decoder */
- {
- currMB->ref_idx_L0[0] = currMB->ref_idx_L0[1] = currMB->ref_idx_L0[2] = currMB->ref_idx_L0[3] = 0;
- if (video->mbAvailA && video->mbAvailB)
- {
- if ((MB_A->ref_idx_L0[1] == 0 && MB_A->mvL0[3] == 0) ||
- (MB_B->ref_idx_L0[2] == 0 && MB_B->mvL0[12] == 0))
- {
- memset(currMB->mvL0, 0, sizeof(int32)*16);
- return;
- }
- }
- else
- {
- memset(currMB->mvL0, 0, sizeof(int32)*16);
- return;
- }
- video->mvd_l0[0][0][0] = 0;
- video->mvd_l0[0][0][1] = 0;
- }
-
- MB_C = &video->mblock[video->mbAddrC];
- MB_D = &video->mblock[video->mbAddrD];
-
- offset_MbPart_indx = 0;
- for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
- {
- offset_indx = 0;
- nmSubMbHeight = currMB->SubMbPartHeight[mbPartIdx] >> 2;
- nmSubMbWidth = currMB->SubMbPartWidth[mbPartIdx] >> 2;
- mbPartIdx_X = ((mbPartIdx + offset_MbPart_indx) & 1) << 1;
- mbPartIdx_Y = (mbPartIdx + offset_MbPart_indx) & 2;
-
- for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
- {
- block_x = mbPartIdx_X + ((subMbPartIdx + offset_indx) & 1);
- block_y = mbPartIdx_Y + (((subMbPartIdx + offset_indx) >> 1) & 1);
-
- block_x_1 = block_x - 1;
- block_y_1 = block_y - 1;
- refIdxLXA = refIdxLXB = refIdxLXC = -1;
- pmv_A_x = pmv_A_y = pmv_B_x = pmv_B_y = pmv_C_x = pmv_C_y = 0;
-
- if (block_x)
- {
- avail_a = 1;
- refIdxLXA = currMB->ref_idx_L0[(block_y & 2) + (block_x_1 >> 1)];
- mv = (int16*)(currMB->mvL0 + (block_y << 2) + block_x_1);
- pmv_A_x = *mv++;
- pmv_A_y = *mv;
- }
- else
- {
- avail_a = video->mbAvailA;
- if (avail_a)
- {
- refIdxLXA = MB_A->ref_idx_L0[(block_y & 2) + 1];
- mv = (int16*)(MB_A->mvL0 + (block_y << 2) + 3);
- pmv_A_x = *mv++;
- pmv_A_y = *mv;
- }
- }
-
- if (block_y)
- {
- avail_b = 1;
- refIdxLXB = currMB->ref_idx_L0[(block_y_1 & 2) + (block_x >> 1)];
- mv = (int16*)(currMB->mvL0 + (block_y_1 << 2) + block_x);
- pmv_B_x = *mv++;
- pmv_B_y = *mv;
- }
-
- else
- {
- avail_b = video->mbAvailB;
- if (avail_b)
- {
- refIdxLXB = MB_B->ref_idx_L0[2 + (block_x >> 1)];
- mv = (int16*)(MB_B->mvL0 + 12 + block_x);
- pmv_B_x = *mv++;
- pmv_B_y = *mv;
- }
- }
-
- new_block_x = block_x + (currMB->SubMbPartWidth[mbPartIdx] >> 2) - 1;
- avail_c = (C >> ((block_y << 2) + new_block_x)) & 0x1;
-
- if (avail_c)
- {
- /* it is guaranteed that block_y > 0 && new_block_x < 3 */
- refIdxLXC = currMB->ref_idx_L0[(block_y_1 & 2) + ((new_block_x+1) >> 1)];
- mv = (int16*)(currMB->mvL0 + (block_y_1 << 2) + (new_block_x + 1));
- pmv_C_x = *mv++;
- pmv_C_y = *mv;
- }
- else
- {
- if (block_y == 0 && new_block_x < 3)
- {
- avail_c = video->mbAvailB;
- if (avail_c)
- {
- refIdxLXC = MB_B->ref_idx_L0[2 + ((new_block_x+1)>>1)];
- mv = (int16*)(MB_B->mvL0 + 12 + (new_block_x + 1));
- pmv_C_x = *mv++;
- pmv_C_y = *mv;
- }
- }
- else if (block_y == 0 && new_block_x == 3)
- {
- avail_c = video->mbAvailC;
- if (avail_c)
- {
- refIdxLXC = MB_C->ref_idx_L0[2];
- mv = (int16*)(MB_C->mvL0 + 12);
- pmv_C_x = *mv++;
- pmv_C_y = *mv;
- }
- }
-
- if (avail_c == 0)
- { /* check D */
- if (block_x && block_y)
- {
- avail_c = 1;
- refIdxLXC = currMB->ref_idx_L0[(block_y_1 & 2) + (block_x_1 >> 1)];
- mv = (int16*)(currMB->mvL0 + (block_y_1 << 2) + block_x_1);
- pmv_C_x = *mv++;
- pmv_C_y = *mv;
- }
- else if (block_y)
- {
- avail_c = video->mbAvailA;
- if (avail_c)
- {
- refIdxLXC = MB_A->ref_idx_L0[(block_y_1 & 2) + 1];
- mv = (int16*)(MB_A->mvL0 + (block_y_1 << 2) + 3);
- pmv_C_x = *mv++;
- pmv_C_y = *mv;
- }
- }
- else if (block_x)
- {
- avail_c = video->mbAvailB;
- if (avail_c)
- {
- refIdxLXC = MB_B->ref_idx_L0[2 + (block_x_1 >> 1)];
- mv = (int16*)(MB_B->mvL0 + 12 + block_x_1);
- pmv_C_x = *mv++;
- pmv_C_y = *mv;
- }
- }
- else
- {
- avail_c = video->mbAvailD;
- if (avail_c)
- {
- refIdxLXC = MB_D->ref_idx_L0[3];
- mv = (int16*)(MB_D->mvL0 + 15);
- pmv_C_x = *mv++;
- pmv_C_y = *mv;
- }
- }
- }
- }
-
- offset_indx = currMB->SubMbPartWidth[mbPartIdx] >> 3;
-
- curr_ref_idx = currMB->ref_idx_L0[(block_y & 2) + (block_x >> 1)];
-
- if (avail_a && !(avail_b || avail_c))
- {
- pmv_x = pmv_A_x;
- pmv_y = pmv_A_y;
- }
- else if (((curr_ref_idx == refIdxLXA) + (curr_ref_idx == refIdxLXB) + (curr_ref_idx == refIdxLXC)) == 1)
- {
- if (curr_ref_idx == refIdxLXA)
- {
- pmv_x = pmv_A_x;
- pmv_y = pmv_A_y;
- }
- else if (curr_ref_idx == refIdxLXB)
- {
- pmv_x = pmv_B_x;
- pmv_y = pmv_B_y;
- }
- else
- {
- pmv_x = pmv_C_x;
- pmv_y = pmv_C_y;
- }
- }
- else
- {
- pmv_x = AVC_MEDIAN(pmv_A_x, pmv_B_x, pmv_C_x);
- pmv_y = AVC_MEDIAN(pmv_A_y, pmv_B_y, pmv_C_y);
- }
-
- /* overwrite if special case */
- if (currMB->NumMbPart == 2)
- {
- if (currMB->MbPartWidth == 16)
- {
- if (mbPartIdx == 0)
- {
- if (refIdxLXB == curr_ref_idx)
- {
- pmv_x = pmv_B_x;
- pmv_y = pmv_B_y;
- }
- }
- else if (refIdxLXA == curr_ref_idx)
- {
- pmv_x = pmv_A_x;
- pmv_y = pmv_A_y;
- }
- }
- else
- {
- if (mbPartIdx == 0)
- {
- if (refIdxLXA == curr_ref_idx)
- {
- pmv_x = pmv_A_x;
- pmv_y = pmv_A_y;
- }
- }
- else if (refIdxLXC == curr_ref_idx)
- {
- pmv_x = pmv_C_x;
- pmv_y = pmv_C_y;
- }
- }
- }
-
- mv = (int16*)(currMB->mvL0 + block_x + (block_y << 2));
-
- if (encFlag) /* calculate residual MV video->mvd_l0 */
- {
- video->mvd_l0[mbPartIdx][subMbPartIdx][0] = *mv++ - pmv_x;
- video->mvd_l0[mbPartIdx][subMbPartIdx][1] = *mv++ - pmv_y;
- }
- else /* calculate original MV currMB->mvL0 */
- {
- pmv_x += video->mvd_l0[mbPartIdx][subMbPartIdx][0];
- pmv_y += video->mvd_l0[mbPartIdx][subMbPartIdx][1];
-
- for (i = 0; i < nmSubMbHeight; i++)
- {
- for (j = 0; j < nmSubMbWidth; j++)
- {
- *mv++ = pmv_x;
- *mv++ = pmv_y;
- }
- mv += (8 - (j << 1));
- }
- }
- }
- offset_MbPart_indx = currMB->MbPartWidth >> 4;
-
- }
-}
-
-
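Most of the deleted GetMotionVectorPredictor() is partition and neighbour bookkeeping; the predictor choice itself reduces to three cases visible near the end of the function: take neighbour A when B and C are unavailable, take the single neighbour whose reference index matches the current one, otherwise take the component-wise median of A, B and C. The sketch below shows only that selection, with hypothetical MV/predictMv names and none of the 16x8/8x16 partition overrides; unavailable neighbours are assumed to carry refIdx = -1 and a zero vector, as in the removed code.

#include <algorithm>

struct MV { int x; int y; };

static int median3(int a, int b, int c) {
    return std::max(std::min(a, b), std::min(std::max(a, b), c));
}

MV predictMv(MV a, MV b, MV c,
             bool availA, bool availB, bool availC,
             int refA, int refB, int refC, int currRef) {
    if (availA && !availB && !availC) return a;              // only A usable

    int matches = (currRef == refA) + (currRef == refB) + (currRef == refC);
    if (matches == 1) {                                       // exactly one neighbour
        if (currRef == refA) return a;                        // shares our ref_idx
        if (currRef == refB) return b;
        return c;
    }
    // Default: component-wise median of the three neighbour vectors.
    return MV{ median3(a.x, b.x, c.x), median3(a.y, b.y, c.y) };
}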
diff --git a/media/libstagefright/codecs/avc/common/src/reflist.cpp b/media/libstagefright/codecs/avc/common/src/reflist.cpp
deleted file mode 100644
index 4ddc7dd..0000000
--- a/media/libstagefright/codecs/avc/common/src/reflist.cpp
+++ /dev/null
@@ -1,596 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avclib_common.h"
-
-/** see subclause 8.2.4 Decoding process for reference picture lists construction. */
-OSCL_EXPORT_REF void RefListInit(AVCCommonObj *video)
-{
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- AVCDecPicBuffer *dpb = video->decPicBuf;
- int slice_type = video->slice_type;
- int i, list0idx;
-
- AVCPictureData *tmp_s;
-
- list0idx = 0;
-
- if (slice_type == AVC_I_SLICE)
- {
- video->refList0Size = 0;
- video->refList1Size = 0;
-
- /* we still have to calculate FrameNumWrap to make sure that an all-I-slice clip
- can perform the sliding window operation properly. */
-
- for (i = 0; i < dpb->num_fs; i++)
- {
- if ((dpb->fs[i]->IsReference == 3) && (!dpb->fs[i]->IsLongTerm))
- {
- /* subclause 8.2.4.1 Decoding process for picture numbers. */
- if (dpb->fs[i]->FrameNum > (int)sliceHdr->frame_num)
- {
- dpb->fs[i]->FrameNumWrap = dpb->fs[i]->FrameNum - video->MaxFrameNum;
- }
- else
- {
- dpb->fs[i]->FrameNumWrap = dpb->fs[i]->FrameNum;
- }
- dpb->fs[i]->frame.PicNum = dpb->fs[i]->FrameNumWrap;
- }
- }
-
-
- return ;
- }
- if (slice_type == AVC_P_SLICE)
- {
- /* Calculate FrameNumWrap and PicNum */
-
- for (i = 0; i < dpb->num_fs; i++)
- {
- if ((dpb->fs[i]->IsReference == 3) && (!dpb->fs[i]->IsLongTerm))
- {
- /* subclause 8.2.4.1 Decoding process for picture numbers. */
- if (dpb->fs[i]->FrameNum > (int)sliceHdr->frame_num)
- {
- dpb->fs[i]->FrameNumWrap = dpb->fs[i]->FrameNum - video->MaxFrameNum;
- }
- else
- {
- dpb->fs[i]->FrameNumWrap = dpb->fs[i]->FrameNum;
- }
- dpb->fs[i]->frame.PicNum = dpb->fs[i]->FrameNumWrap;
- video->RefPicList0[list0idx++] = &(dpb->fs[i]->frame);
- }
- }
-
- if (list0idx == 0)
- {
- dpb->fs[0]->IsReference = 3;
- video->RefPicList0[0] = &(dpb->fs[0]->frame);
- list0idx = 1;
- }
- /* order list 0 by PicNum from max to min, see subclause 8.2.4.2.1 */
- SortPicByPicNum(video->RefPicList0, list0idx);
- video->refList0Size = list0idx;
-
- /* long term handling */
- for (i = 0; i < dpb->num_fs; i++)
- {
- if (dpb->fs[i]->IsLongTerm == 3)
- {
- /* subclause 8.2.4.1 Decoding process for picture numbers. */
- dpb->fs[i]->frame.LongTermPicNum = dpb->fs[i]->LongTermFrameIdx;
- video->RefPicList0[list0idx++] = &(dpb->fs[i]->frame);
- }
- }
-
- /* order PicNum from min to max, see subclause 8.2.4.2.1 */
- SortPicByPicNumLongTerm(&(video->RefPicList0[video->refList0Size]), list0idx - video->refList0Size);
- video->refList0Size = list0idx;
-
-
- video->refList1Size = 0;
- }
-
-
- if ((video->refList0Size == video->refList1Size) && (video->refList0Size > 1))
- {
- /* check if lists are identical, if yes swap first two elements of listX[1] */
- /* last paragraph of subclause 8.2.4.2.4 */
-
- for (i = 0; i < video->refList0Size; i++)
- {
- if (video->RefPicList0[i] != video->RefPicList1[i])
- {
- break;
- }
- }
- if (i == video->refList0Size)
- {
- tmp_s = video->RefPicList1[0];
- video->RefPicList1[0] = video->RefPicList1[1];
- video->RefPicList1[1] = tmp_s;
- }
- }
-
- /* set max size */
- video->refList0Size = AVC_MIN(video->refList0Size, (int)video->sliceHdr->num_ref_idx_l0_active_minus1 + 1);
- video->refList1Size = AVC_MIN(video->refList1Size, (int)video->sliceHdr->num_ref_idx_l1_active_minus1 + 1);
-
- return ;
-}
-/* see subclause 8.2.4.3 */
-OSCL_EXPORT_REF AVCStatus ReOrderList(AVCCommonObj *video)
-{
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- AVCStatus status = AVC_SUCCESS;
- int slice_type = video->slice_type;
-
- if (slice_type != AVC_I_SLICE)
- {
- if (sliceHdr->ref_pic_list_reordering_flag_l0)
- {
- status = ReorderRefPicList(video, 0);
- if (status != AVC_SUCCESS)
- return status;
- }
- if (video->refList0Size == 0)
- {
- return AVC_FAIL;
- }
- }
- return status;
-}
-
-AVCStatus ReorderRefPicList(AVCCommonObj *video, int isL1)
-{
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- AVCStatus status;
-
- int *list_size;
- int num_ref_idx_lX_active_minus1;
- uint *remapping_of_pic_nums_idc;
- int *abs_diff_pic_num_minus1;
- int *long_term_pic_idx;
- int i;
- int maxPicNum, currPicNum, picNumLXNoWrap, picNumLXPred, picNumLX;
- int refIdxLX = 0;
- void* tmp;
-
- if (!isL1) /* list 0 */
- {
- list_size = &(video->refList0Size);
- num_ref_idx_lX_active_minus1 = sliceHdr->num_ref_idx_l0_active_minus1;
- remapping_of_pic_nums_idc = sliceHdr->reordering_of_pic_nums_idc_l0;
- tmp = (void*)sliceHdr->abs_diff_pic_num_minus1_l0;
- abs_diff_pic_num_minus1 = (int*) tmp;
- tmp = (void*)sliceHdr->long_term_pic_num_l0;
- long_term_pic_idx = (int*) tmp;
- }
- else
- {
- list_size = &(video->refList1Size);
- num_ref_idx_lX_active_minus1 = sliceHdr->num_ref_idx_l1_active_minus1;
- remapping_of_pic_nums_idc = sliceHdr->reordering_of_pic_nums_idc_l1;
- tmp = (void*) sliceHdr->abs_diff_pic_num_minus1_l1;
- abs_diff_pic_num_minus1 = (int*) tmp;
- tmp = (void*) sliceHdr->long_term_pic_num_l1;
- long_term_pic_idx = (int*)tmp;
- }
-
- maxPicNum = video->MaxPicNum;
- currPicNum = video->CurrPicNum;
-
- picNumLXPred = currPicNum; /* initial value */
-
- for (i = 0; remapping_of_pic_nums_idc[i] != 3; i++)
- {
- if ((remapping_of_pic_nums_idc[i] > 3) || (i >= MAX_REF_PIC_LIST_REORDERING))
- {
- return AVC_FAIL; /* out of range */
- }
- /* see subclause 8.2.4.3.1 */
- if (remapping_of_pic_nums_idc[i] < 2)
- {
- if (remapping_of_pic_nums_idc[i] == 0)
- {
- if (picNumLXPred - (abs_diff_pic_num_minus1[i] + 1) < 0)
- picNumLXNoWrap = picNumLXPred - (abs_diff_pic_num_minus1[i] + 1) + maxPicNum;
- else
- picNumLXNoWrap = picNumLXPred - (abs_diff_pic_num_minus1[i] + 1);
- }
- else /* (remapping_of_pic_nums_idc[i] == 1) */
- {
- if (picNumLXPred + (abs_diff_pic_num_minus1[i] + 1) >= maxPicNum)
- picNumLXNoWrap = picNumLXPred + (abs_diff_pic_num_minus1[i] + 1) - maxPicNum;
- else
- picNumLXNoWrap = picNumLXPred + (abs_diff_pic_num_minus1[i] + 1);
- }
- picNumLXPred = picNumLXNoWrap; /* prediction for the next one */
-
- if (picNumLXNoWrap > currPicNum)
- picNumLX = picNumLXNoWrap - maxPicNum;
- else
- picNumLX = picNumLXNoWrap;
-
- status = ReorderShortTerm(video, picNumLX, &refIdxLX, isL1);
- if (status != AVC_SUCCESS)
- {
- return status;
- }
- }
- else /* (remapping_of_pic_nums_idc[i] == 2), subclause 8.2.4.3.2 */
- {
- status = ReorderLongTerm(video, long_term_pic_idx[i], &refIdxLX, isL1);
- if (status != AVC_SUCCESS)
- {
- return status;
- }
- }
- }
- /* that's a definition */
- *list_size = num_ref_idx_lX_active_minus1 + 1;
-
- return AVC_SUCCESS;
-}
-
-/* see subclause 8.2.4.3.1 */
-AVCStatus ReorderShortTerm(AVCCommonObj *video, int picNumLX, int *refIdxLX, int isL1)
-{
- int cIdx, nIdx;
- int num_ref_idx_lX_active_minus1;
- AVCPictureData *picLX, **RefPicListX;
-
- if (!isL1) /* list 0 */
- {
- RefPicListX = video->RefPicList0;
- num_ref_idx_lX_active_minus1 = video->sliceHdr->num_ref_idx_l0_active_minus1;
- }
- else
- {
- RefPicListX = video->RefPicList1;
- num_ref_idx_lX_active_minus1 = video->sliceHdr->num_ref_idx_l1_active_minus1;
- }
-
- picLX = GetShortTermPic(video, picNumLX);
-
- if (picLX == NULL)
- {
- return AVC_FAIL;
- }
- /* Note RefPicListX has to access element number num_ref_idx_lX_active */
- /* There could be access violation here. */
- if (num_ref_idx_lX_active_minus1 + 1 >= MAX_REF_PIC_LIST)
- {
- return AVC_FAIL;
- }
-
- for (cIdx = num_ref_idx_lX_active_minus1 + 1; cIdx > *refIdxLX; cIdx--)
- {
- RefPicListX[ cIdx ] = RefPicListX[ cIdx - 1];
- }
-
- RefPicListX[(*refIdxLX)++ ] = picLX;
-
- nIdx = *refIdxLX;
-
- for (cIdx = *refIdxLX; cIdx <= num_ref_idx_lX_active_minus1 + 1; cIdx++)
- {
- if (RefPicListX[ cIdx ])
- {
- if ((RefPicListX[ cIdx ]->isLongTerm) || ((int)RefPicListX[ cIdx ]->PicNum != picNumLX))
- {
- RefPicListX[ nIdx++ ] = RefPicListX[ cIdx ];
- }
- }
- }
- return AVC_SUCCESS;
-}
-
-/* see subclause 8.2.4.3.2 */
-AVCStatus ReorderLongTerm(AVCCommonObj *video, int LongTermPicNum, int *refIdxLX, int isL1)
-{
- AVCPictureData **RefPicListX;
- int num_ref_idx_lX_active_minus1;
- int cIdx, nIdx;
- AVCPictureData *picLX;
-
- if (!isL1) /* list 0 */
- {
- RefPicListX = video->RefPicList0;
- num_ref_idx_lX_active_minus1 = video->sliceHdr->num_ref_idx_l0_active_minus1;
- }
- else
- {
- RefPicListX = video->RefPicList1;
- num_ref_idx_lX_active_minus1 = video->sliceHdr->num_ref_idx_l1_active_minus1;
- }
-
- picLX = GetLongTermPic(video, LongTermPicNum);
- if (picLX == NULL)
- {
- return AVC_FAIL;
- }
- /* Note RefPicListX has to access element number num_ref_idx_lX_active */
- /* There could be access violation here. */
- if (num_ref_idx_lX_active_minus1 + 1 >= MAX_REF_PIC_LIST)
- {
- return AVC_FAIL;
- }
- for (cIdx = num_ref_idx_lX_active_minus1 + 1; cIdx > *refIdxLX; cIdx--)
- RefPicListX[ cIdx ] = RefPicListX[ cIdx - 1];
-
- RefPicListX[(*refIdxLX)++ ] = picLX;
-
- nIdx = *refIdxLX;
-
- for (cIdx = *refIdxLX; cIdx <= num_ref_idx_lX_active_minus1 + 1; cIdx++)
- {
- if ((!RefPicListX[ cIdx ]->isLongTerm) || ((int)RefPicListX[ cIdx ]->LongTermPicNum != LongTermPicNum))
- {
- RefPicListX[ nIdx++ ] = RefPicListX[ cIdx ];
- }
- }
- return AVC_SUCCESS;
-}
-
-
-AVCPictureData* GetShortTermPic(AVCCommonObj *video, int picNum)
-{
- int i;
- AVCDecPicBuffer *dpb = video->decPicBuf;
-
- for (i = 0; i < dpb->num_fs; i++)
- {
-
- if (dpb->fs[i]->IsReference == 3)
- {
- if ((dpb->fs[i]->frame.isLongTerm == FALSE) && (dpb->fs[i]->frame.PicNum == picNum))
- {
- return &(dpb->fs[i]->frame);
- }
- }
-
- }
-
- return NULL;
-}
-
-AVCPictureData* GetLongTermPic(AVCCommonObj *video, int LongtermPicNum)
-{
- AVCDecPicBuffer *dpb = video->decPicBuf;
- int i;
-
- for (i = 0; i < dpb->num_fs; i++)
- {
-
- if (dpb->fs[i]->IsReference == 3)
- {
- if ((dpb->fs[i]->frame.isLongTerm == TRUE) && (dpb->fs[i]->frame.LongTermPicNum == LongtermPicNum))
- {
- return &(dpb->fs[i]->frame);
- }
- }
-
- }
- return NULL;
-}
-
-int is_short_ref(AVCPictureData *s)
-{
- return ((s->isReference) && !(s->isLongTerm));
-}
-
-int is_long_ref(AVCPictureData *s)
-{
- return ((s->isReference) && (s->isLongTerm));
-}
-
-
-/* sort by PicNum, descending order */
-void SortPicByPicNum(AVCPictureData *data[], int num)
-{
- int i, j;
- AVCPictureData *temp;
-
- for (i = 0; i < num - 1; i++)
- {
- for (j = i + 1; j < num; j++)
- {
- if (data[j]->PicNum > data[i]->PicNum)
- {
- temp = data[j];
- data[j] = data[i];
- data[i] = temp;
- }
- }
- }
-
- return ;
-}
-
-/* sort by PicNum, ascending order */
-void SortPicByPicNumLongTerm(AVCPictureData *data[], int num)
-{
- int i, j;
- AVCPictureData *temp;
-
- for (i = 0; i < num - 1; i++)
- {
- for (j = i + 1; j < num; j++)
- {
- if (data[j]->LongTermPicNum < data[i]->LongTermPicNum)
- {
- temp = data[j];
- data[j] = data[i];
- data[i] = temp;
- }
- }
- }
-
- return ;
-}
-
-
-/* sort by FrameNumWrap, descending order */
-void SortFrameByFrameNumWrap(AVCFrameStore *data[], int num)
-{
- int i, j;
- AVCFrameStore *temp;
-
- for (i = 0; i < num - 1; i++)
- {
- for (j = i + 1; j < num; j++)
- {
- if (data[j]->FrameNumWrap > data[i]->FrameNumWrap)
- {
- temp = data[j];
- data[j] = data[i];
- data[i] = temp;
- }
- }
- }
-
- return ;
-}
-
-/* sort frames by LongTermFrameIdx, ascending order */
-void SortFrameByLTFrameIdx(AVCFrameStore *data[], int num)
-{
- int i, j;
- AVCFrameStore *temp;
-
- for (i = 0; i < num - 1; i++)
- {
- for (j = i + 1; j < num; j++)
- {
- if (data[j]->LongTermFrameIdx < data[i]->LongTermFrameIdx)
- {
- temp = data[j];
- data[j] = data[i];
- data[i] = temp;
- }
- }
- }
-
- return ;
-}
-
-/* sort PictureData by POC in descending order */
-void SortPicByPOC(AVCPictureData *data[], int num, int descending)
-{
- int i, j;
- AVCPictureData *temp;
-
- if (descending)
- {
- for (i = 0; i < num - 1; i++)
- {
- for (j = i + 1; j < num; j++)
- {
- if (data[j]->PicOrderCnt > data[i]->PicOrderCnt)
- {
- temp = data[j];
- data[j] = data[i];
- data[i] = temp;
- }
- }
- }
- }
- else
- {
- for (i = 0; i < num - 1; i++)
- {
- for (j = i + 1; j < num; j++)
- {
- if (data[j]->PicOrderCnt < data[i]->PicOrderCnt)
- {
- temp = data[j];
- data[j] = data[i];
- data[i] = temp;
- }
- }
- }
- }
- return ;
-}
-
-/* sort PictureData by LongTermPicNum in ascending order */
-void SortPicByLTPicNum(AVCPictureData *data[], int num)
-{
- int i, j;
- AVCPictureData *temp;
-
- for (i = 0; i < num - 1; i++)
- {
- for (j = i + 1; j < num; j++)
- {
- if (data[j]->LongTermPicNum < data[i]->LongTermPicNum)
- {
- temp = data[j];
- data[j] = data[i];
- data[i] = temp;
- }
- }
- }
-
- return ;
-}
-
-/* sort by PicOrderCnt, descending order */
-void SortFrameByPOC(AVCFrameStore *data[], int num, int descending)
-{
- int i, j;
- AVCFrameStore *temp;
-
- if (descending)
- {
- for (i = 0; i < num - 1; i++)
- {
- for (j = i + 1; j < num; j++)
- {
- if (data[j]->PicOrderCnt > data[i]->PicOrderCnt)
- {
- temp = data[j];
- data[j] = data[i];
- data[i] = temp;
- }
- }
- }
- }
- else
- {
- for (i = 0; i < num - 1; i++)
- {
- for (j = i + 1; j < num; j++)
- {
- if (data[j]->PicOrderCnt < data[i]->PicOrderCnt)
- {
- temp = data[j];
- data[j] = data[i];
- data[i] = temp;
- }
- }
- }
- }
-
- return ;
-}
-
-
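For context on the removed RefListInit(): short-term picture numbers follow the subclause 8.2.4.1 wrap rule (FrameNumWrap = FrameNum - MaxFrameNum when FrameNum exceeds the current frame_num, otherwise FrameNum), and list 0 is then ordered by PicNum from largest to smallest. The toy program below, with assumed values for MaxFrameNum and the current frame_num, shows that derivation and ordering; it is an illustration only, not the deleted code.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Ref { int frameNum; int picNum; };

int main() {
    const int maxFrameNum  = 16;  // 1 << (log2_max_frame_num_minus4 + 4), assumed
    const int currFrameNum = 2;   // frame_num of the current slice, assumed

    std::vector<Ref> shortTerm = { {15, 0}, {0, 0}, {1, 0} };
    for (Ref& r : shortTerm) {
        // Frames numbered above the current frame_num are "wrapped" negative.
        int wrap = (r.frameNum > currFrameNum) ? r.frameNum - maxFrameNum : r.frameNum;
        r.picNum = wrap;          // PicNum == FrameNumWrap for frame coding
    }
    // List 0 holds the short-term references ordered by PicNum, highest first.
    std::sort(shortTerm.begin(), shortTerm.end(),
              [](const Ref& a, const Ref& b) { return a.picNum > b.picNum; });

    for (const Ref& r : shortTerm)
        std::printf("frame_num=%d -> PicNum=%d\n", r.frameNum, r.picNum);
    return 0;  // prints frame_num 1, 0, then 15 (PicNum -1)
}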
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
deleted file mode 100644
index e451f30..0000000
--- a/media/libstagefright/codecs/avc/enc/Android.mk
+++ /dev/null
@@ -1,106 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- src/avcenc_api.cpp \
- src/bitstream_io.cpp \
- src/block.cpp \
- src/findhalfpel.cpp \
- src/header.cpp \
- src/init.cpp \
- src/intra_est.cpp \
- src/motion_comp.cpp \
- src/motion_est.cpp \
- src/rate_control.cpp \
- src/residual.cpp \
- src/sad.cpp \
- src/sad_halfpel.cpp \
- src/slice.cpp \
- src/vlc_encode.cpp
-
-
-LOCAL_MODULE := libstagefright_avcenc
-
-LOCAL_C_INCLUDES := \
- $(LOCAL_PATH)/src \
- $(LOCAL_PATH)/../common/include \
- $(TOP)/frameworks/av/media/libstagefright/include \
- $(TOP)/frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS := \
- -DOSCL_IMPORT_REF= -D"OSCL_UNUSED_ARG(x)=(void)(x)" -DOSCL_EXPORT_REF=
-
-LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow
-
-include $(BUILD_STATIC_LIBRARY)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- SoftAVCEncoder.cpp
-
-LOCAL_C_INCLUDES := \
- frameworks/av/media/libstagefright/include \
- frameworks/native/include/media/hardware \
- frameworks/native/include/media/openmax \
- $(LOCAL_PATH)/src \
- $(LOCAL_PATH)/include \
- $(LOCAL_PATH)/../common/include \
- $(LOCAL_PATH)/../common
-
-LOCAL_CFLAGS := \
- -DOSCL_IMPORT_REF= -D"OSCL_UNUSED_ARG(x)=(void)(x)" -DOSCL_EXPORT_REF=
-
-
-LOCAL_STATIC_LIBRARIES := \
- libstagefright_avcenc
-
-LOCAL_SHARED_LIBRARIES := \
- libmedia \
- libstagefright_avc_common \
- libstagefright_foundation \
- libstagefright_omx \
- libutils \
- liblog \
-
-
-LOCAL_MODULE := libstagefright_soft_h264enc
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_CFLAGS += -Werror
-LOCAL_SANITIZE := signed-integer-overflow cfi
-LOCAL_SANITIZE_DIAG := cfi
-
-include $(BUILD_SHARED_LIBRARY)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- test/h264_enc_test.cpp
-
-LOCAL_C_INCLUDES := \
- $(LOCAL_PATH)/src \
- $(LOCAL_PATH)/include \
- $(LOCAL_PATH)/../common/include \
- $(LOCAL_PATH)/../common
-
-LOCAL_CFLAGS := \
- -DOSCL_IMPORT_REF= -DOSCL_UNUSED_ARG= -DOSCL_EXPORT_REF=
-LOCAL_SANITIZE := signed-integer-overflow
-
-LOCAL_STATIC_LIBRARIES := \
- libstagefright_avcenc
-
-LOCAL_SHARED_LIBRARIES := \
- libstagefright_avc_common
-
-LOCAL_MODULE := libstagefright_h264enc_test
-
-LOCAL_MODULE_TAGS := tests
-
-include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/codecs/avc/enc/MODULE_LICENSE_APACHE2 b/media/libstagefright/codecs/avc/enc/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/media/libstagefright/codecs/avc/enc/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/media/libstagefright/codecs/avc/enc/NOTICE b/media/libstagefright/codecs/avc/enc/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/media/libstagefright/codecs/avc/enc/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
- Copyright (c) 2005-2008, The Android Open Source Project
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
diff --git a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
deleted file mode 100644
index cce6d15..0000000
--- a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
+++ /dev/null
@@ -1,734 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "SoftAVCEncoder"
-#include <utils/Log.h>
-#include <utils/misc.h>
-
-#include "avcenc_api.h"
-#include "avcenc_int.h"
-#include "OMX_Video.h"
-
-#include <HardwareAPI.h>
-#include <MetadataBufferType.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-#include <ui/Rect.h>
-#include <ui/GraphicBufferMapper.h>
-
-#include "SoftAVCEncoder.h"
-
-#if LOG_NDEBUG
-#define UNUSED_UNLESS_VERBOSE(x) (void)(x)
-#else
-#define UNUSED_UNLESS_VERBOSE(x)
-#endif
-
-namespace android {
-
-template<class T>
-static void InitOMXParams(T *params) {
- params->nSize = sizeof(T);
- params->nVersion.s.nVersionMajor = 1;
- params->nVersion.s.nVersionMinor = 0;
- params->nVersion.s.nRevision = 0;
- params->nVersion.s.nStep = 0;
-}
-
-static const CodecProfileLevel kProfileLevels[] = {
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel2 },
-};
-
-typedef struct LevelConversion {
- OMX_U32 omxLevel;
- AVCLevel avcLevel;
- uint32_t maxMacroBlocks;
-} LevelConversion;
-
-static LevelConversion ConversionTable[] = {
- { OMX_VIDEO_AVCLevel1, AVC_LEVEL1_B, 99 },
- { OMX_VIDEO_AVCLevel1b, AVC_LEVEL1, 99 },
- { OMX_VIDEO_AVCLevel11, AVC_LEVEL1_1, 396 },
- { OMX_VIDEO_AVCLevel12, AVC_LEVEL1_2, 396 },
- { OMX_VIDEO_AVCLevel13, AVC_LEVEL1_3, 396 },
- { OMX_VIDEO_AVCLevel2, AVC_LEVEL2, 396 },
-#if 0
- // encoding speed is very poor if video resolution
- // is higher than CIF or if level is higher than 2
- { OMX_VIDEO_AVCLevel21, AVC_LEVEL2_1, 792 },
- { OMX_VIDEO_AVCLevel22, AVC_LEVEL2_2, 1620 },
- { OMX_VIDEO_AVCLevel3, AVC_LEVEL3, 1620 },
- { OMX_VIDEO_AVCLevel31, AVC_LEVEL3_1, 3600 },
- { OMX_VIDEO_AVCLevel32, AVC_LEVEL3_2, 5120 },
- { OMX_VIDEO_AVCLevel4, AVC_LEVEL4, 8192 },
- { OMX_VIDEO_AVCLevel41, AVC_LEVEL4_1, 8192 },
- { OMX_VIDEO_AVCLevel42, AVC_LEVEL4_2, 8704 },
- { OMX_VIDEO_AVCLevel5, AVC_LEVEL5, 22080 },
- { OMX_VIDEO_AVCLevel51, AVC_LEVEL5_1, 36864 },
-#endif
-};
-
-static status_t ConvertOmxAvcLevelToAvcSpecLevel(
- OMX_U32 omxLevel, AVCLevel *avcLevel) {
- for (size_t i = 0, n = sizeof(ConversionTable)/sizeof(ConversionTable[0]);
- i < n; ++i) {
- if (omxLevel == ConversionTable[i].omxLevel) {
- *avcLevel = ConversionTable[i].avcLevel;
- return OK;
- }
- }
-
- ALOGE("ConvertOmxAvcLevelToAvcSpecLevel: %d level not supported",
- (int32_t)omxLevel);
-
- return BAD_VALUE;
-}
-
-static status_t ConvertAvcSpecLevelToOmxAvcLevel(
- AVCLevel avcLevel, OMX_U32 *omxLevel) {
- for (size_t i = 0, n = sizeof(ConversionTable)/sizeof(ConversionTable[0]);
- i < n; ++i) {
- if (avcLevel == ConversionTable[i].avcLevel) {
- *omxLevel = ConversionTable[i].omxLevel;
- return OK;
- }
- }
-
- ALOGE("ConvertAvcSpecLevelToOmxAvcLevel: %d level not supported",
- (int32_t) avcLevel);
-
- return BAD_VALUE;
-}
-
-static void* MallocWrapper(
- void * /* userData */, int32_t size, int32_t /* attrs */) {
- void *ptr = malloc(size);
- if (ptr)
- memset(ptr, 0, size);
- return ptr;
-}
-
-static void FreeWrapper(void * /* userData */, void* ptr) {
- free(ptr);
-}
-
-static int32_t DpbAllocWrapper(void *userData,
- unsigned int sizeInMbs, unsigned int numBuffers) {
- SoftAVCEncoder *encoder = static_cast<SoftAVCEncoder *>(userData);
- CHECK(encoder != NULL);
- return encoder->allocOutputBuffers(sizeInMbs, numBuffers);
-}
-
-static int32_t BindFrameWrapper(
- void *userData, int32_t index, uint8_t **yuv) {
- SoftAVCEncoder *encoder = static_cast<SoftAVCEncoder *>(userData);
- CHECK(encoder != NULL);
- return encoder->bindOutputBuffer(index, yuv);
-}
-
-static void UnbindFrameWrapper(void *userData, int32_t index) {
- SoftAVCEncoder *encoder = static_cast<SoftAVCEncoder *>(userData);
- CHECK(encoder != NULL);
- return encoder->unbindOutputBuffer(index);
-}
-
-SoftAVCEncoder::SoftAVCEncoder(
- const char *name,
- const OMX_CALLBACKTYPE *callbacks,
- OMX_PTR appData,
- OMX_COMPONENTTYPE **component)
- : SoftVideoEncoderOMXComponent(
- name, "video_encoder.avc", OMX_VIDEO_CodingAVC,
- kProfileLevels, NELEM(kProfileLevels),
- 176 /* width */, 144 /* height */,
- callbacks, appData, component),
- mIDRFrameRefreshIntervalInSec(1),
- mAVCEncProfile(AVC_BASELINE),
- mAVCEncLevel(AVC_LEVEL2),
- mNumInputFrames(-1),
- mPrevTimestampUs(-1),
- mStarted(false),
- mSawInputEOS(false),
- mSignalledError(false),
- mHandle(new tagAVCHandle),
- mEncParams(new tagAVCEncParam),
- mInputFrameData(NULL),
- mSliceGroup(NULL) {
-
- const size_t kOutputBufferSize =
- 320 * ConversionTable[NELEM(ConversionTable) - 1].maxMacroBlocks;
-
- initPorts(
- kNumBuffers, kNumBuffers, kOutputBufferSize,
- MEDIA_MIMETYPE_VIDEO_AVC, 2 /* minCompressionRatio */);
-
- ALOGI("Construct SoftAVCEncoder");
-}
-
-SoftAVCEncoder::~SoftAVCEncoder() {
- ALOGV("Destruct SoftAVCEncoder");
- releaseEncoder();
- List<BufferInfo *> &outQueue = getPortQueue(1);
- List<BufferInfo *> &inQueue = getPortQueue(0);
- CHECK(outQueue.empty());
- CHECK(inQueue.empty());
-}
-
-OMX_ERRORTYPE SoftAVCEncoder::initEncParams() {
- CHECK(mHandle != NULL);
- memset(mHandle, 0, sizeof(tagAVCHandle));
- mHandle->AVCObject = NULL;
- mHandle->userData = this;
- mHandle->CBAVC_DPBAlloc = DpbAllocWrapper;
- mHandle->CBAVC_FrameBind = BindFrameWrapper;
- mHandle->CBAVC_FrameUnbind = UnbindFrameWrapper;
- mHandle->CBAVC_Malloc = MallocWrapper;
- mHandle->CBAVC_Free = FreeWrapper;
-
- CHECK(mEncParams != NULL);
- memset(mEncParams, 0, sizeof(*mEncParams));
- mEncParams->rate_control = AVC_ON;
- mEncParams->initQP = 0;
- mEncParams->init_CBP_removal_delay = 1600;
-
- mEncParams->intramb_refresh = 0;
- mEncParams->auto_scd = AVC_ON;
- mEncParams->out_of_band_param_set = AVC_ON;
- mEncParams->poc_type = 2;
- mEncParams->log2_max_poc_lsb_minus_4 = 12;
- mEncParams->delta_poc_zero_flag = 0;
- mEncParams->offset_poc_non_ref = 0;
- mEncParams->offset_top_bottom = 0;
- mEncParams->num_ref_in_cycle = 0;
- mEncParams->offset_poc_ref = NULL;
-
- mEncParams->num_ref_frame = 1;
- mEncParams->num_slice_group = 1;
- mEncParams->fmo_type = 0;
-
- mEncParams->db_filter = AVC_ON;
- mEncParams->disable_db_idc = 0;
-
- mEncParams->alpha_offset = 0;
- mEncParams->beta_offset = 0;
- mEncParams->constrained_intra_pred = AVC_OFF;
-
- mEncParams->data_par = AVC_OFF;
- mEncParams->fullsearch = AVC_OFF;
- mEncParams->search_range = 16;
- mEncParams->sub_pel = AVC_OFF;
- mEncParams->submb_pred = AVC_OFF;
- mEncParams->rdopt_mode = AVC_OFF;
- mEncParams->bidir_pred = AVC_OFF;
-
- mEncParams->use_overrun_buffer = AVC_OFF;
-
- if (mColorFormat != OMX_COLOR_FormatYUV420Planar || mInputDataIsMeta) {
- // Color conversion is needed.
- free(mInputFrameData);
- if (((uint64_t)mWidth * mHeight) > ((uint64_t)INT32_MAX / 3)) {
- ALOGE("Buffer size is too big.");
- return OMX_ErrorUndefined;
- }
- mInputFrameData =
- (uint8_t *) malloc((mWidth * mHeight * 3 ) >> 1);
- CHECK(mInputFrameData != NULL);
- }
-
- // PV's AVC encoder requires the video dimensions to be a multiple of 16
- if (mWidth % 16 != 0 || mHeight % 16 != 0) {
- ALOGE("Video frame size %dx%d must be a multiple of 16",
- mWidth, mHeight);
- return OMX_ErrorBadParameter;
- }
-
- mEncParams->width = mWidth;
- mEncParams->height = mHeight;
- mEncParams->bitrate = mBitrate;
- mEncParams->frame_rate = (1000 * mFramerate) >> 16; // in frames per 1000 seconds; mFramerate is fps in Q16
- mEncParams->CPB_size = (uint32_t) (mBitrate >> 1);
-
- int32_t nMacroBlocks = divUp(mWidth, 16) * divUp(mHeight, 16);
- CHECK(mSliceGroup == NULL);
- if ((size_t)nMacroBlocks > SIZE_MAX / sizeof(uint32_t)) {
- ALOGE("requested memory size is too big.");
- return OMX_ErrorUndefined;
- }
- mSliceGroup = (uint32_t *) malloc(sizeof(uint32_t) * nMacroBlocks);
- CHECK(mSliceGroup != NULL);
- for (int ii = 0, idx = 0; ii < nMacroBlocks; ++ii) {
- mSliceGroup[ii] = idx++;
- if (idx >= mEncParams->num_slice_group) {
- idx = 0;
- }
- }
- mEncParams->slice_group = mSliceGroup;
-
- // Set IDR frame refresh interval
- if (mIDRFrameRefreshIntervalInSec < 0) {
- mEncParams->idr_period = -1;
- } else if (mIDRFrameRefreshIntervalInSec == 0) {
- mEncParams->idr_period = 1; // All I frames
- } else {
- mEncParams->idr_period =
- (mIDRFrameRefreshIntervalInSec * mFramerate) >> 16; // mFramerate is in Q16
- }
-
- // Set profile and level
- mEncParams->profile = mAVCEncProfile;
- mEncParams->level = mAVCEncLevel;
-
- return OMX_ErrorNone;
-}
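// A worked example of the Q16 fixed-point conversions used in initEncParams() above,
// assuming mFramerate holds 30 fps in Q16 and a 1-second IDR refresh interval
// (AVCEncParams::frame_rate is expressed in frames per 1000 seconds, see avcenc_api.h below):
//
//   mFramerate = 30 << 16;                 // = 1966080
//   frame_rate = (1000 * 1966080) >> 16;   // = 30000 frames per 1000 s
//   idr_period = (1 * 1966080) >> 16;      // = 30 frames between IDR frames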
-
-OMX_ERRORTYPE SoftAVCEncoder::initEncoder() {
- CHECK(!mStarted);
-
- OMX_ERRORTYPE errType = OMX_ErrorNone;
- if (OMX_ErrorNone != (errType = initEncParams())) {
- ALOGE("Failed to initialized encoder params");
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
- return errType;
- }
-
- AVCEnc_Status err;
- err = PVAVCEncInitialize(mHandle, mEncParams, NULL, NULL);
- if (err != AVCENC_SUCCESS) {
- ALOGE("Failed to initialize the encoder: %d", err);
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
- return OMX_ErrorUndefined;
- }
-
- mNumInputFrames = -2; // 1st two buffers contain SPS and PPS
- mSpsPpsHeaderReceived = false;
- mReadyForNextFrame = true;
- mIsIDRFrame = false;
- mStarted = true;
-
- return OMX_ErrorNone;
-}
-
-OMX_ERRORTYPE SoftAVCEncoder::releaseEncoder() {
- if (!mStarted) {
- return OMX_ErrorNone;
- }
-
- PVAVCCleanUpEncoder(mHandle);
- releaseOutputBuffers();
-
- free(mInputFrameData);
- mInputFrameData = NULL;
-
- free(mSliceGroup);
- mSliceGroup = NULL;
-
- delete mEncParams;
- mEncParams = NULL;
-
- delete mHandle;
- mHandle = NULL;
-
- mStarted = false;
-
- return OMX_ErrorNone;
-}
-
-void SoftAVCEncoder::releaseOutputBuffers() {
- for (size_t i = 0; i < mOutputBuffers.size(); ++i) {
- MediaBuffer *buffer = mOutputBuffers.editItemAt(i);
- buffer->setObserver(NULL);
- buffer->release();
- }
- mOutputBuffers.clear();
-}
-
-OMX_ERRORTYPE SoftAVCEncoder::internalGetParameter(
- OMX_INDEXTYPE index, OMX_PTR params) {
- switch (index) {
- case OMX_IndexParamVideoBitrate:
- {
- OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
- (OMX_VIDEO_PARAM_BITRATETYPE *) params;
-
- if (!isValidOMXParam(bitRate)) {
- return OMX_ErrorBadParameter;
- }
-
- if (bitRate->nPortIndex != 1) {
- return OMX_ErrorUndefined;
- }
-
- bitRate->eControlRate = OMX_Video_ControlRateVariable;
- bitRate->nTargetBitrate = mBitrate;
- return OMX_ErrorNone;
- }
-
- case OMX_IndexParamVideoAvc:
- {
- OMX_VIDEO_PARAM_AVCTYPE *avcParams =
- (OMX_VIDEO_PARAM_AVCTYPE *)params;
-
- if (!isValidOMXParam(avcParams)) {
- return OMX_ErrorBadParameter;
- }
-
- if (avcParams->nPortIndex != 1) {
- return OMX_ErrorUndefined;
- }
-
- avcParams->eProfile = OMX_VIDEO_AVCProfileBaseline;
- OMX_U32 omxLevel = AVC_LEVEL2;
- if (OMX_ErrorNone !=
- ConvertAvcSpecLevelToOmxAvcLevel(mAVCEncLevel, &omxLevel)) {
- return OMX_ErrorUndefined;
- }
-
- avcParams->eLevel = (OMX_VIDEO_AVCLEVELTYPE) omxLevel;
- avcParams->nRefFrames = 1;
- avcParams->nBFrames = 0;
- avcParams->bUseHadamard = OMX_TRUE;
- avcParams->nAllowedPictureTypes =
- (OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP);
- avcParams->nRefIdx10ActiveMinus1 = 0;
- avcParams->nRefIdx11ActiveMinus1 = 0;
- avcParams->bWeightedPPrediction = OMX_FALSE;
- avcParams->bEntropyCodingCABAC = OMX_FALSE;
- avcParams->bconstIpred = OMX_FALSE;
- avcParams->bDirect8x8Inference = OMX_FALSE;
- avcParams->bDirectSpatialTemporal = OMX_FALSE;
- avcParams->nCabacInitIdc = 0;
- return OMX_ErrorNone;
- }
-
- default:
- return SoftVideoEncoderOMXComponent::internalGetParameter(index, params);
- }
-}
-
-OMX_ERRORTYPE SoftAVCEncoder::internalSetParameter(
- OMX_INDEXTYPE index, const OMX_PTR params) {
- int32_t indexFull = index;
-
- switch (indexFull) {
- case OMX_IndexParamVideoBitrate:
- {
- OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
- (OMX_VIDEO_PARAM_BITRATETYPE *) params;
-
- if (!isValidOMXParam(bitRate)) {
- return OMX_ErrorBadParameter;
- }
-
- if (bitRate->nPortIndex != 1 ||
- bitRate->eControlRate != OMX_Video_ControlRateVariable) {
- return OMX_ErrorUndefined;
- }
-
- mBitrate = bitRate->nTargetBitrate;
- return OMX_ErrorNone;
- }
-
- case OMX_IndexParamVideoAvc:
- {
- OMX_VIDEO_PARAM_AVCTYPE *avcType =
- (OMX_VIDEO_PARAM_AVCTYPE *)params;
-
- if (!isValidOMXParam(avcType)) {
- return OMX_ErrorBadParameter;
- }
-
- if (avcType->nPortIndex != 1) {
- return OMX_ErrorUndefined;
- }
-
- // PV's AVC encoder only supports baseline profile
- if (avcType->eProfile != OMX_VIDEO_AVCProfileBaseline ||
- avcType->nRefFrames != 1 ||
- avcType->nBFrames != 0 ||
- avcType->bUseHadamard != OMX_TRUE ||
- (avcType->nAllowedPictureTypes & OMX_VIDEO_PictureTypeB) != 0 ||
- avcType->nRefIdx10ActiveMinus1 != 0 ||
- avcType->nRefIdx11ActiveMinus1 != 0 ||
- avcType->bWeightedPPrediction != OMX_FALSE ||
- avcType->bEntropyCodingCABAC != OMX_FALSE ||
- avcType->bconstIpred != OMX_FALSE ||
- avcType->bDirect8x8Inference != OMX_FALSE ||
- avcType->bDirectSpatialTemporal != OMX_FALSE ||
- avcType->nCabacInitIdc != 0) {
- return OMX_ErrorUndefined;
- }
-
- if (OK != ConvertOmxAvcLevelToAvcSpecLevel(avcType->eLevel, &mAVCEncLevel)) {
- return OMX_ErrorUndefined;
- }
-
- return OMX_ErrorNone;
- }
-
- default:
- return SoftVideoEncoderOMXComponent::internalSetParameter(index, params);
- }
-}
-
-void SoftAVCEncoder::onQueueFilled(OMX_U32 /* portIndex */) {
- if (mSignalledError || mSawInputEOS) {
- return;
- }
-
- if (!mStarted) {
- if (OMX_ErrorNone != initEncoder()) {
- return;
- }
- }
-
- List<BufferInfo *> &inQueue = getPortQueue(0);
- List<BufferInfo *> &outQueue = getPortQueue(1);
-
- while (!mSawInputEOS && !inQueue.empty() && !outQueue.empty()) {
- BufferInfo *inInfo = *inQueue.begin();
- OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
- BufferInfo *outInfo = *outQueue.begin();
- OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
-
- outHeader->nTimeStamp = 0;
- outHeader->nFlags = 0;
- outHeader->nOffset = 0;
- outHeader->nFilledLen = 0;
- outHeader->nOffset = 0;
-
- uint8_t *outPtr = (uint8_t *) outHeader->pBuffer;
- uint32_t dataLength = outHeader->nAllocLen;
-
- if (!mSpsPpsHeaderReceived && mNumInputFrames < 0) {
- // 4 bytes are reserved for holding the start code 0x00000001
- // of the sequence parameter set at the beginning.
- outPtr += 4;
- dataLength -= 4;
- }
-
- int32_t type;
- AVCEnc_Status encoderStatus = AVCENC_SUCCESS;
-
- // Combine SPS and PPS and place them in the very first output buffer
- // SPS and PPS are separated by start code 0x00000001
- // Assume that we have exactly one SPS and exactly one PPS.
- while (!mSpsPpsHeaderReceived && mNumInputFrames <= 0) {
- encoderStatus = PVAVCEncodeNAL(mHandle, outPtr, &dataLength, &type);
- if (encoderStatus == AVCENC_WRONG_STATE) {
- mSpsPpsHeaderReceived = true;
- CHECK_EQ(0, mNumInputFrames); // 1st video frame is 0
- outHeader->nFlags = OMX_BUFFERFLAG_CODECCONFIG;
- outQueue.erase(outQueue.begin());
- outInfo->mOwnedByUs = false;
- notifyFillBufferDone(outHeader);
- return;
- } else {
- switch (type) {
- case AVC_NALTYPE_SPS:
- ++mNumInputFrames;
- memcpy((uint8_t *)outHeader->pBuffer, "\x00\x00\x00\x01", 4);
- outHeader->nFilledLen = 4 + dataLength;
- outPtr += (dataLength + 4); // 4 bytes for next start code
- dataLength = outHeader->nAllocLen - outHeader->nFilledLen;
- break;
- default:
- CHECK_EQ(AVC_NALTYPE_PPS, type);
- ++mNumInputFrames;
- memcpy((uint8_t *) outHeader->pBuffer + outHeader->nFilledLen,
- "\x00\x00\x00\x01", 4);
- outHeader->nFilledLen += (dataLength + 4);
- outPtr += (dataLength + 4);
- break;
- }
- }
- }
-
- // Get next input video frame
- if (mReadyForNextFrame) {
- // Save the input buffer info so that it can be
- // passed to an output buffer
- InputBufferInfo info;
- info.mTimeUs = inHeader->nTimeStamp;
- info.mFlags = inHeader->nFlags;
- mInputBufferInfoVec.push(info);
- mPrevTimestampUs = inHeader->nTimeStamp;
-
- if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
- mSawInputEOS = true;
- }
-
- if (inHeader->nFilledLen > 0) {
- AVCFrameIO videoInput;
- memset(&videoInput, 0, sizeof(videoInput));
- videoInput.height = align(mHeight, 16);
- videoInput.pitch = align(mWidth, 16);
- videoInput.coding_timestamp = (inHeader->nTimeStamp + 500) / 1000; // in ms
- const uint8_t *inputData = NULL;
- if (mInputDataIsMeta) {
- inputData =
- extractGraphicBuffer(
- mInputFrameData, (mWidth * mHeight * 3) >> 1,
- inHeader->pBuffer + inHeader->nOffset, inHeader->nFilledLen,
- mWidth, mHeight);
- if (inputData == NULL) {
- ALOGE("Unable to extract gralloc buffer in metadata mode");
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
- return;
- }
- // TODO: Verify/convert pixel format enum
- } else {
- inputData = (const uint8_t *)inHeader->pBuffer + inHeader->nOffset;
- if (mColorFormat != OMX_COLOR_FormatYUV420Planar) {
- ConvertYUV420SemiPlanarToYUV420Planar(
- inputData, mInputFrameData, mWidth, mHeight);
- inputData = mInputFrameData;
- }
- }
-
- CHECK(inputData != NULL);
- videoInput.YCbCr[0] = (uint8_t *)inputData;
- videoInput.YCbCr[1] = videoInput.YCbCr[0] + videoInput.height * videoInput.pitch;
- videoInput.YCbCr[2] = videoInput.YCbCr[1] +
- ((videoInput.height * videoInput.pitch) >> 2);
- videoInput.disp_order = mNumInputFrames;
-
- encoderStatus = PVAVCEncSetInput(mHandle, &videoInput);
- if (encoderStatus == AVCENC_SUCCESS || encoderStatus == AVCENC_NEW_IDR) {
- mReadyForNextFrame = false;
- ++mNumInputFrames;
- if (encoderStatus == AVCENC_NEW_IDR) {
- mIsIDRFrame = 1;
- }
- } else {
- if (encoderStatus < AVCENC_SUCCESS) {
- ALOGE("encoderStatus = %d at line %d", encoderStatus, __LINE__);
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
- return;
- } else {
- ALOGV("encoderStatus = %d at line %d", encoderStatus, __LINE__);
- inQueue.erase(inQueue.begin());
- inInfo->mOwnedByUs = false;
- notifyEmptyBufferDone(inHeader);
- return;
- }
- }
- }
- }
-
- // Encode an input video frame
- CHECK(encoderStatus == AVCENC_SUCCESS || encoderStatus == AVCENC_NEW_IDR);
- dataLength = outHeader->nAllocLen; // Reset the output buffer length
- if (inHeader->nFilledLen > 0) {
- if (outHeader->nAllocLen >= 4) {
- memcpy(outPtr, "\x00\x00\x00\x01", 4);
- outPtr += 4;
- dataLength -= 4;
- }
- encoderStatus = PVAVCEncodeNAL(mHandle, outPtr, &dataLength, &type);
- dataLength = outPtr + dataLength - outHeader->pBuffer;
- if (encoderStatus == AVCENC_SUCCESS) {
- CHECK(NULL == PVAVCEncGetOverrunBuffer(mHandle));
- } else if (encoderStatus == AVCENC_PICTURE_READY) {
- CHECK(NULL == PVAVCEncGetOverrunBuffer(mHandle));
- if (mIsIDRFrame) {
- outHeader->nFlags |= OMX_BUFFERFLAG_SYNCFRAME;
- mIsIDRFrame = false;
- }
- mReadyForNextFrame = true;
- AVCFrameIO recon;
- if (PVAVCEncGetRecon(mHandle, &recon) == AVCENC_SUCCESS) {
- PVAVCEncReleaseRecon(mHandle, &recon);
- }
- } else {
- dataLength = 0;
- mReadyForNextFrame = true;
- }
-
- if (encoderStatus < AVCENC_SUCCESS) {
- ALOGE("encoderStatus = %d at line %d", encoderStatus, __LINE__);
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
- return;
- }
- } else {
- dataLength = 0;
- }
-
- inQueue.erase(inQueue.begin());
- inInfo->mOwnedByUs = false;
- notifyEmptyBufferDone(inHeader);
-
- outQueue.erase(outQueue.begin());
- CHECK(!mInputBufferInfoVec.empty());
- InputBufferInfo *inputBufInfo = mInputBufferInfoVec.begin();
- outHeader->nTimeStamp = inputBufInfo->mTimeUs;
- outHeader->nFlags |= (inputBufInfo->mFlags | OMX_BUFFERFLAG_ENDOFFRAME);
- if (mSawInputEOS) {
- outHeader->nFlags |= OMX_BUFFERFLAG_EOS;
- }
- outHeader->nFilledLen = dataLength;
- outInfo->mOwnedByUs = false;
- notifyFillBufferDone(outHeader);
- mInputBufferInfoVec.erase(mInputBufferInfoVec.begin());
- }
-}
-
-int32_t SoftAVCEncoder::allocOutputBuffers(
- unsigned int sizeInMbs, unsigned int numBuffers) {
- CHECK(mOutputBuffers.isEmpty());
- size_t frameSize = (sizeInMbs << 7) * 3;
- for (unsigned int i = 0; i < numBuffers; ++i) {
- MediaBuffer *buffer = new MediaBuffer(frameSize);
- buffer->setObserver(this);
- mOutputBuffers.push(buffer);
- }
-
- return 1;
-}
-
-void SoftAVCEncoder::unbindOutputBuffer(int32_t index) {
- CHECK(index >= 0);
-}
-
-int32_t SoftAVCEncoder::bindOutputBuffer(int32_t index, uint8_t **yuv) {
- CHECK(index >= 0);
- CHECK(index < (int32_t) mOutputBuffers.size());
- *yuv = (uint8_t *) mOutputBuffers[index]->data();
-
- return 1;
-}
-
-void SoftAVCEncoder::signalBufferReturned(MediaBuffer *buffer) {
- UNUSED_UNLESS_VERBOSE(buffer);
- ALOGV("signalBufferReturned: %p", buffer);
-}
-
-} // namespace android
-
-android::SoftOMXComponent *createSoftOMXComponent(
- const char *name, const OMX_CALLBACKTYPE *callbacks,
- OMX_PTR appData, OMX_COMPONENTTYPE **component) {
- return new android::SoftAVCEncoder(name, callbacks, appData, component);
-}
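For orientation, the component deleted above drives the PacketVideo AVC encoder (whose API is removed further below in avcenc_api.cpp/avcenc_api.h) as a small state machine: PVAVCEncInitialize() sets the codec up, the first PVAVCEncodeNAL() calls emit the out-of-band SPS and PPS, each frame is then handed over with PVAVCEncSetInput() and drained slice by slice with PVAVCEncodeNAL() until AVCENC_PICTURE_READY, and PVAVCCleanUpEncoder() releases everything. The following is only a rough sketch of that call sequence; the zero-initialized structures, the fixed 64 KB buffer, and the encodeOneFrameSketch() wrapper are illustrative placeholders, and in practice the AVCHandle callbacks and AVCEncParams must be populated the way initEncParams()/initEncoder() do above.

    #include "avcenc_api.h"

    static void encodeOneFrameSketch() {
        AVCHandle handle = {};        // CBAVC_Malloc, CBAVC_Free, CBAVC_DPBAlloc, ... set by the caller
        AVCEncParams params = {};     // width, height, bitrate, frame_rate, out_of_band_param_set = AVC_ON, ...
        if (PVAVCEncInitialize(&handle, &params, NULL, NULL) != AVCENC_SUCCESS)
            return;

        unsigned char buffer[64 * 1024];
        unsigned int nalSize;
        int nalType;

        // With out-of-band parameter sets, the first two NALs are the SPS and the PPS.
        nalSize = sizeof(buffer);
        PVAVCEncodeNAL(&handle, buffer, &nalSize, &nalType);    // nalType == AVC_NALTYPE_SPS
        nalSize = sizeof(buffer);
        PVAVCEncodeNAL(&handle, buffer, &nalSize, &nalType);    // nalType == AVC_NALTYPE_PPS

        AVCFrameIO input = {};        // YCbCr plane pointers, pitch, height, coding_timestamp set by the caller
        AVCEnc_Status status = PVAVCEncSetInput(&handle, &input);
        if (status == AVCENC_SUCCESS || status == AVCENC_NEW_IDR) {
            do {
                nalSize = sizeof(buffer);
                status = PVAVCEncodeNAL(&handle, buffer, &nalSize, &nalType);  // one slice per call
            } while (status == AVCENC_SUCCESS);                 // AVCENC_PICTURE_READY ends the frame

            AVCFrameIO recon;
            if (PVAVCEncGetRecon(&handle, &recon) == AVCENC_SUCCESS)
                PVAVCEncReleaseRecon(&handle, &recon);
        }

        PVAVCCleanUpEncoder(&handle);
    }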
diff --git a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h
deleted file mode 100644
index 81de109..0000000
--- a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef SOFT_AVC_ENCODER_H_
-#define SOFT_AVC_ENCODER_H_
-
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/foundation/ABase.h>
-#include <utils/Vector.h>
-
-#include "avcenc_api.h"
-#include "SoftVideoEncoderOMXComponent.h"
-
-namespace android {
-
-struct SoftAVCEncoder : public MediaBufferObserver,
- public SoftVideoEncoderOMXComponent {
- SoftAVCEncoder(
- const char *name,
- const OMX_CALLBACKTYPE *callbacks,
- OMX_PTR appData,
- OMX_COMPONENTTYPE **component);
-
- // Override SimpleSoftOMXComponent methods
- virtual OMX_ERRORTYPE internalGetParameter(
- OMX_INDEXTYPE index, OMX_PTR params);
-
- virtual OMX_ERRORTYPE internalSetParameter(
- OMX_INDEXTYPE index, const OMX_PTR params);
-
- virtual void onQueueFilled(OMX_U32 portIndex);
-
- // Implement MediaBufferObserver
- virtual void signalBufferReturned(MediaBuffer *buffer);
-
-
- // Callbacks required by PV's encoder
- int32_t allocOutputBuffers(unsigned int sizeInMbs, unsigned int numBuffers);
- void unbindOutputBuffer(int32_t index);
- int32_t bindOutputBuffer(int32_t index, uint8_t **yuv);
-
-protected:
- virtual ~SoftAVCEncoder();
-
-private:
- enum {
- kNumBuffers = 2,
- };
-
- // OMX input buffer's timestamp and flags
- typedef struct {
- int64_t mTimeUs;
- int32_t mFlags;
- } InputBufferInfo;
-
- int32_t mIDRFrameRefreshIntervalInSec;
- AVCProfile mAVCEncProfile;
- AVCLevel mAVCEncLevel;
-
- int64_t mNumInputFrames;
- int64_t mPrevTimestampUs;
- bool mStarted;
- bool mSpsPpsHeaderReceived;
- bool mReadyForNextFrame;
- bool mSawInputEOS;
- bool mSignalledError;
- bool mIsIDRFrame;
-
- tagAVCHandle *mHandle;
- tagAVCEncParam *mEncParams;
- uint8_t *mInputFrameData;
- uint32_t *mSliceGroup;
- Vector<MediaBuffer *> mOutputBuffers;
- Vector<InputBufferInfo> mInputBufferInfoVec;
-
- OMX_ERRORTYPE initEncParams();
- OMX_ERRORTYPE initEncoder();
- OMX_ERRORTYPE releaseEncoder();
- void releaseOutputBuffers();
-
- DISALLOW_EVIL_CONSTRUCTORS(SoftAVCEncoder);
-};
-
-} // namespace android
-
-#endif // SOFT_AVC_ENCODER_H_
diff --git a/media/libstagefright/codecs/avc/enc/src/avcenc_api.cpp b/media/libstagefright/codecs/avc/enc/src/avcenc_api.cpp
deleted file mode 100644
index cf14e10..0000000
--- a/media/libstagefright/codecs/avc/enc/src/avcenc_api.cpp
+++ /dev/null
@@ -1,736 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_api.h"
-#include "avcenc_lib.h"
-
-/* ======================================================================== */
-/* Function : PVAVCEncGetNALType() */
-/* Date : 11/4/2003 */
-/* Purpose : Sniff NAL type from the bitstream */
-/* In/out : */
-/* Return : AVCENC_SUCCESS if succeed, AVCENC_FAIL if fail. */
-/* Modified : */
-/* ======================================================================== */
-OSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetNALType(unsigned char *bitstream, int size,
- int *nal_type, int *nal_ref_idc)
-{
- int forbidden_zero_bit;
- if (size > 0)
- {
- forbidden_zero_bit = bitstream[0] >> 7;
- if (forbidden_zero_bit != 0)
- return AVCENC_FAIL;
- *nal_ref_idc = (bitstream[0] & 0x60) >> 5;
- *nal_type = bitstream[0] & 0x1F;
- return AVCENC_SUCCESS;
- }
-
- return AVCENC_FAIL;
-}
-
-
-/* ======================================================================== */
-/* Function : PVAVCEncInitialize() */
-/* Date : 3/18/2004 */
-/* Purpose : Initialize the encoder library, allocate memory and verify */
-/* the profile/level support/settings. */
-/* In/out : Encoding parameters. */
-/* Return : AVCENC_SUCCESS for success. */
-/* Modified : */
-/* ======================================================================== */
-OSCL_EXPORT_REF AVCEnc_Status PVAVCEncInitialize(AVCHandle *avcHandle, AVCEncParams *encParam,
- void* extSPS, void* extPPS)
-{
- AVCEnc_Status status;
- AVCEncObject *encvid;
- AVCCommonObj *video;
- uint32 *userData = (uint32*) avcHandle->userData;
- int framesize;
-
- if (avcHandle->AVCObject != NULL)
- {
- return AVCENC_ALREADY_INITIALIZED; /* It's already initialized, need to cleanup first */
- }
-
- /* not initialized */
-
- /* allocate videoObject */
- avcHandle->AVCObject = (void*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCEncObject), DEFAULT_ATTR);
- if (avcHandle->AVCObject == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- encvid = (AVCEncObject*) avcHandle->AVCObject;
-
- encvid->enc_state = AVCEnc_Initializing;
-
- encvid->avcHandle = avcHandle;
-
- encvid->common = (AVCCommonObj*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCCommonObj), DEFAULT_ATTR);
- if (encvid->common == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- video = encvid->common;
-
- /* allocate bitstream structure */
- encvid->bitstream = (AVCEncBitstream*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCEncBitstream), DEFAULT_ATTR);
- if (encvid->bitstream == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
- encvid->bitstream->encvid = encvid; /* to point back for reallocation */
-
- /* allocate sequence parameter set structure */
- video->currSeqParams = (AVCSeqParamSet*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSeqParamSet), DEFAULT_ATTR);
- if (video->currSeqParams == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- /* allocate picture parameter set structure */
- video->currPicParams = (AVCPicParamSet*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCPicParamSet), DEFAULT_ATTR);
- if (video->currPicParams == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- /* allocate slice header structure */
- video->sliceHdr = (AVCSliceHeader*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSliceHeader), DEFAULT_ATTR);
- if (video->sliceHdr == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- /* allocate encoded picture buffer structure*/
- video->decPicBuf = (AVCDecPicBuffer*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecPicBuffer), DEFAULT_ATTR);
- if (video->decPicBuf == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- /* allocate rate control structure */
- encvid->rateCtrl = (AVCRateControl*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCRateControl), DEFAULT_ATTR);
- if (encvid->rateCtrl == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- /* reset frame list, not really needed */
- video->currPic = NULL;
- video->currFS = NULL;
- encvid->currInput = NULL;
- video->prevRefPic = NULL;
-
- /* now read encParams, and allocate dimension-dependent variables */
- /* such as mblock */
- status = SetEncodeParam(avcHandle, encParam, extSPS, extPPS); /* initialized variables to be used in SPS*/
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- if (encParam->use_overrun_buffer == AVC_ON)
- {
- /* allocate overrun buffer */
- encvid->oBSize = encvid->rateCtrl->cpbSize;
- if (encvid->oBSize > DEFAULT_OVERRUN_BUFFER_SIZE)
- {
- encvid->oBSize = DEFAULT_OVERRUN_BUFFER_SIZE;
- }
- encvid->overrunBuffer = (uint8*) avcHandle->CBAVC_Malloc(userData, encvid->oBSize, DEFAULT_ATTR);
- if (encvid->overrunBuffer == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
- }
- else
- {
- encvid->oBSize = 0;
- encvid->overrunBuffer = NULL;
- }
-
- /* allocate frame size dependent structures */
- framesize = video->FrameHeightInMbs * video->PicWidthInMbs;
-
- video->mblock = (AVCMacroblock*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCMacroblock) * framesize, DEFAULT_ATTR);
- if (video->mblock == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- video->MbToSliceGroupMap = (int*) avcHandle->CBAVC_Malloc(userData, sizeof(uint) * video->PicSizeInMapUnits * 2, DEFAULT_ATTR);
- if (video->MbToSliceGroupMap == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- encvid->mot16x16 = (AVCMV*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCMV) * framesize, DEFAULT_ATTR);
- if (encvid->mot16x16 == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- encvid->intraSearch = (uint8*) avcHandle->CBAVC_Malloc(userData, sizeof(uint8) * framesize, DEFAULT_ATTR);
- if (encvid->intraSearch == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- encvid->min_cost = (int*) avcHandle->CBAVC_Malloc(userData, sizeof(int) * framesize, DEFAULT_ATTR);
- if (encvid->min_cost == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- /* initialize motion search related memory */
- if (AVCENC_SUCCESS != InitMotionSearchModule(avcHandle))
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- if (AVCENC_SUCCESS != InitRateControlModule(avcHandle))
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- /* initialize function pointers */
- encvid->functionPointer = (AVCEncFuncPtr*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCEncFuncPtr), DEFAULT_ATTR);
- if (encvid->functionPointer == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
- encvid->functionPointer->SAD_Macroblock = &AVCSAD_Macroblock_C;
- encvid->functionPointer->SAD_MB_HalfPel[0] = NULL;
- encvid->functionPointer->SAD_MB_HalfPel[1] = &AVCSAD_MB_HalfPel_Cxh;
- encvid->functionPointer->SAD_MB_HalfPel[2] = &AVCSAD_MB_HalfPel_Cyh;
- encvid->functionPointer->SAD_MB_HalfPel[3] = &AVCSAD_MB_HalfPel_Cxhyh;
-
- /* initialize timing control */
- encvid->modTimeRef = 0; /* ALWAYS ASSUME THAT TIMESTAMP START FROM 0 !!!*/
- video->prevFrameNum = 0;
- encvid->prevCodedFrameNum = 0;
- encvid->dispOrdPOCRef = 0;
-
- if (encvid->outOfBandParamSet == TRUE)
- {
- encvid->enc_state = AVCEnc_Encoding_SPS;
- }
- else
- {
- encvid->enc_state = AVCEnc_Analyzing_Frame;
- }
-
- return AVCENC_SUCCESS;
-}
-
-/* ======================================================================== */
-/* Function : PVAVCEncGetMaxOutputSize() */
-/* Date : 11/29/2008 */
-/* Purpose : Return max output buffer size that apps should allocate for */
-/* output buffer. */
-/* In/out : */
-/* Return : AVCENC_SUCCESS for success. */
-/* Modified : size */
-/* ======================================================================== */
-
-OSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetMaxOutputBufferSize(AVCHandle *avcHandle, int* size)
-{
- AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
-
- if (encvid == NULL)
- {
- return AVCENC_UNINITIALIZED;
- }
-
- *size = encvid->rateCtrl->cpbSize;
-
- return AVCENC_SUCCESS;
-}
-
-/* ======================================================================== */
-/* Function : PVAVCEncSetInput() */
-/* Date : 4/18/2004 */
-/* Purpose : To feed an unencoded original frame to the encoder library. */
-/* In/out : */
-/* Return : AVCENC_SUCCESS for success. */
-/* Modified : */
-/* ======================================================================== */
-OSCL_EXPORT_REF AVCEnc_Status PVAVCEncSetInput(AVCHandle *avcHandle, AVCFrameIO *input)
-{
- AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
-
- if (encvid == NULL)
- {
- return AVCENC_UNINITIALIZED;
- }
-
- AVCCommonObj *video = encvid->common;
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- AVCEnc_Status status;
- uint frameNum;
-
- if (encvid->enc_state == AVCEnc_WaitingForBuffer)
- {
- goto RECALL_INITFRAME;
- }
- else if (encvid->enc_state != AVCEnc_Analyzing_Frame)
- {
- return AVCENC_FAIL;
- }
-
- if (input->pitch > 0xFFFF)
- {
- return AVCENC_NOT_SUPPORTED; // we use 2-bytes for pitch
- }
-
- /***********************************/
-
- /* Let's rate control decide whether to encode this frame or not */
- /* Also set video->nal_unit_type, sliceHdr->slice_type, video->slice_type */
- if (AVCENC_SUCCESS != RCDetermineFrameNum(encvid, rateCtrl, input->coding_timestamp, &frameNum))
- {
- return AVCENC_SKIPPED_PICTURE; /* not time to encode, thus skipping */
- }
-
- /* we may not need this line */
- //nextFrmModTime = (uint32)((((frameNum+1)*1000)/rateCtrl->frame_rate) + modTimeRef); /* rec. time */
- //encvid->nextModTime = nextFrmModTime - (encvid->frameInterval>>1) - 1; /* between current and next frame */
-
- encvid->currInput = input;
- encvid->currInput->coding_order = frameNum;
-
-RECALL_INITFRAME:
- /* initialize and analyze the frame */
- status = InitFrame(encvid);
-
- if (status == AVCENC_SUCCESS)
- {
- encvid->enc_state = AVCEnc_Encoding_Frame;
- }
- else if (status == AVCENC_NEW_IDR)
- {
- if (encvid->outOfBandParamSet == TRUE)
- {
- encvid->enc_state = AVCEnc_Encoding_Frame;
- }
- else // assuming that in-band paramset keeps sending new SPS and PPS.
- {
- encvid->enc_state = AVCEnc_Encoding_SPS;
- //video->currSeqParams->seq_parameter_set_id++;
- //if(video->currSeqParams->seq_parameter_set_id > 31) // range check
- {
- video->currSeqParams->seq_parameter_set_id = 0; // reset
- }
- }
-
- video->sliceHdr->idr_pic_id++;
- if (video->sliceHdr->idr_pic_id > 65535) // range check
- {
- video->sliceHdr->idr_pic_id = 0; // reset
- }
- }
- /* the following logics need to be revisited */
- else if (status == AVCENC_PICTURE_READY) // no buffers returned back to the encoder
- {
- encvid->enc_state = AVCEnc_WaitingForBuffer; // Input accepted but can't continue
- // need to free up some memory before proceeding with Encode
- }
-
- return status; // return status, including the AVCENC_FAIL case and all 3 above.
-}
-
-/* ======================================================================== */
-/* Function : PVAVCEncodeNAL() */
-/* Date : 4/29/2004 */
-/* Purpose : To encode one NAL/slice. */
-/* In/out : */
-/* Return : AVCENC_SUCCESS for success. */
-/* Modified : */
-/* ======================================================================== */
-OSCL_EXPORT_REF AVCEnc_Status PVAVCEncodeNAL(AVCHandle *avcHandle, unsigned char *buffer, unsigned int *buf_nal_size, int *nal_type)
-{
- AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
- if (encvid == NULL)
- {
- return AVCENC_UNINITIALIZED;
- }
-
- AVCCommonObj *video = encvid->common;
- AVCEncBitstream *bitstream = encvid->bitstream;
- AVCEnc_Status status;
-
- switch (encvid->enc_state)
- {
- case AVCEnc_Initializing:
- return AVCENC_UNINITIALIZED;
- case AVCEnc_Encoding_SPS:
- /* initialized the structure */
- BitstreamEncInit(bitstream, buffer, *buf_nal_size, NULL, 0);
- BitstreamWriteBits(bitstream, 8, (1 << 5) | AVC_NALTYPE_SPS);
-
- /* encode SPS */
- status = EncodeSPS(encvid, bitstream);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- /* closing the NAL with trailing bits */
- status = BitstreamTrailingBits(bitstream, buf_nal_size);
- if (status == AVCENC_SUCCESS)
- {
- encvid->enc_state = AVCEnc_Encoding_PPS;
- video->currPicParams->seq_parameter_set_id = video->currSeqParams->seq_parameter_set_id;
- video->currPicParams->pic_parameter_set_id++;
- *nal_type = AVC_NALTYPE_SPS;
- *buf_nal_size = bitstream->write_pos;
- }
- break;
- case AVCEnc_Encoding_PPS:
- /* initialize the structure */
- BitstreamEncInit(bitstream, buffer, *buf_nal_size, NULL, 0);
- BitstreamWriteBits(bitstream, 8, (1 << 5) | AVC_NALTYPE_PPS);
-
- /* encode PPS */
- status = EncodePPS(encvid, bitstream);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- /* closing the NAL with trailing bits */
- status = BitstreamTrailingBits(bitstream, buf_nal_size);
- if (status == AVCENC_SUCCESS)
- {
- if (encvid->outOfBandParamSet == TRUE) // SPS and PPS already extracted out of band
- {
- encvid->enc_state = AVCEnc_Analyzing_Frame;
- }
- else // SetInput has been called before SPS and PPS.
- {
- encvid->enc_state = AVCEnc_Encoding_Frame;
- }
-
- *nal_type = AVC_NALTYPE_PPS;
- *buf_nal_size = bitstream->write_pos;
- }
- break;
-
- case AVCEnc_Encoding_Frame:
- /* initialize the structure */
- BitstreamEncInit(bitstream, buffer, *buf_nal_size, encvid->overrunBuffer, encvid->oBSize);
- BitstreamWriteBits(bitstream, 8, (video->nal_ref_idc << 5) | (video->nal_unit_type));
-
- /* Re-order the reference list according to the ref_pic_list_reordering() */
- /* We don't have to reorder the list for the encoder here. This can only be done
- after we encode this slice. We can run thru a second-pass to see if new ordering
- would save more bits. Too much delay !! */
- /* status = ReOrderList(video);*/
- status = InitSlice(encvid);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- /* when we have everything, we encode the slice header */
- status = EncodeSliceHeader(encvid, bitstream);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- status = AVCEncodeSlice(encvid);
-
- video->slice_id++;
-
- /* closing the NAL with trailing bits */
- BitstreamTrailingBits(bitstream, buf_nal_size);
-
- *buf_nal_size = bitstream->write_pos;
-
- encvid->rateCtrl->numFrameBits += ((*buf_nal_size) << 3);
-
- *nal_type = video->nal_unit_type;
-
- if (status == AVCENC_PICTURE_READY)
- {
- status = RCUpdateFrame(encvid);
- if (status == AVCENC_SKIPPED_PICTURE) /* skip current frame */
- {
- DPBReleaseCurrentFrame(avcHandle, video);
- encvid->enc_state = AVCEnc_Analyzing_Frame;
-
- return status;
- }
-
- /* perform loop-filtering on the entire frame */
- DeblockPicture(video);
-
- /* update the original frame array */
- encvid->prevCodedFrameNum = encvid->currInput->coding_order;
-
- /* store the encoded picture in the DPB buffer */
- StorePictureInDPB(avcHandle, video);
-
- if (video->currPic->isReference)
- {
- video->PrevRefFrameNum = video->sliceHdr->frame_num;
- }
-
- /* update POC related variables */
- PostPOC(video);
-
- encvid->enc_state = AVCEnc_Analyzing_Frame;
- status = AVCENC_PICTURE_READY;
-
- }
- break;
- default:
- status = AVCENC_WRONG_STATE;
- }
-
- return status;
-}
-
-/* ======================================================================== */
-/* Function : PVAVCEncGetOverrunBuffer() */
-/* Purpose : To retrieve the overrun buffer. Check whether overrun buffer */
-/* is used or not before returning */
-/* In/out : */
-/* Return : Pointer to the internal overrun buffer. */
-/* Modified : */
-/* ======================================================================== */
-OSCL_EXPORT_REF uint8* PVAVCEncGetOverrunBuffer(AVCHandle* avcHandle)
-{
- AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
- AVCEncBitstream *bitstream = encvid->bitstream;
-
- if (bitstream->overrunBuffer == bitstream->bitstreamBuffer) /* OB is used */
- {
- return encvid->overrunBuffer;
- }
- else
- {
- return NULL;
- }
-}
-
-
-/* ======================================================================== */
-/* Function : PVAVCEncGetRecon() */
-/* Date : 4/29/2004 */
-/* Purpose : To retrieve the most recently encoded frame. */
-/* assume that user will make a copy if they want to hold on */
-/* to it. Otherwise, it is not guaranteed to be reserved. */
-/* Most applications prefer to see original frame rather than */
-/* reconstructed frame. So, we are staying away from complex */
-/* buffering mechanism. If needed, can be added later. */
-/* In/out : */
-/* Return : AVCENC_SUCCESS for success. */
-/* Modified : */
-/* ======================================================================== */
-OSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetRecon(AVCHandle *avcHandle, AVCFrameIO *recon)
-{
- AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
- if (encvid == NULL)
- {
- return AVCENC_UNINITIALIZED;
- }
-
- AVCCommonObj *video = encvid->common;
- AVCFrameStore *currFS = video->currFS;
-
- recon->YCbCr[0] = currFS->frame.Sl;
- recon->YCbCr[1] = currFS->frame.Scb;
- recon->YCbCr[2] = currFS->frame.Scr;
- recon->height = currFS->frame.height;
- recon->pitch = currFS->frame.pitch;
- recon->disp_order = currFS->PicOrderCnt;
- recon->coding_order = currFS->FrameNum;
- recon->id = (intptr_t) currFS->base_dpb; /* use the pointer as the id */
-
- currFS->IsOutputted |= 1;
-
- return AVCENC_SUCCESS;
-}
-
-OSCL_EXPORT_REF AVCEnc_Status PVAVCEncReleaseRecon(AVCHandle *avcHandle, AVCFrameIO *recon)
-{
- OSCL_UNUSED_ARG(avcHandle);
- OSCL_UNUSED_ARG(recon);
-
- return AVCENC_SUCCESS; //for now
-}
-
-/* ======================================================================== */
-/* Function : PVAVCCleanUpEncoder() */
-/* Date : 4/18/2004 */
-/* Purpose : To clean up memories allocated by PVAVCEncInitialize() */
-/* In/out : */
-/* Return : AVCENC_SUCCESS for success. */
-/* Modified : */
-/* ======================================================================== */
-OSCL_EXPORT_REF void PVAVCCleanUpEncoder(AVCHandle *avcHandle)
-{
- AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
- AVCCommonObj *video;
- uint32 *userData = (uint32*) avcHandle->userData;
-
- if (encvid != NULL)
- {
- CleanMotionSearchModule(avcHandle);
-
- CleanupRateControlModule(avcHandle);
-
- if (encvid->functionPointer != NULL)
- {
- avcHandle->CBAVC_Free(userData, encvid->functionPointer);
- }
-
- if (encvid->min_cost)
- {
- avcHandle->CBAVC_Free(userData, encvid->min_cost);
- }
-
- if (encvid->intraSearch)
- {
- avcHandle->CBAVC_Free(userData, encvid->intraSearch);
- }
-
- if (encvid->mot16x16)
- {
- avcHandle->CBAVC_Free(userData, encvid->mot16x16);
- }
-
- if (encvid->rateCtrl)
- {
- avcHandle->CBAVC_Free(userData, encvid->rateCtrl);
- }
-
- if (encvid->overrunBuffer)
- {
- avcHandle->CBAVC_Free(userData, encvid->overrunBuffer);
- }
-
- video = encvid->common;
- if (video != NULL)
- {
- if (video->MbToSliceGroupMap)
- {
- avcHandle->CBAVC_Free(userData, video->MbToSliceGroupMap);
- }
- if (video->mblock != NULL)
- {
- avcHandle->CBAVC_Free(userData, video->mblock);
- }
- if (video->decPicBuf != NULL)
- {
- CleanUpDPB(avcHandle, video);
- avcHandle->CBAVC_Free(userData, video->decPicBuf);
- }
- if (video->sliceHdr != NULL)
- {
- avcHandle->CBAVC_Free(userData, video->sliceHdr);
- }
- if (video->currPicParams != NULL)
- {
- if (video->currPicParams->slice_group_id)
- {
- avcHandle->CBAVC_Free(userData, video->currPicParams->slice_group_id);
- }
-
- avcHandle->CBAVC_Free(userData, video->currPicParams);
- }
- if (video->currSeqParams != NULL)
- {
- avcHandle->CBAVC_Free(userData, video->currSeqParams);
- }
- if (encvid->bitstream != NULL)
- {
- avcHandle->CBAVC_Free(userData, encvid->bitstream);
- }
- if (video != NULL)
- {
- avcHandle->CBAVC_Free(userData, video);
- }
- }
-
- avcHandle->CBAVC_Free(userData, encvid);
-
- avcHandle->AVCObject = NULL;
- }
-
- return ;
-}
-
-OSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateBitRate(AVCHandle *avcHandle, uint32 bitrate)
-{
- OSCL_UNUSED_ARG(avcHandle);
- OSCL_UNUSED_ARG(bitrate);
-
- return AVCENC_FAIL;
-}
-
-OSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateFrameRate(AVCHandle *avcHandle, uint32 num, uint32 denom)
-{
- OSCL_UNUSED_ARG(avcHandle);
- OSCL_UNUSED_ARG(num);
- OSCL_UNUSED_ARG(denom);
-
- return AVCENC_FAIL;
-}
-
-OSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateIDRInterval(AVCHandle *avcHandle, int IDRInterval)
-{
- OSCL_UNUSED_ARG(avcHandle);
- OSCL_UNUSED_ARG(IDRInterval);
-
- return AVCENC_FAIL;
-}
-
-OSCL_EXPORT_REF AVCEnc_Status PVAVCEncIDRRequest(AVCHandle *avcHandle)
-{
- OSCL_UNUSED_ARG(avcHandle);
-
- return AVCENC_FAIL;
-}
-
-OSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateIMBRefresh(AVCHandle *avcHandle, int numMB)
-{
- OSCL_UNUSED_ARG(avcHandle);
- OSCL_UNUSED_ARG(numMB);
-
- return AVCENC_FAIL;
-}
-
-void PVAVCEncGetFrameStats(AVCHandle *avcHandle, AVCEncFrameStats *avcStats)
-{
- AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
- AVCRateControl *rateCtrl = encvid->rateCtrl;
-
- avcStats->avgFrameQP = GetAvgFrameQP(rateCtrl);
- avcStats->numIntraMBs = encvid->numIntraMB;
-
- return ;
-}
-
-
-
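The byte-level sniffing done by PVAVCEncGetNALType() above follows the standard one-byte H.264 NAL header layout: one forbidden_zero bit, two nal_ref_idc bits, and five nal_unit_type bits. A small worked example with an illustrative header byte (0x67 is only a typical SPS header value, not something taken from this change):

    unsigned char header = 0x67;                  // 0110 0111
    int forbiddenZeroBit = header >> 7;           // 0 (must be 0 for a valid NAL unit)
    int nalRefIdc        = (header & 0x60) >> 5;  // 3: the NAL is used as a reference
    int nalUnitType      = header & 0x1F;         // 7 == AVC_NALTYPE_SPS (8 would be a PPS, 5 an IDR slice)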
diff --git a/media/libstagefright/codecs/avc/enc/src/avcenc_api.h b/media/libstagefright/codecs/avc/enc/src/avcenc_api.h
deleted file mode 100644
index 6841ec3..0000000
--- a/media/libstagefright/codecs/avc/enc/src/avcenc_api.h
+++ /dev/null
@@ -1,323 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
-This file contains application function interfaces to the AVC encoder library
-and necessary type definitions and enumerations.
-@publishedAll
-*/
-
-#ifndef AVCENC_API_H_INCLUDED
-#define AVCENC_API_H_INCLUDED
-
-#ifndef AVCAPI_COMMON_H_INCLUDED
-#include "avcapi_common.h"
-#endif
-
-// For memset, etc
-#include <string.h>
-
-/**
- This enumeration is used for the status returned from the library interface.
-*/
-typedef enum
-{
- /**
- Fail information, need to add more error code for more specific info
- */
- AVCENC_TRAILINGONES_FAIL = -35,
- AVCENC_SLICE_EMPTY = -34,
- AVCENC_POC_FAIL = -33,
- AVCENC_CONSECUTIVE_NONREF = -32,
- AVCENC_CABAC_FAIL = -31,
- AVCENC_PRED_WEIGHT_TAB_FAIL = -30,
- AVCENC_DEC_REF_PIC_MARK_FAIL = -29,
- AVCENC_SPS_FAIL = -28,
- AVCENC_BITSTREAM_BUFFER_FULL = -27,
- AVCENC_BITSTREAM_INIT_FAIL = -26,
- AVCENC_CHROMA_QP_FAIL = -25,
- AVCENC_INIT_QS_FAIL = -24,
- AVCENC_INIT_QP_FAIL = -23,
- AVCENC_WEIGHTED_BIPRED_FAIL = -22,
- AVCENC_INVALID_INTRA_PERIOD = -21,
- AVCENC_INVALID_CHANGE_RATE = -20,
- AVCENC_INVALID_BETA_OFFSET = -19,
- AVCENC_INVALID_ALPHA_OFFSET = -18,
- AVCENC_INVALID_DEBLOCK_IDC = -17,
- AVCENC_INVALID_REDUNDANT_PIC = -16,
- AVCENC_INVALID_FRAMERATE = -15,
- AVCENC_INVALID_NUM_SLICEGROUP = -14,
- AVCENC_INVALID_POC_LSB = -13,
- AVCENC_INVALID_NUM_REF = -12,
- AVCENC_INVALID_FMO_TYPE = -11,
- AVCENC_ENCPARAM_MEM_FAIL = -10,
- AVCENC_LEVEL_NOT_SUPPORTED = -9,
- AVCENC_LEVEL_FAIL = -8,
- AVCENC_PROFILE_NOT_SUPPORTED = -7,
- AVCENC_TOOLS_NOT_SUPPORTED = -6,
- AVCENC_WRONG_STATE = -5,
- AVCENC_UNINITIALIZED = -4,
- AVCENC_ALREADY_INITIALIZED = -3,
- AVCENC_NOT_SUPPORTED = -2,
- AVCENC_MEMORY_FAIL = AVC_MEMORY_FAIL,
- AVCENC_FAIL = AVC_FAIL,
- /**
- Generic success value
- */
- AVCENC_SUCCESS = AVC_SUCCESS,
- AVCENC_PICTURE_READY = 2,
- AVCENC_NEW_IDR = 3, /* upon getting this, users have to call PVAVCEncodeSPS and PVAVCEncodePPS to get a new SPS and PPS*/
- AVCENC_SKIPPED_PICTURE = 4 /* continuable error message */
-
-} AVCEnc_Status;
-
-#define MAX_NUM_SLICE_GROUP 8 /* maximum for all the profiles */
-
-/**
-This structure contains the encoding parameters.
-*/
-typedef struct tagAVCEncParam
-{
- /* if profile/level is set to zero, encoder will choose the closest one for you */
- AVCProfile profile; /* profile of the bitstream to be compliant with*/
- AVCLevel level; /* level of the bitstream to be compliant with*/
-
- int width; /* width of an input frame in pixel */
- int height; /* height of an input frame in pixel */
-
- int poc_type; /* picture order count mode, 0,1 or 2 */
- /* for poc_type == 0 */
- uint log2_max_poc_lsb_minus_4; /* specify maximum value of POC Lsb, range 0..12*/
- /* for poc_type == 1 */
- uint delta_poc_zero_flag; /* delta POC always zero */
- int offset_poc_non_ref; /* offset for non-reference pic */
- int offset_top_bottom; /* offset between top and bottom field */
- uint num_ref_in_cycle; /* number of reference frame in one cycle */
- int *offset_poc_ref; /* array of offset for ref pic, dimension [num_ref_in_cycle] */
-
- int num_ref_frame; /* number of reference frame used */
- int num_slice_group; /* number of slice group */
- int fmo_type; /* 0: interleave, 1: dispersed, 2: foreground with left-over
- 3: box-out, 4:raster scan, 5:wipe, 6:explicit */
- /* for fmo_type == 0 */
- uint run_length_minus1[MAX_NUM_SLICE_GROUP]; /* array of size num_slice_group, in round-robin fashion */
- /* fmo_type == 2*/
- uint top_left[MAX_NUM_SLICE_GROUP-1]; /* array of co-ordinates of each slice_group */
- uint bottom_right[MAX_NUM_SLICE_GROUP-1]; /* except the last one which is the background. */
- /* fmo_type == 3,4,5 */
- AVCFlag change_dir_flag; /* slice group change direction flag */
- uint change_rate_minus1;
- /* fmo_type == 6 */
- uint *slice_group; /* array of size MBWidth*MBHeight */
-
- AVCFlag db_filter; /* enable deblocking loop filter */
- int disable_db_idc; /* 0: filter everywhere, 1: no filter, 2: no filter across slice boundary */
- int alpha_offset; /* alpha offset range -6,...,6 */
- int beta_offset; /* beta offset range -6,...,6 */
-
- AVCFlag constrained_intra_pred; /* constrained intra prediction flag */
-
- AVCFlag auto_scd; /* scene change detection on or off */
- int idr_period; /* idr frame refresh rate in number of target encoded frame (no concept of actual time).*/
- int intramb_refresh; /* minimum number of intra MB per frame */
- AVCFlag data_par; /* enable data partitioning */
-
- AVCFlag fullsearch; /* enable full-pel full-search mode */
- int search_range; /* search range for motion vector in (-search_range,+search_range) pixels */
- AVCFlag sub_pel; /* enable sub pel prediction */
- AVCFlag submb_pred; /* enable sub MB partition mode */
- AVCFlag rdopt_mode; /* RD optimal mode selection */
- AVCFlag bidir_pred; /* enable bi-directional for B-slice, this flag forces the encoder to encode
- any frame with POC less than the previously encoded frame as a B-frame.
- If it's off, then such frames will remain P-frame. */
-
- AVCFlag rate_control; /* rate control enable, on: RC on, off: constant QP */
- int initQP; /* initial QP */
- uint32 bitrate; /* target encoding bit rate in bits/second */
- uint32 CPB_size; /* coded picture buffer in number of bits */
- uint32 init_CBP_removal_delay; /* initial CBP removal delay in msec */
-
- uint32 frame_rate; /* frame rate in the unit of frames per 1000 second */
- /* note, frame rate is only needed by the rate control, AVC is timestamp agnostic. */
-
- AVCFlag out_of_band_param_set; /* flag to set whether param sets are to be retrieved up front or not */
-
- AVCFlag use_overrun_buffer; /* do not throw away the frame if output buffer is not big enough.
- copy excess bits to the overrun buffer */
-} AVCEncParams;
-
-
-/**
-This structure contains current frame encoding statistics for debugging purpose.
-*/
-typedef struct tagAVCEncFrameStats
-{
- int avgFrameQP; /* average frame QP */
- int numIntraMBs; /* number of intra MBs */
- int numFalseAlarm;
- int numMisDetected;
- int numDetected;
-
-} AVCEncFrameStats;
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
- /** THE FOLLOWINGS ARE APIS */
- /**
- This function initializes the encoder library. It verifies the validity of the
- encoding parameters against the specified profile/level and the list of supported
- tools by this library. It allocates necessary memories required to perform encoding.
- For re-encoding application, if users want to setup encoder in a more precise way,
- users can give the external SPS and PPS to the encoder to follow.
- \param "avcHandle" "Handle to the AVC encoder library object."
- \param "encParam" "Pointer to the encoding parameter structure."
- \param "extSPS" "External SPS used for re-encoding purpose. NULL if not present"
- \param "extPPS" "External PPS used for re-encoding purpose. NULL if not present"
- \return "AVCENC_SUCCESS for success,
- AVCENC_NOT_SUPPORTED for the use of unsupported tools,
- AVCENC_MEMORY_FAIL for memory allocation failure,
- AVCENC_FAIL for generic failure."
- */
- OSCL_IMPORT_REF AVCEnc_Status PVAVCEncInitialize(AVCHandle *avcHandle, AVCEncParams *encParam, void* extSPS, void* extPPS);
-
-
- /**
- Since the output buffer size is not known prior to encoding a frame, users need to
- allocate a big enough buffer; otherwise, that frame will be dropped. This function returns
- the output buffer size that users should allocate to guarantee that one frame fits.
- It follows the CPB spec for a particular level. However, when users set the use_overrun_buffer
- flag, this API is not needed, as excess output bits are saved in the overrun buffer waiting to be
- copied out in small chunks, i.e. users can allocate an output buffer of any size.
- \param "avcHandle" "Handle to the AVC encoder library object."
- \param "size" "Pointer to the size to be modified."
- \return "AVCENC_SUCCESS for success, AVCENC_UNINITIALIZED when level is not known.
- */
-
- OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetMaxOutputBufferSize(AVCHandle *avcHandle, int* size);
-
- /**
- Users call this function to provide an input structure to the encoder library which will keep
- a list of the input structures it receives in case the users call this function many times before
- calling PVAVCEncodeSlice. The encoder library will encode them according to the frame_num order.
- Users should not modify the content of a particular frame until this frame is encoded and
- returned through the CBAVCEnc_ReturnInput() callback function.
- \param "avcHandle" "Handle to the AVC encoder library object."
- \param "input" "Pointer to the input structure."
- \return "AVCENC_SUCCESS for success,
- AVCENC_FAIL if the encoder is not in the right state to take a new input frame.
- AVCENC_NEW_IDR for the detection or determination of a new IDR, with this status,
- the returned NAL is an SPS NAL,
- AVCENC_NO_PICTURE if the input frame coding timestamp is too early, users must
- get next frame or adjust the coding timestamp."
- */
- OSCL_IMPORT_REF AVCEnc_Status PVAVCEncSetInput(AVCHandle *avcHandle, AVCFrameIO *input);
-
- /**
- This function is called to encode a NAL unit which can be an SPS NAL, a PPS NAL or
- a VCL (video coding layer) NAL which contains one slice of data. It could be a
- fixed number of macroblocks, as specified in the encoder parameters set, or the
- maximum number of macroblocks that fit into the given input argument "buffer". The
- input frame is taken from the oldest unencoded input frame retrieved by users by
- PVAVCEncGetInput API.
- \param "avcHandle" "Handle to the AVC encoder library object."
- \param "buffer" "Pointer to the output AVC bitstream buffer, the format will be EBSP,
- not RBSP."
- \param "buf_nal_size" "As input, the size of the buffer in bytes.
- This is the physical limitation of the buffer. As output, the size of the EBSP."
- \param "nal_type" "Pointer to the NAL type of the returned buffer."
- \return "AVCENC_SUCCESS for success of encoding one slice,
- AVCENC_PICTURE_READY for the completion of a frame encoding,
- AVCENC_FAIL for failure (this should not occur, though)."
- */
- OSCL_IMPORT_REF AVCEnc_Status PVAVCEncodeNAL(AVCHandle *avcHandle, uint8 *buffer, uint *buf_nal_size, int *nal_type);
-
- /**
- This function sniffs the nal_unit_type such that users can call corresponding APIs.
- This function is identical to PVAVCDecGetNALType() in the decoder.
- \param "bitstream" "Pointer to the beginning of a NAL unit (start with forbidden_zero_bit, etc.)."
- \param "size" "size of the bitstream (NumBytesInNALunit + 1)."
- \param "nal_unit_type" "Pointer to the return value of nal unit type."
- \return "AVCENC_SUCCESS if success, AVCENC_FAIL otherwise."
- */
- OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetNALType(uint8 *bitstream, int size, int *nal_type, int *nal_ref_idc);
-
- /**
- This function returns the pointer to internal overrun buffer. Users can call this to query
- whether the overrun buffer has been used to encode the current NAL.
- \param "avcHandle" "Pointer to the handle."
- \return "Pointer to overrun buffer if it is used, otherwise, NULL."
- */
- OSCL_IMPORT_REF uint8* PVAVCEncGetOverrunBuffer(AVCHandle* avcHandle);
-
- /**
- This function returns the reconstructed frame of the most recently encoded frame.
- Note that this frame is not returned to the users yet. Users should only read the
- content of this frame.
- \param "avcHandle" "Handle to the AVC encoder library object."
- \param "output" "Pointer to the input structure."
- \return "AVCENC_SUCCESS for success, AVCENC_NO_PICTURE if no picture to be outputted."
- */
- OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetRecon(AVCHandle *avcHandle, AVCFrameIO *recon);
-
- /**
- This function is used to return the reconstructed frame back to the AVC encoder library
- in order to be re-used for encoding operation. If users want the content of it to remain
- unchanged for a long time, they should make a copy of it and release the memory back to
- the encoder. The encoder relies on the id element in the AVCFrameIO structure,
- thus users should not change the id value.
- \param "avcHandle" "Handle to the AVC decoder library object."
- \param "output" "Pointer to the AVCFrameIO structure."
- \return "AVCENC_SUCCESS for success, AVCENC_FAIL for fail for id not found."
- */
- OSCL_IMPORT_REF AVCEnc_Status PVAVCEncReleaseRecon(AVCHandle *avcHandle, AVCFrameIO *recon);
-
- /**
- This function performs clean-up operations, including memory deallocation.
- The encoder will also clear the list of input structures it has not released.
- This implies that users must keep track of the number of input structures they have allocated
- and free them accordingly.
- \param "avcHandle" "Handle to the AVC encoder library object."
- */
- OSCL_IMPORT_REF void PVAVCCleanUpEncoder(AVCHandle *avcHandle);
-
- /**
- This function extracts statistics of the current frame. If the encoder has not finished
- with the current frame, the result is not accurate.
- \param "avcHandle" "Handle to the AVC encoder library object."
- \param "avcStats" "Pointer to AVCEncFrameStats structure."
- \return "void."
- */
- void PVAVCEncGetFrameStats(AVCHandle *avcHandle, AVCEncFrameStats *avcStats);
-
- /**
- These functions are used for the modification of encoding parameters.
- To be polished.
- */
- OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateBitRate(AVCHandle *avcHandle, uint32 bitrate);
- OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateFrameRate(AVCHandle *avcHandle, uint32 num, uint32 denom);
- OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateIDRInterval(AVCHandle *avcHandle, int IDRInterval);
- OSCL_IMPORT_REF AVCEnc_Status PVAVCEncIDRRequest(AVCHandle *avcHandle);
- OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateIMBRefresh(AVCHandle *avcHandle, int numMB);
-
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* _AVCENC_API_H_ */
-
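For context, the avcenc_api.h comments removed above imply a particular call order for the encoder: initialize with PVAVCEncInitialize(), size the output buffer with PVAVCEncGetMaxOutputBufferSize(), queue a frame with PVAVCEncSetInput(), then pull NAL units with PVAVCEncodeNAL() until a picture completes. A minimal sketch of that loop, assuming the handle, input frame and output buffer were already set up elsewhere (the encode_one_frame() wrapper itself is hypothetical, not part of the removed API):

/* Sketch only: drives the removed PVAVCEnc API per its doc comments. */
static int encode_one_frame(AVCHandle *handle, AVCFrameIO *frame,
                            uint8 *outBuf, int outBufSize)
{
    AVCEnc_Status ret;
    uint nalSize;
    int nalType;

    ret = PVAVCEncSetInput(handle, frame);            /* queue the input frame */
    if (ret != AVCENC_SUCCESS && ret != AVCENC_NEW_IDR)
        return -1;                                    /* e.g. AVCENC_NO_PICTURE */

    do {                                              /* one NAL unit per call */
        nalSize = (uint)outBufSize;                   /* in: capacity, out: EBSP size */
        ret = PVAVCEncodeNAL(handle, outBuf, &nalSize, &nalType);
        /* hand (outBuf, nalSize, nalType) to the muxer/writer here */
    } while (ret == AVCENC_SUCCESS);                  /* stops on AVCENC_PICTURE_READY */

    return (ret == AVCENC_PICTURE_READY) ? 0 : -1;
}

Per the comments above, after AVCENC_PICTURE_READY the caller may inspect and release the reconstructed frame via PVAVCEncGetRecon()/PVAVCEncReleaseRecon(), and the input frame is handed back through the CBAVCEnc_ReturnInput() callback.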
diff --git a/media/libstagefright/codecs/avc/enc/src/avcenc_int.h b/media/libstagefright/codecs/avc/enc/src/avcenc_int.h
deleted file mode 100644
index 22042a5..0000000
--- a/media/libstagefright/codecs/avc/enc/src/avcenc_int.h
+++ /dev/null
@@ -1,471 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
-This file contains application function interfaces to the AVC encoder library
-and necessary type definitions and enumerations.
-@publishedAll
-*/
-
-#ifndef AVCENC_INT_H_INCLUDED
-#define AVCENC_INT_H_INCLUDED
-
-#ifndef AVCINT_COMMON_H_INCLUDED
-#include "avcint_common.h"
-#endif
-#ifndef AVCENC_API_H_INCLUDED
-#include "avcenc_api.h"
-#endif
-
-typedef float OsclFloat;
-
-/* Definition for the structures below */
-#define DEFAULT_ATTR 0 /* default memory attribute */
-#define MAX_INPUT_FRAME 30 /* some arbitrary number, it can be much higher than this. */
-#define MAX_REF_FRAME 16 /* max size of the RefPicList0 and RefPicList1 */
-#define MAX_REF_PIC_LIST 33
-
-#define MIN_QP 0
-#define MAX_QP 51
-#define SHIFT_QP 12
-#define LAMBDA_ACCURACY_BITS 16
-#define LAMBDA_FACTOR(lambda) ((int)((double)(1<<LAMBDA_ACCURACY_BITS)*(lambda)+0.5))
-
-
-#define DISABLE_THRESHOLDING 0
-// for better R-D performance
-#define _LUMA_COEFF_COST_ 4 //!< threshold for luma coeffs
-#define _CHROMA_COEFF_COST_ 4 //!< threshold for chroma coeffs, used to be 7
-#define _LUMA_MB_COEFF_COST_ 5 //!< threshold for luma coeffs of inter Macroblocks
-#define _LUMA_8x8_COEFF_COST_ 5 //!< threshold for luma coeffs of 8x8 Inter Partition
-#define MAX_VALUE 999999 //!< used for start value for some variables
-
-#define WEIGHTED_COST(factor,bits) (((factor)*(bits))>>LAMBDA_ACCURACY_BITS)
-#define MV_COST(f,s,cx,cy,px,py) (WEIGHTED_COST(f,mvbits[((cx)<<(s))-(px)]+mvbits[((cy)<<((s)))-(py)]))
-#define MV_COST_S(f,cx,cy,px,py) (WEIGHTED_COST(f,mvbits[(cx)-(px)]+mvbits[(cy)-(py)]))
-
-/* for sub-pel search and interpolation */
-#define SUBPEL_PRED_BLK_SIZE 576 // 24x24
-#define REF_CENTER 75
-#define V2Q_H0Q 1
-#define V0Q_H2Q 2
-#define V2Q_H2Q 3
-
-/*
-#define V3Q_H0Q 1
-#define V3Q_H1Q 2
-#define V0Q_H1Q 3
-#define V1Q_H1Q 4
-#define V1Q_H0Q 5
-#define V1Q_H3Q 6
-#define V0Q_H3Q 7
-#define V3Q_H3Q 8
-#define V2Q_H3Q 9
-#define V2Q_H0Q 10
-#define V2Q_H1Q 11
-#define V2Q_H2Q 12
-#define V3Q_H2Q 13
-#define V0Q_H2Q 14
-#define V1Q_H2Q 15
-*/
-
-
-#define DEFAULT_OVERRUN_BUFFER_SIZE 1000
-
-// associated with the above cost model
-const uint8 COEFF_COST[2][16] =
-{
- {3, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
-};
-
-
-
-//! convert from H.263 QP to H.264 quant given by: quant=pow(2,QP/6)
-const int QP2QUANT[40] =
-{
- 1, 1, 1, 1, 2, 2, 2, 2,
- 3, 3, 3, 4, 4, 4, 5, 6,
- 6, 7, 8, 9, 10, 11, 13, 14,
- 16, 18, 20, 23, 25, 29, 32, 36,
- 40, 45, 51, 57, 64, 72, 81, 91
-};
-
-
-/**
-This enumeration keeps track of the internal status of the encoder, i.e. what it is currently
-doing. The encoding flow follows the order in which these states are listed.
-@publishedAll
-*/
-typedef enum
-{
- AVCEnc_Initializing = 0,
- AVCEnc_Encoding_SPS,
- AVCEnc_Encoding_PPS,
- AVCEnc_Analyzing_Frame,
- AVCEnc_WaitingForBuffer, // pending state
- AVCEnc_Encoding_Frame,
-} AVCEnc_State ;
-
-/**
-Bitstream structure contains bitstream related parameters such as the pointer
-to the buffer, the current byte position and bit position. The content of the
-bitstreamBuffer will be in EBSP format as the emulation prevention codes are
-automatically inserted as the RBSP is recorded.
-@publishedAll
-*/
-typedef struct tagEncBitstream
-{
- uint8 *bitstreamBuffer; /* pointer to buffer memory */
- int buf_size; /* size of the buffer memory */
- int write_pos; /* next position to write to bitstreamBuffer */
- int count_zeros; /* count number of consecutive zero */
- uint current_word; /* byte-swapped (MSB left) current word to write to buffer */
- int bit_left; /* number of bit left in current_word */
- uint8 *overrunBuffer; /* extra output buffer to prevent current skip due to output buffer overrun*/
- int oBSize; /* size of allocated overrun buffer */
- void *encvid; /* pointer to the main object */
-
-} AVCEncBitstream;
-
-/**
-This structure is used for rate control purposes; other performance-related control
-variables such as RD cost, statistics, and motion search data
-should also be kept in this structure.
-@publishedAll
-*/
-
-
-typedef struct tagRDInfo
-{
- int QP;
- int actual_bits;
- OsclFloat mad;
- OsclFloat R_D;
-} RDInfo;
-
-typedef struct tagMultiPass
-{
- /* multipass rate control data */
- int target_bits; /* target bits for current frame, = rc->T */
- int actual_bits; /* actual bits for current frame obtained after encoding, = rc->Rc*/
- int QP; /* quantization level for current frame, = rc->Qc*/
- int prev_QP; /* quantization level for previous frame */
- int prev_prev_QP; /* quantization level for previous frame before last*/
- OsclFloat mad; /* mad for current frame, = video->avgMAD*/
- int bitrate; /* bitrate for current frame */
- OsclFloat framerate; /* framerate for current frame*/
-
- int nRe_Quantized; /* control variable for multipass encoding, */
- /* 0 : first pass */
- /* 1 : intermediate pass(quantization and VLC loop only) */
- /* 2 : final pass(de-quantization, idct, etc) */
- /* 3 : macroblock level rate control */
-
- int encoded_frames; /* counter for all encoded frames */
- int re_encoded_frames; /* counter for all multipass encoded frames*/
- int re_encoded_times; /* counter for all times of multipass frame encoding */
-
- /* Multiple frame prediction*/
- RDInfo **pRDSamples; /* pRDSamples[30][32], 30->30fps, 32 -> 5 bit quantizer, 32 candidates*/
- int framePos; /* specific position in previous multiple frames*/
- int frameRange; /* number of overall previous multiple frames */
- int samplesPerFrame[30]; /* number of samples per frame, 30->30fps */
-
- /* Bit allocation for scene change frames and high motion frames */
- OsclFloat sum_mad;
- int counter_BTsrc; /* BT = Bit Transfer, bit transfer from low motion frames or less complicatedly compressed frames */
- int counter_BTdst; /* BT = Bit Transfer, bit transfer to scene change frames or high motion frames or more complicatedly compressed frames */
- OsclFloat sum_QP;
- int diff_counter; /* diff_counter = -diff_counter_BTdst, or diff_counter_BTsrc */
-
- /* For target bitrate or framerate update */
- OsclFloat target_bits_per_frame; /* = C = bitrate/framerate */
- OsclFloat target_bits_per_frame_prev; /* previous C */
- OsclFloat aver_mad; /* so-far average mad could replace sum_mad */
- OsclFloat aver_mad_prev; /* previous average mad */
- int overlapped_win_size; /* transition period of time */
- int encoded_frames_prev; /* previous encoded_frames */
-} MultiPass;
-
-
-typedef struct tagdataPointArray
-{
- int Qp;
- int Rp;
- OsclFloat Mp; /* for MB-based RC */
- struct tagdataPointArray *next;
- struct tagdataPointArray *prev;
-} dataPointArray;
-
-typedef struct tagAVCRateControl
-{
-
- /* these parameters are initialized by the users AVCEncParams */
- /* bitrate-robustness tradeoff */
- uint scdEnable; /* enable scene change detection */
- int idrPeriod; /* IDR period in number of frames */
- int intraMBRate; /* intra MB refresh rate per frame */
- uint dpEnable; /* enable data partitioning */
-
- /* quality-complexity tradeoff */
- uint subPelEnable; /* enable quarter pel search */
- int mvRange; /* motion vector search range in +/- pixel */
- uint subMBEnable; /* enable sub MB prediction mode (4x4, 4x8, 8x4) */
- uint rdOptEnable; /* enable RD-opt mode selection */
- uint twoPass; /* flag for 2 pass encoding ( for future )*/
- uint bidirPred; /* bi-directional prediction for B-frame. */
-
- uint rcEnable; /* enable rate control, '1' on, '0' const QP */
- int initQP; /* initial QP */
-
- /* note the following 3 params are for HRD, these triplets can be a series
- of triplets as the generalized HRD allows. SEI message must be generated in this case. */
- /* We no longer have to differentiate between CBR and VBR. The users of the
- AVC encoder lib will do the mapping from CBR/VBR to these parameters. */
- int32 bitRate; /* target bit rate for the overall clip in bits/second*/
- int32 cpbSize; /* coded picture buffer size in bytes */
- int32 initDelayOffset; /* initial CBP removal delay in bits */
-
- OsclFloat frame_rate; /* frame rate */
- int srcInterval; /* source frame rate in msec */
- int basicUnit; /* number of macroblocks per BU */
-
- /* Then internal parameters for the operation */
- uint first_frame; /* a flag for the first frame */
- int lambda_mf; /* for example */
- int totalSAD; /* SAD of current frame */
-
- /*******************************************/
- /* this part comes from MPEG4 rate control */
- int alpha; /* weight for I frame */
- int Rs; /*bit rate for the sequence (or segment) e.g., 24000 bits/sec */
- int Rc; /*bits used for the current frame. It is the bit count obtained after encoding. */
- int Rp; /*bits to be removed from the buffer per picture. */
- /*? is this the average one, or just the bits coded for the previous frame */
- int Rps; /*bit to be removed from buffer per src frame */
- OsclFloat Ts; /*number of seconds for the sequence (or segment). e.g., 10 sec */
- OsclFloat Ep;
- OsclFloat Ec; /*mean absolute difference for the current frame after motion compensation.*/
- /*If the macroblock is intra coded, the original spatial pixel values are summed.*/
- int Qc; /*quantization level used for the current frame. */
- int Nr; /*number of P frames remaining for encoding.*/
- int Rr; /*number of bits remaining for encoding this sequence (or segment).*/
- int Rr_Old;
- int T; /*target bit to be used for the current frame.*/
- int S; /*number of bits used for encoding the previous frame.*/
- int Hc; /*header and motion vector bits used in the current frame. It includes all the information except the residual information.*/
- int Hp; /*header and motion vector bits used in the previous frame. It includes all the information except the residual information.*/
- int Ql; /*quantization level used in the previous frame */
- int Bs; /*buffer size e.g., R/2 */
- int B; /*current buffer level e.g., R/4 - start from the middle of the buffer */
- OsclFloat X1;
- OsclFloat X2;
- OsclFloat X11;
- OsclFloat M; /*safe margin for the buffer */
- OsclFloat smTick; /*ratio of src versus enc frame rate */
- double remnant; /*remainder frame of src/enc frame for fine frame skipping */
- int timeIncRes; /* vol->timeIncrementResolution */
-
- dataPointArray *end; /*quantization levels for the past (20) frames */
-
- int frameNumber; /* ranging from 0 to 20 nodes*/
- int w;
- int Nr_Original;
- int Nr_Old, Nr_Old2;
- int skip_next_frame;
- int Qdep; /* smooth Q adjustment */
- int VBR_Enabled;
-
- int totalFrameNumber; /* total coded frames, for debugging!!*/
-
- char oFirstTime;
-
- int numFrameBits; /* keep track of number of bits of the current frame */
- int NumberofHeaderBits;
- int NumberofTextureBits;
- int numMBHeaderBits;
- int numMBTextureBits;
- double *MADofMB;
- int32 bitsPerFrame;
-
- /* BX rate control, something like TMN8 rate control*/
-
- MultiPass *pMP;
-
- int TMN_W;
- int TMN_TH;
- int VBV_fullness;
- int max_BitVariance_num; /* the number of the maximum bit variance within the given buffer with the unit of 10% of bitrate/framerate*/
- int encoded_frames; /* counter for all encoded frames */
- int low_bound; /* bound for underflow detection, usually low_bound=-Bs/2, but could be changed in H.263 mode */
- int VBV_fullness_offset; /* offset of VBV_fullness, usually is zero, but can be changed in H.263 mode*/
- /* End BX */
-
-} AVCRateControl;
-
-
-/**
-This structure is for the motion vector information. */
-typedef struct tagMV
-{
- int x;
- int y;
- uint sad;
-} AVCMV;
-
-/**
-This structure contains function pointers for different platform dependent implementation of
-functions. */
-typedef struct tagAVCEncFuncPtr
-{
-
- int (*SAD_MB_HalfPel[4])(uint8*, uint8*, int, void *);
- int (*SAD_Macroblock)(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
-
-} AVCEncFuncPtr;
-
-/**
-This structure contains information necessary for correct padding.
-*/
-typedef struct tagPadInfo
-{
- int i;
- int width;
- int j;
- int height;
-} AVCPadInfo;
-
-
-#ifdef HTFM
-typedef struct tagHTFM_Stat
-{
- int abs_dif_mad_avg;
- uint countbreak;
- int offsetArray[16];
- int offsetRef[16];
-} HTFM_Stat;
-#endif
-
-
-/**
-This structure is the main object for AVC encoder library providing access to all
-global variables. It is allocated at PVAVCInitEncoder and freed at PVAVCCleanUpEncoder.
-@publishedAll
-*/
-typedef struct tagEncObject
-{
-
- AVCCommonObj *common;
-
- AVCEncBitstream *bitstream; /* for current NAL */
- uint8 *overrunBuffer; /* extra output buffer to prevent current skip due to output buffer overrun*/
- int oBSize; /* size of allocated overrun buffer */
-
- /* rate control */
- AVCRateControl *rateCtrl; /* pointer to the rate control structure */
-
- /* encoding operation */
- AVCEnc_State enc_state; /* encoding state */
-
- AVCFrameIO *currInput; /* pointer to the current input frame */
-
- int currSliceGroup; /* currently encoded slice group id */
-
- int level[24][16], run[24][16]; /* scratch memory */
- int leveldc[16], rundc[16]; /* for DC component */
- int levelcdc[16], runcdc[16]; /* for chroma DC component */
- int numcoefcdc[2]; /* number of coefficient for chroma DC */
- int numcoefdc; /* number of coefficients for DC component */
-
- int qp_const;
- int qp_const_c;
- /********* intra prediction scratch memory **********************/
- uint8 pred_i16[AVCNumI16PredMode][256]; /* save prediction for MB */
- uint8 pred_i4[AVCNumI4PredMode][16]; /* save prediction for blk */
- uint8 pred_ic[AVCNumIChromaMode][128]; /* for 2 chroma */
-
- int mostProbableI4Mode[16]; /* in raster scan order */
- /********* motion compensation related variables ****************/
- AVCMV *mot16x16; /* Saved motion vectors for 16x16 block*/
- AVCMV(*mot16x8)[2]; /* Saved motion vectors for 16x8 block*/
- AVCMV(*mot8x16)[2]; /* Saved motion vectors for 8x16 block*/
- AVCMV(*mot8x8)[4]; /* Saved motion vectors for 8x8 block*/
-
- /********* subpel position **************************************/
- uint32 subpel_pred[SUBPEL_PRED_BLK_SIZE/*<<2*/]; /* all 16 sub-pel positions */
- uint8 *hpel_cand[9]; /* pointer to half-pel position */
- int best_hpel_pos; /* best position */
- uint8 qpel_cand[8][24*16]; /* pointer to quarter-pel position */
- int best_qpel_pos;
- uint8 *bilin_base[9][4]; /* pointer to 4 position at top left of bilinear quarter-pel */
-
- /* need for intra refresh rate */
- uint8 *intraSearch; /* Intra Array for MBs to be intra searched */
- uint firstIntraRefreshMBIndx; /* keep track for intra refresh */
-
- int i4_sad; /* temporary for i4 mode SAD */
- int *min_cost; /* Minimum cost for the all MBs */
- int lambda_mode; /* Lagrange parameter for mode selection */
- int lambda_motion; /* Lagrange parameter for MV selection */
-
- uint8 *mvbits_array; /* Table for bits spent in the cost function */
- uint8 *mvbits; /* An offset to the above array. */
-
- /* to speedup the SAD calculation */
- void *sad_extra_info;
- uint8 currYMB[256]; /* interleaved current macroblock in HTFM order */
-
-#ifdef HTFM
- int nrmlz_th[48]; /* Threshold for fast SAD calculation using HTFM */
- HTFM_Stat htfm_stat; /* For statistics collection */
-#endif
-
- /* statistics */
- int numIntraMB; /* keep track of number of intra MB */
-
- /* encoding complexity control */
- uint fullsearch_enable; /* flag to enable full-pel full-search */
-
- /* misc.*/
- bool outOfBandParamSet; /* flag to enable out-of-band param set */
-
- AVCSeqParamSet extSPS; /* for external SPS */
- AVCPicParamSet extPPS; /* for external PPS */
-
- /* time control */
- uint32 prevFrameNum; /* previous frame number starting from modTimeRef */
- uint32 modTimeRef; /* Reference modTime update every I-Vop*/
- uint32 wrapModTime; /* Offset to modTime Ref, rarely used */
-
- uint prevProcFrameNum; /* previously processed frame number, could be skipped */
- uint prevCodedFrameNum; /* previously encoded frame number */
- /* POC related variables */
- uint32 dispOrdPOCRef; /* reference POC in display order units. */
-
- /* Function pointers */
- AVCEncFuncPtr *functionPointer; /* store pointers to platform specific functions */
-
- /* Application control data */
- AVCHandle *avcHandle;
-
-
-} AVCEncObject;
-
-
-#endif /*AVCENC_INT_H_INCLUDED*/
-
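The LAMBDA_ACCURACY_BITS, LAMBDA_FACTOR and WEIGHTED_COST macros in the header removed above implement a 16-bit fixed-point lambda weighting for the motion/mode cost model. A small standalone check of that arithmetic (the lambda value 0.85 is an arbitrary example, not a value taken from this encoder):

#include <stdio.h>

#define LAMBDA_ACCURACY_BITS 16
#define LAMBDA_FACTOR(lambda) ((int)((double)(1 << LAMBDA_ACCURACY_BITS) * (lambda) + 0.5))
#define WEIGHTED_COST(factor, bits) (((factor) * (bits)) >> LAMBDA_ACCURACY_BITS)

int main(void)
{
    int f = LAMBDA_FACTOR(0.85);              /* 0.85 * 65536, rounded = 55706 */
    printf("%d\n", WEIGHTED_COST(f, 12));     /* ~0.85 * 12 -> prints 10 */
    return 0;
}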
diff --git a/media/libstagefright/codecs/avc/enc/src/avcenc_lib.h b/media/libstagefright/codecs/avc/enc/src/avcenc_lib.h
deleted file mode 100644
index 17e28ef..0000000
--- a/media/libstagefright/codecs/avc/enc/src/avcenc_lib.h
+++ /dev/null
@@ -1,1020 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
-This file contains declarations of internal functions for the AVC encoder library.
-@publishedAll
-*/
-#ifndef AVCENC_LIB_H_INCLUDED
-#define AVCENC_LIB_H_INCLUDED
-
-#ifndef AVCLIB_COMMON_H_INCLUDED
-#include "avclib_common.h"
-#endif
-#ifndef AVCENC_INT_H_INCLUDED
-#include "avcenc_int.h"
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
- /*------------- block.c -------------------------*/
-
- /**
- This function performs residue calculation, transform, quantization, inverse quantization,
- inverse transform and residue compensation on a 4x4 block.
- \param "encvid" "Pointer to AVCEncObject."
- \param "blkidx" "raster scan block index of the current 4x4 block."
- \param "cur" "Pointer to the reconstructed block."
- \param "org" "Pointer to the original block."
- \param "coef_cost" "Pointer to the coefficient cost to be filled in and returned."
- \return "Number of non-zero coefficients."
- */
- int dct_luma(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org, int *coef_cost);
-
- /**
- This function performs IDCT on an INTER macroblock.
- \param "video" "Pointer to AVCCommonObj."
- \param "curL" "Pointer to the origin of the macroblock on the current frame."
- \param "currMB" "Pointer to the AVCMacroblock structure."
- \param "picPitch" "Pitch of the current frame."
- \return "void".
- */
- void MBInterIdct(AVCCommonObj *video, uint8 *curL, AVCMacroblock *currMB, int picPitch);
-
- /**
- This function performs residue calculation, transform, quantization, inverse quantization,
- inverse transform and residue compensation on a macroblock.
- \param "encvid" "Pointer to AVCEncObject."
- \param "curL" "Pointer to the reconstructed MB."
- \param "orgL" "Pointer to the original MB."
- \return "void"
- */
- void dct_luma_16x16(AVCEncObject *encvid, uint8 *curL, uint8 *orgL);
-
- /**
- This function performs residue calculation, transform, quantization, inverse quantization,
- inverse transform and residue compensation for the chroma components of an MB.
- \param "encvid" "Pointer to AVCEncObject."
- \param "curC" "Pointer to the reconstructed MB."
- \param "orgC" "Pointer to the original MB."
- \param "cr" "Flag whether it is Cr or not."
- \return "void"
- */
- void dct_chroma(AVCEncObject *encvid, uint8 *curC, uint8 *orgC, int cr);
-
- /*----------- init.c ------------------*/
- /**
- This function interprets the encoding parameters provided by users in encParam.
- The results are kept in AVCEncObject, AVCSeqParamSet, AVCPicParamSet and AVCSliceHeader.
- \param "encvid" "Pointer to AVCEncObject."
- \param "encParam" "Pointer to AVCEncParam."
- \param "extSPS" "External SPS template to be followed. NULL if not present."
- \param "extPPS" "External PPS template to be followed. NULL if not present."
- \return "see AVCEnc_Status."
- */
- AVCEnc_Status SetEncodeParam(AVCHandle *avcHandle, AVCEncParams *encParam,
- void *extSPS, void *extPPS);
-
- /**
- This function verifies whether the encoding parameters meet the set of tools supported
- by a specific profile. If the profile is not set, it will just find the closest
- profile instead of verifying it.
- \param "video" "Pointer to AVCEncObject."
- \param "seqParam" "Pointer to AVCSeqParamSet."
- \param "picParam" "Pointer to AVCPicParamSet."
- \return "AVCENC_SUCCESS if success,
- AVCENC_PROFILE_NOT_SUPPORTED if the specified profile
- is not supported by this version of the library,
- AVCENC_TOOLS_NOT_SUPPORTED if any of the specified encoding tools are
- not supported by the user-selected profile."
- */
- AVCEnc_Status VerifyProfile(AVCEncObject *video, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam);
-
- /**
- This function verifies whether the encoding parameters meet the requirements
- for a specific level. If the level is not set, it will just find the closest
- level instead of verifying it.
- \param "video" "Pointer to AVCEncObject."
- \param "seqParam" "Pointer to AVCSeqParamSet."
- \param "picParam" "Pointer to AVCPicParamSet."
- \return "AVCENC_SUCCESS if success,
- AVCENC_LEVEL_NOT_SUPPORTED if the specified level
- is not supported by this version of the library,
- AVCENC_LEVEL_FAIL if any of the encoding parameters exceed
- the range of the user-selected level."
- */
- AVCEnc_Status VerifyLevel(AVCEncObject *video, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam);
-
- /**
- This function initializes the frame encoding by setting POC/frame_num related parameters. It
- also performs motion estimation.
- \param "encvid" "Pointer to the AVCEncObject."
- \return "AVCENC_SUCCESS if success, AVCENC_NO_PICTURE if there is no input picture
- in the queue to encode, AVCENC_POC_FAIL or AVCENC_CONSECUTIVE_NONREF for POC
- related errors, AVCENC_NEW_IDR if new IDR is detected."
- */
- AVCEnc_Status InitFrame(AVCEncObject *encvid);
-
- /**
- This function initializes slice header related variables and other variables necessary
- for decoding one slice.
- \param "encvid" "Pointer to the AVCEncObject."
- \return "AVCENC_SUCCESS if success."
- */
- AVCEnc_Status InitSlice(AVCEncObject *encvid);
-
- /*----------- header.c ----------------*/
- /**
- This function performs bitstream encoding of the sequence parameter set NAL.
- \param "encvid" "Pointer to the AVCEncObject."
- \param "stream" "Pointer to AVCEncBitstream."
- \return "AVCENC_SUCCESS if success or AVCENC_SPS_FAIL or others for unexpected failure which
- should not occur. The SPS parameters should all be verified before this function is called."
- */
- AVCEnc_Status EncodeSPS(AVCEncObject *encvid, AVCEncBitstream *stream);
-
- /**
- This function encodes the VUI parameters into the sequence parameter set bitstream.
- \param "stream" "Pointer to AVCEncBitstream."
- \param "vui" "Pointer to AVCVUIParams."
- \return "nothing."
- */
- void EncodeVUI(AVCEncBitstream* stream, AVCVUIParams* vui);
-
- /**
- This function encodes HRD parameters into the sequence parameter set bitstream
- \param "stream" "Pointer to AVCEncBitstream."
- \param "hrd" "Pointer to AVCHRDParams."
- \return "nothing."
- */
- void EncodeHRD(AVCEncBitstream* stream, AVCHRDParams* hrd);
-
-
- /**
- This function performs bitstream encoding of the picture parameter set NAL.
- \param "encvid" "Pointer to the AVCEncObject."
- \param "stream" "Pointer to AVCEncBitstream."
- \return "AVCENC_SUCCESS if success or AVCENC_PPS_FAIL or others for unexpected failure which
- should not occur. The PPS parameters should all be verified before this function is called."
- */
- AVCEnc_Status EncodePPS(AVCEncObject *encvid, AVCEncBitstream *stream);
-
- /**
- This function encodes slice header information which has been initialized or fabricated
- prior to entering this function.
- \param "encvid" "Pointer to the AVCEncObject."
- \param "stream" "Pointer to AVCEncBitstream."
- \return "AVCENC_SUCCESS if success or bitstream fail statuses."
- */
- AVCEnc_Status EncodeSliceHeader(AVCEncObject *encvid, AVCEncBitstream *stream);
-
- /**
- This function encodes reference picture list reordering related syntax.
- \param "video" "Pointer to AVCCommonObj."
- \param "stream" "Pointer to AVCEncBitstream."
- \param "sliceHdr" "Pointer to AVCSliceHdr."
- \param "slice_type" "Value of slice_type - 5 if greater than 5."
- \return "AVCENC_SUCCESS for success and AVCENC_FAIL otherwise."
- */
- AVCEnc_Status ref_pic_list_reordering(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type);
-
- /**
- This function encodes dec_ref_pic_marking related syntax.
- \param "video" "Pointer to AVCCommonObj."
- \param "stream" "Pointer to AVCEncBitstream."
- \param "sliceHdr" "Pointer to AVCSliceHdr."
- \return "AVCENC_SUCCESS for success and AVCENC_FAIL otherwise."
- */
- AVCEnc_Status dec_ref_pic_marking(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr);
-
- /**
- This function initializes the POC related variables and the POC syntax to be encoded
- to the slice header derived from the disp_order and is_reference flag of the original
- input frame to be encoded.
- \param "video" "Pointer to the AVCEncObject."
- \return "AVCENC_SUCCESS if success,
- AVCENC_POC_FAIL if the poc type is undefined or
- AVCENC_CONSECUTIVE_NONREF if there are consecutive non-reference frame for POC type 2."
- */
- AVCEnc_Status InitPOC(AVCEncObject *video);
-
- /**
- This function performs POC related operation after a picture is decoded.
- \param "video" "Pointer to AVCCommonObj."
- \return "AVCENC_SUCCESS"
- */
- AVCEnc_Status PostPOC(AVCCommonObj *video);
-
- /*----------- bitstream_io.c ----------------*/
- /**
- This function initializes the bitstream structure with the information given by
- the users.
- \param "bitstream" "Pointer to the AVCEncBitstream structure."
- \param "buffer" "Pointer to the unsigned char buffer for output."
- \param "buf_size" "The size of the buffer in bytes."
- \param "overrunBuffer" "Pointer to extra overrun buffer."
- \param "oBSize" "Size of overrun buffer in bytes."
- \return "AVCENC_SUCCESS if success, AVCENC_BITSTREAM_INIT_FAIL if fail"
- */
- AVCEnc_Status BitstreamEncInit(AVCEncBitstream *bitstream, uint8 *buffer, int buf_size,
- uint8 *overrunBuffer, int oBSize);
-
- /**
- This function writes the data from the cache into the bitstream buffer. It also adds the
- emulation prevention code if necessary.
- \param "stream" "Pointer to the AVCEncBitstream structure."
- \return "AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail."
- */
- AVCEnc_Status AVCBitstreamSaveWord(AVCEncBitstream *stream);
-
- /**
- This function writes the codeword into the cache which will eventually be written to
- the bitstream buffer.
- \param "stream" "Pointer to the AVCEncBitstream structure."
- \param "nBits" "Number of bits in the codeword."
- \param "code" "The codeword."
- \return "AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail."
- */
- AVCEnc_Status BitstreamWriteBits(AVCEncBitstream *stream, int nBits, uint code);
-
- /**
- This function writes one bit of data into the cache which will eventually be written
- to the bitstream buffer.
- \param "stream" "Pointer to the AVCEncBitstream structure."
- \param "code" "The codeword."
- \return "AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail."
- */
- AVCEnc_Status BitstreamWrite1Bit(AVCEncBitstream *stream, uint code);
-
- /**
- This function adds trailing bits to the bitstream and reports back the final EBSP size.
- \param "stream" "Pointer to the AVCEncBitstream structure."
- \param "nal_size" "Output the final NAL size."
- \return "AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail."
- */
- AVCEnc_Status BitstreamTrailingBits(AVCEncBitstream *bitstream, uint *nal_size);
-
- /**
- This function checks whether the current bit position is byte-aligned or not.
- \param "stream" "Pointer to the bitstream structure."
- \return "true if byte-aligned, false otherwise."
- */
- bool byte_aligned(AVCEncBitstream *stream);
-
-
- /**
- This function checks the availability of the overrun buffer and switches to it when
- the normal buffer is not big enough.
- \param "stream" "Pointer to the bitstream structure."
- \param "numExtraBytes" "Number of extra byte needed."
- \return "AVCENC_SUCCESS or AVCENC_FAIL."
- */
- AVCEnc_Status AVCBitstreamUseOverrunBuffer(AVCEncBitstream* stream, int numExtraBytes);
-
-
- /*-------------- intra_est.c ---------------*/
-
- /** This function performs intra/inter decision based on ABE.
- \param "encvid" "Pointer to AVCEncObject."
- \param "min_cost" "Best inter cost."
- \param "curL" "Pointer to the current MB origin in reconstructed frame."
- \param "picPitch" "Pitch of the reconstructed frame."
- \return "Boolean for intra mode."
- */
-
-//bool IntraDecisionABE(AVCEncObject *encvid, int min_cost, uint8 *curL, int picPitch);
- bool IntraDecision(int *min_cost, uint8 *cur, int pitch, bool ave);
-
- /**
- This function performs intra prediction mode search.
- \param "encvid" "Pointer to AVCEncObject."
- \param "mbnum" "Current MB number."
- \param "curL" "Pointer to the current MB origin in reconstructed frame."
- \param "picPitch" "Pitch of the reconstructed frame."
- \return "void."
- */
- void MBIntraSearch(AVCEncObject *encvid, int mbnum, uint8 *curL, int picPitch);
-
- /**
- This function generates all the I16 prediction modes for an MB and keeps them in
- encvid->pred_i16.
- \param "encvid" "Pointer to AVCEncObject."
- \return "void"
- */
- void intrapred_luma_16x16(AVCEncObject *encvid);
-
- /**
- This function calculates the cost of all I16 modes and compares them to get the minimum.
- \param "encvid" "Pointer to AVCEncObject."
- \param "orgY" "Pointer to the original luma MB."
- \param "min_cost" "Pointer to the minimal cost so-far."
- \return "void"
- */
- void find_cost_16x16(AVCEncObject *encvid, uint8 *orgY, int *min_cost);
-
- /**
- This function calculates the cost of each I16 mode.
- \param "org" "Pointer to the original luma MB."
- \param "org_pitch" "Stride size of the original frame."
- \param "pred" "Pointer to the prediction values."
- \param "min_cost" "Minimal cost so-far."
- \return "Cost"
- */
-
- int cost_i16(uint8 *org, int org_pitch, uint8 *pred, int min_cost);
-
- /**
- This function generates all the I4 prediction modes and selects the best one
- for each of the blocks inside a macroblock. It also calls dct_luma to generate the reconstructed
- MB, and transform coefficients to be encoded.
- \param "encvid" "Pointer to AVCEncObject."
- \param "min_cost" "Pointer to the minimal cost so-far."
- \return "void"
- */
- void mb_intra4x4_search(AVCEncObject *encvid, int *min_cost);
-
- /**
- This function calculates the most probable I4 mode of a given 4x4 block
- from neighboring information according to the AVC/H.264 standard.
- \param "video" "Pointer to AVCCommonObj."
- \param "blkidx" "The current block index."
- \return "Most probable mode."
- */
- int FindMostProbableI4Mode(AVCCommonObj *video, int blkidx);
-
- /**
- This function is where a lot of actions take place in the 4x4 block level inside
- mb_intra4x4_search.
- \param "encvid" "Pointer to AVCEncObject."
- \param "blkidx" "The current 4x4 block index."
- \param "cur" "Pointer to the reconstructed block."
- \param "org" "Pointer to the original block."
- \return "Minimal cost, also set currMB->i4Mode"
- */
- int blk_intra4x4_search(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org);
-
- /**
- This function calculates the cost of a given I4 prediction mode.
- \param "org" "Pointer to the original block."
- \param "org_pitch" "Stride size of the original frame."
- \param "pred" "Pointer to the prediction block. (encvid->pred_i4)"
- \param "cost" "Pointer to the minimal cost (to be updated)."
- \return "void"
- */
- void cost_i4(uint8 *org, int org_pitch, uint8 *pred, uint16 *cost);
-
- /**
- This function performs chroma intra search. Each mode is saved in encvid->pred_ic.
- \param "encvid" "Pointer to AVCEncObject."
- \return "void"
- */
- void chroma_intra_search(AVCEncObject *encvid);
-
- /**
- This function calculates the cost of a chroma prediction mode.
- \param "orgCb" "Pointer to the original Cb block."
- \param "orgCr" "Pointer to the original Cr block."
- \param "org_pitch" "Stride size of the original frame."
- \param "pred" "Pointer to the prediction block (encvid->pred_ic)"
- \param "mincost" "Minimal cost so far."
- \return "Cost."
- */
-
- int SATDChroma(uint8 *orgCb, uint8 *orgCr, int org_pitch, uint8 *pred, int mincost);
-
- /*-------------- motion_comp.c ---------------*/
-
- /**
- This is the main function to perform inter prediction.
- \param "encvid" "Pointer to AVCEncObject."
- \param "video" "Pointer to AVCCommonObj."
- \return "void".
- */
- void AVCMBMotionComp(AVCEncObject *encvid, AVCCommonObj *video);
-
-
- /**
- This function is called for luma motion compensation.
- \param "ref" "Pointer to the origin of a reference luma."
- \param "picwidth" "Width of the picture."
- \param "picheight" "Height of the picture."
- \param "x_pos" "X-coordinate of the predicted block in quarter pel resolution."
- \param "y_pos" "Y-coordinate of the predicted block in quarter pel resolution."
- \param "pred" "Pointer to the output predicted block."
- \param "pred_pitch" "Width of pred."
- \param "blkwidth" "Width of the current partition."
- \param "blkheight" "Height of the current partition."
- \return "void"
- */
- void eLumaMotionComp(uint8 *ref, int picwidth, int picheight,
- int x_pos, int y_pos,
- uint8 *pred, int pred_pitch,
- int blkwidth, int blkheight);
-
- void eFullPelMC(uint8 *in, int inwidth, uint8 *out, int outpitch,
- int blkwidth, int blkheight);
-
- void eHorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dx);
-
- void eHorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dx);
-
- void eHorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch,
- int blkwidth, int blkheight);
-
- void eVertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dy);
-
- void eVertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch,
- int blkwidth, int blkheight);
-
- void eVertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dy);
-
- void eDiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch,
- uint8 *out, int outpitch,
- int blkwidth, int blkheight);
-
- void eChromaMotionComp(uint8 *ref, int picwidth, int picheight,
- int x_pos, int y_pos, uint8 *pred, int pred_pitch,
- int blkwidth, int blkheight);
-
- void eChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
- void eChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
- void eChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
- void eChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
- void eChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
- void eChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
- void eChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight);
-
-
- /*-------------- motion_est.c ---------------*/
-
- /**
- Allocate and initialize arrays necessary for motion search algorithm.
- \param "envid" "Pointer to AVCEncObject."
- \return "AVC_SUCCESS or AVC_MEMORY_FAIL."
- */
- AVCEnc_Status InitMotionSearchModule(AVCHandle *avcHandle);
-
- /**
- Clean up memory allocated in InitMotionSearchModule.
- \param "envid" "Pointer to AVCEncObject."
- \return "void."
- */
- void CleanMotionSearchModule(AVCHandle *avcHandle);
-
-
- /**
- This function performs motion estimation of all macroblocks in a frame during the InitFrame.
- The goal is to find the best MB partition for inter and find out if intra search is needed for
- any MBs. This intra MB tendency can be used for scene change detection.
- \param "encvid" "Pointer to AVCEncObject."
- \return "void"
- */
- void AVCMotionEstimation(AVCEncObject *encvid);
-
- /**
- This function performs repetitive edge padding to the reference picture by adding 16 pixels
- around the luma and 8 pixels around the chromas.
- \param "refPic" "Pointer to the reference picture."
- \return "void"
- */
- void AVCPaddingEdge(AVCPictureData *refPic);
-
- /**
- This function keeps track of intra refresh macroblock locations.
- \param "encvid" "Pointer to the global array structure AVCEncObject."
- \param "mblock" "Pointer to the array of AVCMacroblock structures."
- \param "totalMB" "Total number of MBs in a frame."
- \param "numRefresh" "Number of MB to be intra refresh in a single frame."
- \return "void"
- */
- void AVCRasterIntraUpdate(AVCEncObject *encvid, AVCMacroblock *mblock, int totalMB, int numRefresh);
-
-#ifdef HTFM
- void InitHTFM(VideoEncData *encvid, HTFM_Stat *htfm_stat, double *newvar, int *collect);
- void UpdateHTFM(AVCEncObject *encvid, double *newvar, double *exp_lamda, HTFM_Stat *htfm_stat);
- void CalcThreshold(double pf, double exp_lamda[], int nrmlz_th[]);
- void HTFMPrepareCurMB_AVC(AVCEncObject *encvid, HTFM_Stat *htfm_stat, uint8 *cur, int pitch);
-#endif
-
- /**
- This function reads the input MB into a smaller faster memory space to minimize the cache miss.
- \param "encvid" "Pointer to the global AVCEncObject."
- \param "cur" "Pointer to the original input macroblock."
- \param "pitch" "Stride size of the input frame (luma)."
- \return "void"
- */
- void AVCPrepareCurMB(AVCEncObject *encvid, uint8 *cur, int pitch);
-
- /**
- Performs motion vector search for a macroblock.
- \param "encvid" "Pointer to AVCEncObject structure."
- \param "cur" "Pointer to the current macroblock in the input frame."
- \param "best_cand" "Array of best candidates (to be filled in and returned)."
- \param "i0" "X-coordinate of the macroblock."
- \param "j0" "Y-coordinate of the macroblock."
- \param "type_pred" "Indicates the type of operations."
- \param "FS_en" "Flag for fullsearch enable."
- \param "hp_guess" "Guess for half-pel search."
- \return "void"
- */
- void AVCMBMotionSearch(AVCEncObject *encvid, uint8 *cur, uint8 *best_cand[],
- int i0, int j0, int type_pred, int FS_en, int *hp_guess);
-
-//AVCEnc_Status AVCMBMotionSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum,
-// int num_pass);
-
- /**
- Perform full-pel exhaustive search around the predicted MV.
- \param "encvid" "Pointer to AVCEncObject structure."
- \param "prev" "Pointer to the reference frame."
- \param "cur" "Pointer to the input macroblock."
- \param "imin" "Pointer to minimal mv (x)."
- \param "jmin" "Pointer to minimal mv (y)."
- \param "ilow, ihigh, jlow, jhigh" "Lower bound on search range."
- \param "cmvx, cmvy" "Predicted MV value."
-
- \return "The cost function of the best candidate."
- */
- int AVCFullSearch(AVCEncObject *encvid, uint8 *prev, uint8 *cur,
- int *imin, int *jmin, int ilow, int ihigh, int jlow, int jhigh,
- int cmvx, int cmvy);
-
- /**
- Select candidates from neighboring blocks according to the type of the
- prediction selection.
- \param "mvx" "Pointer to the candidate, x-coordinate."
- \param "mvy" "Pointer to the candidate, y-coordinate."
- \param "num_can" "Pointer to the number of candidates returned."
- \param "imb" "The MB index x-coordinate."
- \param "jmb" "The MB index y-coordinate."
- \param "type_pred" "Type of the prediction."
- \param "cmvx, cmvy" "Pointer to predicted MV (modified version)."
- \return "void."
- */
- void AVCCandidateSelection(int *mvx, int *mvy, int *num_can, int imb, int jmb,
- AVCEncObject *encvid, int type_pred, int *cmvx, int *cmvy);
-
- /**
- Utility function to move the values in the array dn according to the new
- location to avoid redundant calculation.
- \param "dn" "Array of integer of size 9."
- \param "new_loc" "New location index."
- \return "void."
- */
- void AVCMoveNeighborSAD(int dn[], int new_loc);
-
- /**
- Find minimum index of dn.
- \param "dn" "Array of integer of size 9."
- \return "The index of dn with the smallest dn[] value."
- */
- int AVCFindMin(int dn[]);
-
-
- /*------------- findhalfpel.c -------------------*/
-
- /**
- Search for the best half-pel resolution MV around the full-pel MV.
- \param "encvid" "Pointer to the global AVCEncObject structure."
- \param "cur" "Pointer to the current macroblock."
- \param "mot" "Pointer to the AVCMV array of the frame."
- \param "ncand" "Pointer to the origin of the fullsearch result."
- \param "xpos" "The current MB position in x."
- \param "ypos" "The current MB position in y."
- \param "hp_guess" "Input to help speedup the search."
- \param "cmvx, cmvy" "Predicted motion vector use for mvcost."
- \return "Minimal cost (SATD) without MV cost. (for rate control purpose)"
- */
- int AVCFindHalfPelMB(AVCEncObject *encvid, uint8 *cur, AVCMV *mot, uint8 *ncand,
- int xpos, int ypos, int hp_guess, int cmvx, int cmvy);
-
- /**
- This function generates sub-pel pixels required to do subpel MV search.
- \param "subpel_pred" "Pointer to 2-D array, each array for each position."
- \param "ncand" "Pointer to the full-pel center position in ref frame."
- \param "lx" "Pitch of the ref frame."
- \return "void"
- */
- void GenerateHalfPelPred(uint8 *subpel_pred, uint8 *ncand, int lx);
-
- /**
- This function calculates the vertical interpolation at the half-pel point for a 4x17 region.
- \param "dst" "Pointer to destination."
- \param "ref" "Pointer to the starting reference pixel."
- \return "void."
- */
- void VertInterpWClip(uint8 *dst, uint8 *ref);
-
- /**
- This function generates quarter-pel pixels around the best half-pel result
- during the sub-pel MV search.
- \param "bilin_base" "Array of pointers to be used as basis for q-pel interp."
- \param "qpel_pred" "Array of pointers pointing to quarter-pel candidates."
- \param "hpel_pos" "Best half-pel position at the center."
- \return "void"
- */
- void GenerateQuartPelPred(uint8 **bilin_base, uint8 *qpel_pred, int hpel_pos);
-
- /**
- This function calculates the SATD of a subpel candidate.
- \param "cand" "Pointer to a candidate."
- \param "cur" "Pointer to the current block."
- \param "dmin" "Min-so-far SATD."
- \return "Sum of Absolute Transformed Difference."
- */
- int SATD_MB(uint8 *cand, uint8 *cur, int dmin);
-
- /*------------- rate_control.c -------------------*/
-
- /** This function is a utility function. It returns average QP of the previously encoded frame.
- \param "rateCtrl" "Pointer to AVCRateControl structure."
- \return "Average QP."
- */
- int GetAvgFrameQP(AVCRateControl *rateCtrl);
-
- /**
- This function takes the timestamp of the input and determines whether it should be encoded
- or skipped.
- \param "encvid" "Pointer to the AVCEncObject structure."
- \param "rateCtrl" "Pointer to the AVCRateControl structure."
- \param "modTime" "The 32 bit timestamp of the input frame."
- \param "frameNum" "Pointer to the frame number if to be encoded."
- \return "AVC_SUCCESS or else."
- */
- AVCEnc_Status RCDetermineFrameNum(AVCEncObject *encvid, AVCRateControl *rateCtrl, uint32 modTime, uint *frameNum);
-
- /**
- This function updates the buffer fullness when frames are dropped either by the
- rate control algorithm or by the users to make sure that target bit rate is still met.
- \param "video" "Pointer to the common object structure."
- \param "rateCtrl" "Pointer to rate control structure."
- \param "frameInc" "Difference of the current frame number and previous frame number."
- \return "void."
- */
- void RCUpdateBuffer(AVCCommonObj *video, AVCRateControl *rateCtrl, int frameInc);
-
- /**
- This function initializes the rate control module and allocates the necessary buffers to do the job.
- \param "avcHandle" "Pointer to the encoder handle."
- \return "AVCENC_SUCCESS or AVCENC_MEMORY_FAIL."
- */
- AVCEnc_Status InitRateControlModule(AVCHandle *avcHandle);
-
- /**
- This function frees buffers allocated in InitRateControlModule.
- \param "avcHandle" "Pointer to the encoder handle."
- \return "void."
- */
- void CleanupRateControlModule(AVCHandle *avcHandle);
-
- /**
- This function is called at the beginning of each GOP or the first IDR frame. It calculates
- target bits for a GOP.
- \param "encvid" "Pointer to the encoder object."
- \return "void."
- */
- void RCInitGOP(AVCEncObject *encvid);
-
- /**
- This function calculates target bits for a particular frame.
- \param "video" "Pointer to the AVCEncObject structure."
- \return "void"
- */
- void RCInitFrameQP(AVCEncObject *video);
-
- /**
- This function calculates QP for the upcoming frame or basic unit.
- \param "encvid" "Pointer to the encoder object."
- \param "rateCtrl" "Pointer to the rate control object."
- \return "QP value ranging from 0-51."
- */
- int RCCalculateQP(AVCEncObject *encvid, AVCRateControl *rateCtrl);
-
- /**
- This function translates the luma QP to chroma QP and calculates lambda based on QP.
- \param "video" "Pointer to the AVCEncObject structure."
- \return "void"
- */
- void RCInitChromaQP(AVCEncObject *encvid);
-
- /**
- This function is called before encoding each macroblock.
- \param "encvid" "Pointer to the encoder object."
- \return "void."
- */
- void RCInitMBQP(AVCEncObject *encvid);
-
- /**
- This function updates bit usage stats after encoding a macroblock.
- \param "video" "Pointer to AVCCommonObj."
- \param "rateCtrl" "Pointer to AVCRateControl."
- \param "num_header_bits" "Number of bits used for MB header."
- \param "num_texture_bits" "Number of bits used for MB texture."
- \return "void"
- */
- void RCPostMB(AVCCommonObj *video, AVCRateControl *rateCtrl, int num_header_bits, int num_texture_bits);
-
- /**
- This function calculates the difference between prediction and original MB.
- \param "encvid" "Pointer to the encoder object."
- \param "currMB" "Pointer to the current macroblock structure."
- \param "orgL" "Pointer to the original MB."
- \param "orgPitch" "Pointer to the original picture pitch."
- \return "void."
- */
- void RCCalculateMAD(AVCEncObject *encvid, AVCMacroblock *currMB, uint8 *orgL, int orgPitch);
-
- /**
- Restore QP related parameters of previous MB when current MB is skipped.
- \param "currMB" "Pointer to the current macroblock."
- \param "video" "Pointer to the common video structure."
- \param "encvid" "Pointer to the global encoding structure."
- \return "void"
- */
- void RCRestoreQP(AVCMacroblock *currMB, AVCCommonObj *video, AVCEncObject *encvid);
-
- /**
- This function is called after a frame is done.
- \param "encvid" "Pointer to the encoder object."
- \return "AVCENC_SUCCESS or AVCENC_SKIPPED_PICTURE when bufer overflow (need to discard current frame)."
- */
- AVCEnc_Status RCUpdateFrame(AVCEncObject *encvid);
-
- /*--------- residual.c -------------------*/
-
- /**
- This function encodes the intra PCM data and fills it in at the corresponding location
- in the current picture.
- \param "video" "Pointer to AVCEncObject."
- \return "AVCENC_SUCCESS if success, or else for bitstream errors."
- */
- AVCEnc_Status EncodeIntraPCM(AVCEncObject *video);
-
- /**
- This function performs CAVLC syntax encoding on the run and level information of the coefficients.
- The level and run arrays are elements in AVCEncObject structure, populated by TransQuantZZ,
- TransQuantIntraDC and TransQuantChromaDC functions.
- \param "video" "Pointer to AVCEncObject."
- \param "type" "One of AVCResidualType for a particular 4x4 block."
- \param "bindx" "Block index or number of nonzero coefficients for AVC_Intra16DC and AVC_ChromaDC mode."
- \param "currMB" "Pointer to the current macroblock structure."
- \return "AVCENC_SUCCESS for success."
- \Note "This function has 32-bit machine specific instruction!!!!"
- */
- AVCEnc_Status enc_residual_block(AVCEncObject *encvid, AVCResidualType type, int bindx, AVCMacroblock *currMB);
-
-
- /*------------- sad.c ---------------------------*/
-
-
- int AVCSAD_MB_HalfPel_Cxhyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
- int AVCSAD_MB_HalfPel_Cyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
- int AVCSAD_MB_HalfPel_Cxh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
- int AVCSAD_Macroblock_C(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
-
-#ifdef HTFM /* 3/2/1, Hypothesis Testing Fast Matching */
- int AVCSAD_MB_HP_HTFM_Collectxhyh(uint8 *ref, uint8 *blk, int dmin_x, void *extra_info);
- int AVCSAD_MB_HP_HTFM_Collectyh(uint8 *ref, uint8 *blk, int dmin_x, void *extra_info);
- int AVCSAD_MB_HP_HTFM_Collectxh(uint8 *ref, uint8 *blk, int dmin_x, void *extra_info);
- int AVCSAD_MB_HP_HTFMxhyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
- int AVCSAD_MB_HP_HTFMyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
- int AVCSAD_MB_HP_HTFMxh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
- int AVCSAD_MB_HTFM_Collect(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
- int AVCSAD_MB_HTFM(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
-#endif
-
-
- /*------------- slice.c -------------------------*/
-
- /**
- This function performs the main encoding loop for a slice.
- \param "encvid" "Pointer to AVCEncObject."
- \return "AVCENC_SUCCESS for success, AVCENC_PICTURE_READY for end-of-picture and
- AVCENC_FAIL or AVCENC_SLICE_EMPTY otherwise."
- */
- AVCEnc_Status AVCEncodeSlice(AVCEncObject *encvid);
-
- /**
- This function performs the main encoding operation for one macroblock.
- \param "video" "pointer to AVCEncObject."
- \return "AVCENC_SUCCESS for success, or other bitstream related failure status."
- */
- AVCEnc_Status EncodeMB(AVCEncObject *video);
-
- /**
- This function calls prediction INTRA/INTER functions, transform,
- quantization and zigzag scanning to get the run-level symbols.
- \param "encvid" "pointer to AVCEncObject."
- \param "curL" "Pointer to the Luma component of the current frame."
- \param "curCb" "Pointer to the Cb component of the current frame."
- \param "curCr" "Pointer to the Cr component of the current frame."
- \return "void for now."
- */
- void MBPredTransQuantZZ(AVCEncObject *encvid, uint8 *curL, uint8 *curCb, uint8 *curCr);
-
- /**
- This function copies the content of the prediction MB into the reconstructed YUV
- frame directly.
- \param "curL" "Pointer to the destination Y component."
- \param "curCb" "Pointer to the destination Cb component."
- \param "curCr" "Pointer to the destination Cr component."
- \param "predBlock" "Pointer to the prediction MB."
- \param "picWidth" "The width of the frame."
- \return "None."
- */
- void Copy_MB(uint8 *curL, uint8 *curCb, uint8 *curCr, uint8 *predBlock, int picWidth);
-
- /**
- This function encodes the mb_type, CBP, prediction mode, ref idx and MV.
- \param "currMB" "Pointer to the current macroblock structure."
- \param "video" "Pointer to the AVCEncObject structure."
- \return "AVCENC_SUCCESS for success or else for fail."
- */
- AVCEnc_Status EncodeMBHeader(AVCMacroblock *currMB, AVCEncObject *video);
-
- /**
- This function finds the right mb_type for a macroblock given the mbMode, CBP,
- NumPart, PredPartMode.
- \param "currMB" "Pointer to the current macroblock structure."
- \param "slice_type" "Value of the slice_type."
- \return "mb_type."
- */
- uint InterpretMBType(AVCMacroblock *currMB, int slice_type);
-
- /**
- This function encodes the mb_pred part of the macroblock data.
- \param "video" "Pointer to the AVCCommonObj structure."
- \param "currMB" "Pointer to the current macroblock structure."
- \param "stream" "Pointer to the AVCEncBitstream structure."
- \return "AVCENC_SUCCESS for success or bitstream fail status."
- */
- AVCEnc_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream);
-
- /**
- This function encodes the sub_mb_pred part of the macroblock data.
- \param "video" "Pointer to the AVCCommonObj structure."
- \param "currMB" "Pointer to the current macroblock structure."
- \param "stream" "Pointer to the AVCEncBitstream structure."
- \return "AVCENC_SUCCESS for success or bitstream fail status."
- */
- AVCEnc_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream);
-
- /**
- This function interprets the sub_mb_type and sets the necessary information
- in the macroblock structure when the slice type is AVC_P_SLICE.
- \param "mblock" "Pointer to current AVCMacroblock."
- \param "sub_mb_type" "From the syntax bitstream."
- \return "void"
- */
- void InterpretSubMBTypeP(AVCMacroblock *mblock, uint *sub_mb_type);
-
- /**
- This function interprets the sub_mb_type and sets the necessary information
- in the macroblock structure when the slice type is AVC_B_SLICE.
- \param "mblock" "Pointer to current AVCMacroblock."
- \param "sub_mb_type" "From the syntax bitstream."
- \return "void"
- */
- void InterpretSubMBTypeB(AVCMacroblock *mblock, uint *sub_mb_type);
-
- /**
- This function encodes intra 4x4 mode. It calculates the predicted I4x4 mode and the
- remnant to be encoded.
- \param "video" "Pointer to the AVCCommonObj structure."
- \param "currMB" "Pointer to the AVCMacroblock structure."
- \param "stream" "Pointer to the AVCEncBitstream structure."
- \return "AVCENC_SUCCESS for success."
- */
- AVCEnc_Status EncodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream);
-
- /*------------- vlc_encode.c -----------------------*/
- /**
- This function encodes and writes a value into an Exp-Golomb codeword.
- \param "bitstream" "Pointer to AVCEncBitstream."
- \param "codeNum" "The codeNum value to be encoded."
- \return "AVCENC_SUCCESS for success or bitstream error messages for fail."
- */
- AVCEnc_Status ue_v(AVCEncBitstream *bitstream, uint codeNum);
-
- /**
- This function maps and encodes signed Exp-Golomb codes.
- \param "bitstream" "Pointer to AVCEncBitstream."
- \param "value" "The syntax element value."
- \return "AVCENC_SUCCESS or AVCENC_FAIL."
- */
- AVCEnc_Status se_v(AVCEncBitstream *bitstream, int value);
-
- /**
- This function maps and encodes truncated Exp-Golomb codes.
- \param "bitstream" "Pointer to AVCEncBitstream."
- \param "value" "The syntax element value."
- \param "range" "Range of the value as input to determine the algorithm."
- \return "AVCENC_SUCCESS or AVCENC_FAIL."
- */
- AVCEnc_Status te_v(AVCEncBitstream *bitstream, uint value, uint range);
-
- /**
- This function creates Exp-Golomb codeword from codeNum.
- \param "bitstream" "Pointer to AVCEncBitstream."
- \param "codeNum" "The codeNum value."
- \return "AVCENC_SUCCESS for success or bitstream error messages for fail."
- */
- AVCEnc_Status SetEGBitstring(AVCEncBitstream *bitstream, uint codeNum);
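
A minimal sketch of the ue(v) mapping these helpers implement, assuming only standard C; ExpGolombBits is a hypothetical illustration and not a function of this library. The codeword for codeNum is simply the binary form of (codeNum + 1) preceded by floor(log2(codeNum + 1)) zero bits.

    /* Hypothetical sketch, not part of avcenc_lib.h: compute the length and
       bit pattern of the unsigned Exp-Golomb codeword for codeNum. */
    static void ExpGolombBits(uint codeNum, uint *numBits, uint *bits)
    {
        uint info = codeNum + 1;
        uint m = 0;
        while ((info >> (m + 1)) != 0) m++;  /* m = floor(log2(codeNum + 1)) */
        *numBits = (m << 1) + 1;             /* codeword length is 2*m + 1 bits */
        *bits = info;                        /* leading zeros are implied by the length */
    }

For example, codeNum = 0, 1, 2 map to the codewords 1, 010 and 011 respectively.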
-
- /**
- This function performs CAVLC encoding of the CBP (coded block pattern) of a macroblock
- by mapping the CBP to the corresponding VLC codeNum and then calling ue_v().
- \param "currMB" "Pointer to the current AVCMacroblock structure."
- \param "stream" "Pointer to the AVCEncBitstream."
- \return "AVCENC_SUCCESS for success or a bitstream failure status."
- */
- AVCEnc_Status EncodeCBP(AVCMacroblock *currMB, AVCEncBitstream *stream);
-
- /**
- This function encodes trailing ones and total coefficient.
- \param "stream" "Pointer to the AVCEncBitstream."
- \param "TrailingOnes" "The trailing one variable output."
- \param "TotalCoeff" "The total coefficient variable output."
- \param "nC" "Context for number of nonzero coefficient (prediction context)."
- \return "AVCENC_SUCCESS for success or else for bitstream failure."
- */
- AVCEnc_Status ce_TotalCoeffTrailingOnes(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff, int nC);
-
- /**
- This function encodes trailing ones and total coefficient for chroma DC block.
- \param "stream" "Pointer to the AVCEncBitstream."
- \param "TrailingOnes" "The trailing one variable output."
- \param "TotalCoeff" "The total coefficient variable output."
- \return "AVCENC_SUCCESS for success or else for bitstream failure."
- */
- AVCEnc_Status ce_TotalCoeffTrailingOnesChromaDC(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff);
-
- /**
- This function encodes total_zeros value as in Table 9-7 and 9-8.
- \param "stream" "Pointer to the AVCEncBitstream."
- \param "TotalZeros" "The total_zeros value."
- \param "TotalCoeff" "The total coefficient variable output."
- \return "AVCENC_SUCCESS for success or else for bitstream failure."
- */
- AVCEnc_Status ce_TotalZeros(AVCEncBitstream *stream, int total_zeros, int TotalCoeff);
-
- /**
- This function encodes total_zeros VLC syntax for chroma DC as in Table 9-9.
- \param "stream" "Pointer to the AVCEncBitstream."
- \param "TotalZeros" "The total_zeros value."
- \param "TotalCoeff" "The total coefficient variable output."
- \return "AVCENC_SUCCESS for success or else for bitstream failure."
- */
- AVCEnc_Status ce_TotalZerosChromaDC(AVCEncBitstream *stream, int total_zeros, int TotalCoeff);
-
- /**
- This function encodes run_before VLC syntax as in Table 9-10.
- \param "stream" "Pointer to the AVCEncBitstream."
- \param "run_before" "The run_before value."
- \param "zerosLeft" "The context for number of zeros left."
- \return "AVCENC_SUCCESS for success or else for bitstream failure."
- */
- AVCEnc_Status ce_RunBefore(AVCEncBitstream *stream, int run_before, int zerosLeft);
-
-#ifdef __cplusplus
-}
-#endif
-
-
-#endif /* _AVCENC_LIB_H_ */
-
diff --git a/media/libstagefright/codecs/avc/enc/src/bitstream_io.cpp b/media/libstagefright/codecs/avc/enc/src/bitstream_io.cpp
deleted file mode 100644
index d71c327..0000000
--- a/media/libstagefright/codecs/avc/enc/src/bitstream_io.cpp
+++ /dev/null
@@ -1,339 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-
-#define WORD_SIZE 32
-
-/* array for trailing bit pattern as function of number of bits */
-/* the first one is unused. */
-const static uint8 trailing_bits[9] = {0, 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80};
-
-/* ======================================================================== */
-/* Function : BitstreamInit() */
-/* Date : 11/4/2003 */
-/* Purpose : Populate bitstream structure with bitstream buffer and size */
-/* it also initializes internal data */
-/* In/out : */
-/* Return : AVCENC_SUCCESS if successful, AVCENC_FAIL if failed.       */
-/* Modified : */
-/* ======================================================================== */
-/* |--------|--------|----~~~~~-----|---------|---------|---------|
- ^ ^write_pos ^buf_size
- bitstreamBuffer <--------->
- current_word
-
- |-----xxxxxxxxxxxxx| = current_word 32 or 16 bits
- <---->
- bit_left
- ======================================================================== */
-
-AVCEnc_Status BitstreamEncInit(AVCEncBitstream *stream, uint8 *buffer, int buf_size,
- uint8 *overrunBuffer, int oBSize)
-{
- if (stream == NULL || buffer == NULL || buf_size <= 0)
- {
- return AVCENC_BITSTREAM_INIT_FAIL;
- }
-
- stream->bitstreamBuffer = buffer;
-
- stream->buf_size = buf_size;
-
- stream->write_pos = 0;
-
- stream->count_zeros = 0;
-
- stream->current_word = 0;
-
- stream->bit_left = WORD_SIZE;
-
- stream->overrunBuffer = overrunBuffer;
-
- stream->oBSize = oBSize;
-
- return AVCENC_SUCCESS;
-}
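
A minimal caller-side sketch of this bitstream API, assuming the declarations in avcenc_lib.h above; the buffer size and the values written are arbitrary, error checking is omitted, and the overrun buffer (normally wired up through stream->encvid by the encoder) is not used:

    uint8 buffer[1024];
    AVCEncBitstream bs;
    uint nal_size;

    BitstreamEncInit(&bs, buffer, sizeof(buffer), NULL, 0); /* no overrun buffer */
    BitstreamWriteBits(&bs, 8, 0x67);        /* write an arbitrary 8-bit value */
    BitstreamWrite1Bit(&bs, 1);              /* write a single flag bit */
    BitstreamTrailingBits(&bs, &nal_size);   /* stop bit + alignment to a byte boundary */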
-
-/* ======================================================================== */
-/* Function : AVCBitstreamSaveWord() */
-/* Date : 3/29/2004 */
-/* Purpose : Save the current_word into the buffer, byte-swap, and */
-/* add emulation prevention insertion. */
-/* In/out : */
-/* Return : AVCENC_SUCCESS if successful, AVCENC_WRITE_FAIL if buffer is  */
-/* full. */
-/* Modified : */
-/* ======================================================================== */
-AVCEnc_Status AVCBitstreamSaveWord(AVCEncBitstream *stream)
-{
- int num_bits;
- uint8 *write_pnt, byte;
- uint current_word;
-
- /* check number of bytes in current_word, must always be byte-aligned!!!! */
- num_bits = WORD_SIZE - stream->bit_left; /* must be multiple of 8 !!*/
-
- if (stream->buf_size - stream->write_pos <= (num_bits >> 3) + 2) /* 2 more bytes for possible EPBS */
- {
- if (AVCENC_SUCCESS != AVCBitstreamUseOverrunBuffer(stream, (num_bits >> 3) + 2))
- {
- return AVCENC_BITSTREAM_BUFFER_FULL;
- }
- }
-
- /* write word, byte-by-byte */
- write_pnt = stream->bitstreamBuffer + stream->write_pos;
- current_word = stream->current_word;
- while (num_bits) /* no need to check stream->buf_size and stream->write_pos, taken care already */
- {
- num_bits -= 8;
- byte = (current_word >> num_bits) & 0xFF;
- if (stream->count_zeros == 2)
- { /* for num_bits = 32, this can add 2 more bytes extra for EPBS */
- if (byte <= 3)
- {
- *write_pnt++ = 0x3;
- stream->write_pos++;
- stream->count_zeros = 0;
- }
- }
- if (byte != 0)
- {
- *write_pnt++ = byte;
- stream->write_pos++;
- stream->count_zeros = 0;
- }
- else
- {
- stream->count_zeros++;
- *write_pnt++ = byte;
- stream->write_pos++;
- }
- }
-
- /* reset current_word and bit_left */
- stream->current_word = 0;
- stream->bit_left = WORD_SIZE;
-
- return AVCENC_SUCCESS;
-}
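
Taken in isolation, the escaping rule implemented above is: inside the payload, any byte with value 0x00..0x03 that follows two consecutive zero bytes is preceded by an emulation prevention byte 0x03, so the start-code prefix 0x000001 can never occur. A hedged standalone sketch of the same rule (EscapeRBSP is illustrative only, not part of this file; out must be able to grow by one byte for every two consecutive zeros, roughly 4/3 of inLen in the worst case):

    static int EscapeRBSP(const uint8 *in, int inLen, uint8 *out)
    {
        int zeros = 0, outLen = 0;
        for (int i = 0; i < inLen; i++)
        {
            if (zeros == 2 && in[i] <= 0x03)
            {
                out[outLen++] = 0x03;               /* emulation prevention byte */
                zeros = 0;
            }
            out[outLen++] = in[i];
            zeros = (in[i] == 0) ? (zeros + 1) : 0; /* track the run of zero bytes */
        }
        return outLen;
    }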
-
-/* ======================================================================== */
-/* Function : BitstreamWriteBits() */
-/* Date : 3/29/2004 */
-/* Purpose : Write up to one machine word.                               */
-/* In/out : Unused bits in 'code' must be all zeros. */
-/* Return : AVCENC_SUCCESS if successful, AVCENC_WRITE_FAIL if buffer is  */
-/* full. */
-/* Modified : */
-/* ======================================================================== */
-AVCEnc_Status BitstreamWriteBits(AVCEncBitstream *stream, int nBits, uint code)
-{
- AVCEnc_Status status = AVCENC_SUCCESS;
- int bit_left = stream->bit_left;
- uint current_word = stream->current_word;
-
- //DEBUG_LOG(userData,AVC_LOGTYPE_INFO,"BitstreamWriteBits",nBits,-1);
-
- if (nBits > WORD_SIZE) /* has to be taken care of specially */
- {
- return AVCENC_FAIL; /* for now */
- /* otherwise, break it down into 2 writes of less than 16 bits at a time. */
- }
-
- if (nBits <= bit_left) /* more bits left in current_word */
- {
- stream->current_word = (current_word << nBits) | code;
- stream->bit_left -= nBits;
- if (stream->bit_left == 0) /* prepare for the next word */
- {
- status = AVCBitstreamSaveWord(stream);
- return status;
- }
- }
- else
- {
- stream->current_word = (current_word << bit_left) | (code >> (nBits - bit_left));
-
- nBits -= bit_left;
-
- stream->bit_left = 0;
-
- status = AVCBitstreamSaveWord(stream); /* save current word */
-
- stream->bit_left = WORD_SIZE - nBits;
-
- stream->current_word = code; /* no extra masking for code, must be handled before saving */
- }
-
- return status;
-}
-
-
-/* ======================================================================== */
-/* Function : BitstreamWrite1Bit() */
-/* Date : 3/30/2004 */
-/* Purpose : Write 1 bit */
-/* In/out : Unused bits in 'code' must be all zeros. */
-/* Return : AVCENC_SUCCESS if successful, AVCENC_WRITE_FAIL if buffer is  */
-/* full. */
-/* Modified : */
-/* ======================================================================== */
-AVCEnc_Status BitstreamWrite1Bit(AVCEncBitstream *stream, uint code)
-{
- AVCEnc_Status status;
- uint current_word = stream->current_word;
-
- //DEBUG_LOG(userData,AVC_LOGTYPE_INFO,"BitstreamWrite1Bit",code,-1);
-
- //if(1 <= bit_left) /* more bits left in current_word */
- /* we can assume that there is always a positive bit_left in the current word */
- stream->current_word = (current_word << 1) | code;
- stream->bit_left--;
- if (stream->bit_left == 0) /* prepare for the next word */
- {
- status = AVCBitstreamSaveWord(stream);
- return status;
- }
-
- return AVCENC_SUCCESS;
-}
-
-
-/* ======================================================================== */
-/* Function : BitstreamTrailingBits() */
-/* Date : 3/31/2004 */
-/* Purpose : Add trailing bits and report the final EBSP size. */
-/* In/out : */
-/* Return : AVCENC_SUCCESS if successful, AVCENC_WRITE_FAIL if buffer is  */
-/* full. */
-/* Modified : */
-/* ======================================================================== */
-AVCEnc_Status BitstreamTrailingBits(AVCEncBitstream *bitstream, uint *nal_size)
-{
- (void)(nal_size);
-
- AVCEnc_Status status;
- int bit_left = bitstream->bit_left;
-
- bit_left &= 0x7; /* modulo by 8 */
- if (bit_left == 0) bit_left = 8;
- /* bitstream->bit_left == 0 cannot happen here since it would have been Saved already */
-
- status = BitstreamWriteBits(bitstream, bit_left, trailing_bits[bit_left]);
-
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- /* if it's not saved, save it. */
- //if(bitstream->bit_left<(WORD_SIZE<<3)) /* in fact, no need to check */
- {
- status = AVCBitstreamSaveWord(bitstream);
- }
-
- return status;
-}
-
-/* check whether it's byte-aligned */
-bool byte_aligned(AVCEncBitstream *stream)
-{
- if (stream->bit_left % 8)
- return false;
- else
- return true;
-}
-
-
-/* determine whether overrun buffer can be used or not */
-AVCEnc_Status AVCBitstreamUseOverrunBuffer(AVCEncBitstream* stream, int numExtraBytes)
-{
- AVCEncObject *encvid = (AVCEncObject*)stream->encvid;
-
- if (stream->overrunBuffer != NULL) // overrunBuffer is set
- {
- if (stream->bitstreamBuffer != stream->overrunBuffer) // not already used
- {
- if (stream->write_pos + numExtraBytes >= stream->oBSize)
- {
- stream->oBSize = stream->write_pos + numExtraBytes + 100;
- stream->oBSize &= (~0x3); // make it multiple of 4
-
- // allocate new overrun Buffer
- if (encvid->overrunBuffer)
- {
- encvid->avcHandle->CBAVC_Free(encvid->avcHandle->userData,
- encvid->overrunBuffer);
- }
-
- encvid->oBSize = stream->oBSize;
- encvid->overrunBuffer = (uint8*) encvid->avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,
- stream->oBSize, DEFAULT_ATTR);
-
- stream->overrunBuffer = encvid->overrunBuffer;
- if (stream->overrunBuffer == NULL)
- {
- return AVCENC_FAIL;
- }
- }
-
- // copy everything to overrun buffer and start using it.
- memcpy(stream->overrunBuffer, stream->bitstreamBuffer, stream->write_pos);
- stream->bitstreamBuffer = stream->overrunBuffer;
- stream->buf_size = stream->oBSize;
- }
- else // overrun buffer is already used
- {
- stream->oBSize = stream->write_pos + numExtraBytes + 100;
- stream->oBSize &= (~0x3); // make it multiple of 4
-
- // allocate new overrun buffer
- encvid->oBSize = stream->oBSize;
- encvid->overrunBuffer = (uint8*) encvid->avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,
- stream->oBSize, DEFAULT_ATTR);
-
- if (encvid->overrunBuffer == NULL)
- {
- return AVCENC_FAIL;
- }
-
-
- // copy from the old buffer to new buffer
- memcpy(encvid->overrunBuffer, stream->overrunBuffer, stream->write_pos);
- // free old buffer
- encvid->avcHandle->CBAVC_Free(encvid->avcHandle->userData,
- stream->overrunBuffer);
-
- // assign pointer to new buffer
- stream->overrunBuffer = encvid->overrunBuffer;
- stream->bitstreamBuffer = stream->overrunBuffer;
- stream->buf_size = stream->oBSize;
- }
-
- return AVCENC_SUCCESS;
- }
- else // overrunBuffer is not enabled.
- {
- return AVCENC_FAIL;
- }
-
-}
-
-
-
diff --git a/media/libstagefright/codecs/avc/enc/src/block.cpp b/media/libstagefright/codecs/avc/enc/src/block.cpp
deleted file mode 100644
index 01e26a6..0000000
--- a/media/libstagefright/codecs/avc/enc/src/block.cpp
+++ /dev/null
@@ -1,1283 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-
-/* subtract with the prediction and do transformation */
-void trans(uint8 *cur, int pitch, uint8 *predBlock, int16 *dataBlock)
-{
- int16 *ptr = dataBlock;
- int r0, r1, r2, r3, j;
- int curpitch = (uint)pitch >> 16;
- int predpitch = (pitch & 0xFFFF);
-
- /* horizontal */
- j = 4;
- while (j > 0)
- {
- /* calculate the residue first */
- r0 = cur[0] - predBlock[0];
- r1 = cur[1] - predBlock[1];
- r2 = cur[2] - predBlock[2];
- r3 = cur[3] - predBlock[3];
-
- r0 += r3; //ptr[0] + ptr[3];
- r3 = r0 - (r3 << 1); //ptr[0] - ptr[3];
- r1 += r2; //ptr[1] + ptr[2];
- r2 = r1 - (r2 << 1); //ptr[1] - ptr[2];
-
- ptr[0] = r0 + r1;
- ptr[2] = r0 - r1;
- ptr[1] = (r3 << 1) + r2;
- ptr[3] = r3 - (r2 << 1);
-
- ptr += 16;
- predBlock += predpitch;
- cur += curpitch;
- j--;
- }
- /* vertical */
- ptr = dataBlock;
- j = 4;
- while (j > 0)
- {
- r0 = ptr[0] + ptr[48];
- r3 = ptr[0] - ptr[48];
- r1 = ptr[16] + ptr[32];
- r2 = ptr[16] - ptr[32];
-
- ptr[0] = r0 + r1;
- ptr[32] = r0 - r1;
- ptr[16] = (r3 << 1) + r2;
- ptr[48] = r3 - (r2 << 1);
-
- ptr++;
- j--;
- }
-
- return ;
-}
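
The add/shift butterfly in trans() is the 4x4 forward integer core transform; restated as a matrix product, with X the 4x4 residual (cur minus predBlock) and Y what ends up in dataBlock (notation introduced here only for illustration):

\[ Y = C\,X\,C^{T}, \qquad C = \begin{pmatrix} 1 & 1 & 1 & 1 \\ 2 & 1 & -1 & -2 \\ 1 & -1 & -1 & 1 \\ 1 & -2 & 2 & -1 \end{pmatrix} \]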
-
-
-/* do residue transform quant invquant, invtrans and write output out */
-int dct_luma(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org, int *coef_cost)
-{
- AVCCommonObj *video = encvid->common;
- int org_pitch = encvid->currInput->pitch;
- int pitch = video->currPic->pitch;
- int16 *coef = video->block;
- uint8 *pred = video->pred_block; // size 16 for a 4x4 block
- int pred_pitch = video->pred_pitch;
- int r0, r1, r2, r3, j, k, idx;
- int *level, *run;
- int Qq, Rq, q_bits, qp_const, quant;
- int data, lev, zero_run;
- int numcoeff;
-
- coef += ((blkidx & 0x3) << 2) + ((blkidx >> 2) << 6); /* point to the 4x4 block */
-
- /* first take a 4x4 transform */
- /* horizontal */
- j = 4;
- while (j > 0)
- {
- /* calculate the residue first */
- r0 = org[0] - pred[0]; /* OPTIMIZEABLE */
- r1 = org[1] - pred[1];
- r2 = org[2] - pred[2];
- r3 = org[3] - pred[3];
-
- r0 += r3; //ptr[0] + ptr[3];
- r3 = r0 - (r3 << 1); //ptr[0] - ptr[3];
- r1 += r2; //ptr[1] + ptr[2];
- r2 = r1 - (r2 << 1); //ptr[1] - ptr[2];
-
- coef[0] = r0 + r1;
- coef[2] = r0 - r1;
- coef[1] = (r3 << 1) + r2;
- coef[3] = r3 - (r2 << 1);
-
- coef += 16;
- org += org_pitch;
- pred += pred_pitch;
- j--;
- }
- /* vertical */
- coef -= 64;
- pred -= (pred_pitch << 2);
- j = 4;
- while (j > 0) /* OPTIMIZABLE */
- {
- r0 = coef[0] + coef[48];
- r3 = coef[0] - coef[48];
- r1 = coef[16] + coef[32];
- r2 = coef[16] - coef[32];
-
- coef[0] = r0 + r1;
- coef[32] = r0 - r1;
- coef[16] = (r3 << 1) + r2;
- coef[48] = r3 - (r2 << 1);
-
- coef++;
- j--;
- }
-
- coef -= 4;
-
- /* quant */
- level = encvid->level[ras2dec[blkidx]];
- run = encvid->run[ras2dec[blkidx]];
-
- Rq = video->QPy_mod_6;
- Qq = video->QPy_div_6;
- qp_const = encvid->qp_const;
- q_bits = 15 + Qq;
-
- zero_run = 0;
- numcoeff = 0;
- for (k = 0; k < 16; k++)
- {
- idx = ZZ_SCAN_BLOCK[k]; /* map back to raster scan order */
- data = coef[idx];
- quant = quant_coef[Rq][k];
- if (data > 0)
- {
- lev = data * quant + qp_const;
- }
- else
- {
- lev = -data * quant + qp_const;
- }
- lev >>= q_bits;
- if (lev)
- {
- *coef_cost += ((lev > 1) ? MAX_VALUE : COEFF_COST[DISABLE_THRESHOLDING][zero_run]);
-
- /* dequant */
- quant = dequant_coefres[Rq][k];
- if (data > 0)
- {
- level[numcoeff] = lev;
- coef[idx] = (lev * quant) << Qq;
- }
- else
- {
- level[numcoeff] = -lev;
- coef[idx] = (-lev * quant) << Qq;
- }
- run[numcoeff++] = zero_run;
- zero_run = 0;
- }
- else
- {
- zero_run++;
- coef[idx] = 0;
- }
- }
-
- if (video->currMB->mb_intra) // only do inverse transform with intra block
- {
- if (numcoeff) /* then do inverse transform */
- {
- for (j = 4; j > 0; j--) /* horizontal */
- {
- r0 = coef[0] + coef[2];
- r1 = coef[0] - coef[2];
- r2 = (coef[1] >> 1) - coef[3];
- r3 = coef[1] + (coef[3] >> 1);
-
- coef[0] = r0 + r3;
- coef[1] = r1 + r2;
- coef[2] = r1 - r2;
- coef[3] = r0 - r3;
-
- coef += 16;
- }
-
- coef -= 64;
- for (j = 4; j > 0; j--) /* vertical, has to be done after horizontal */
- {
- r0 = coef[0] + coef[32];
- r1 = coef[0] - coef[32];
- r2 = (coef[16] >> 1) - coef[48];
- r3 = coef[16] + (coef[48] >> 1);
- r0 += r3;
- r3 = (r0 - (r3 << 1)); /* r0-r3 */
- r1 += r2;
- r2 = (r1 - (r2 << 1)); /* r1-r2 */
- r0 += 32;
- r1 += 32;
- r2 += 32;
- r3 += 32;
-
- r0 = pred[0] + (r0 >> 6);
- if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
- r1 = *(pred += pred_pitch) + (r1 >> 6);
- if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
- r2 = *(pred += pred_pitch) + (r2 >> 6);
- if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
- r3 = pred[pred_pitch] + (r3 >> 6);
- if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
-
- *cur = r0;
- *(cur += pitch) = r1;
- *(cur += pitch) = r2;
- cur[pitch] = r3;
- cur -= (pitch << 1);
- cur++;
- pred -= (pred_pitch << 1);
- pred++;
- coef++;
- }
- }
- else // copy from pred to cur
- {
- *((uint32*)cur) = *((uint32*)pred);
- *((uint32*)(cur += pitch)) = *((uint32*)(pred += pred_pitch));
- *((uint32*)(cur += pitch)) = *((uint32*)(pred += pred_pitch));
- *((uint32*)(cur += pitch)) = *((uint32*)(pred += pred_pitch));
- }
- }
-
- return numcoeff;
-}
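
The quantization loop above follows the usual table-driven form; summarized as a formula consistent with q_bits = 15 + QPy_div_6 and the qp_const values used by this encoder, where the quant_coef table plays the role of MF and the sign of the coefficient is restored afterwards exactly as the level[] handling does:

\[ |Z_k| = \big(|W_k| \cdot MF[QP \bmod 6][k] + f\big) \gg \big(15 + \lfloor QP/6 \rfloor\big), \qquad f \approx \tfrac{2^{q\_bits}}{3} \ \text{(intra)}, \ \tfrac{2^{q\_bits}}{6} \ \text{(inter)} \]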
-
-
-void MBInterIdct(AVCCommonObj *video, uint8 *curL, AVCMacroblock *currMB, int picPitch)
-{
- int16 *coef, *coef8 = video->block;
- uint8 *cur; // the same as curL
- int b8, b4;
- int r0, r1, r2, r3, j, blkidx;
-
- for (b8 = 0; b8 < 4; b8++)
- {
- cur = curL;
- coef = coef8;
-
- if (currMB->CBP&(1 << b8))
- {
- for (b4 = 0; b4 < 4; b4++)
- {
- blkidx = blkIdx2blkXY[b8][b4];
- /* do IDCT */
- if (currMB->nz_coeff[blkidx])
- {
- for (j = 4; j > 0; j--) /* horizontal */
- {
- r0 = coef[0] + coef[2];
- r1 = coef[0] - coef[2];
- r2 = (coef[1] >> 1) - coef[3];
- r3 = coef[1] + (coef[3] >> 1);
-
- coef[0] = r0 + r3;
- coef[1] = r1 + r2;
- coef[2] = r1 - r2;
- coef[3] = r0 - r3;
-
- coef += 16;
- }
-
- coef -= 64;
- for (j = 4; j > 0; j--) /* vertical, has to be done after horizontal */
- {
- r0 = coef[0] + coef[32];
- r1 = coef[0] - coef[32];
- r2 = (coef[16] >> 1) - coef[48];
- r3 = coef[16] + (coef[48] >> 1);
- r0 += r3;
- r3 = (r0 - (r3 << 1)); /* r0-r3 */
- r1 += r2;
- r2 = (r1 - (r2 << 1)); /* r1-r2 */
- r0 += 32;
- r1 += 32;
- r2 += 32;
- r3 += 32;
-
- r0 = cur[0] + (r0 >> 6);
- if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
- *cur = r0;
- r1 = *(cur += picPitch) + (r1 >> 6);
- if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
- *cur = r1;
- r2 = *(cur += picPitch) + (r2 >> 6);
- if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
- *cur = r2;
- r3 = cur[picPitch] + (r3 >> 6);
- if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
- cur[picPitch] = r3;
-
- cur -= (picPitch << 1);
- cur++;
- coef++;
- }
- cur -= 4;
- coef -= 4;
- }
- if (b4&1)
- {
- cur += ((picPitch << 2) - 4);
- coef += 60;
- }
- else
- {
- cur += 4;
- coef += 4;
- }
- }
- }
-
- if (b8&1)
- {
- curL += ((picPitch << 3) - 8);
- coef8 += 120;
- }
- else
- {
- curL += 8;
- coef8 += 8;
- }
- }
-
- return ;
-}
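
Each 1-D pass of the inverse butterfly above (the same code appears in the intra path of dct_luma()) computes the standard inverse core transform, with the half terms implemented as arithmetic right shifts (coef >> 1) and the (x + 32) >> 6 rounding applied in the second, vertical pass; written out for one column (notation is illustrative only):

\[ \begin{pmatrix} x_0 \\ x_1 \\ x_2 \\ x_3 \end{pmatrix} = \begin{pmatrix} 1 & 1 & 1 & \tfrac{1}{2} \\ 1 & \tfrac{1}{2} & -1 & -1 \\ 1 & -\tfrac{1}{2} & -1 & 1 \\ 1 & -1 & 1 & -\tfrac{1}{2} \end{pmatrix} \begin{pmatrix} y_0 \\ y_1 \\ y_2 \\ y_3 \end{pmatrix} \]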
-
-/* perform dct, quant, iquant, idct for the entire MB */
-void dct_luma_16x16(AVCEncObject *encvid, uint8 *curL, uint8 *orgL)
-{
- AVCCommonObj *video = encvid->common;
- int pitch = video->currPic->pitch;
- int org_pitch = encvid->currInput->pitch;
- AVCMacroblock *currMB = video->currMB;
- int16 *coef = video->block;
- uint8 *pred = encvid->pred_i16[currMB->i16Mode];
- int blk_x, blk_y, j, k, idx, b8, b4;
- int r0, r1, r2, r3, m0, m1, m2 , m3;
- int data, lev;
- int *level, *run, zero_run, ncoeff;
- int Rq, Qq, quant, q_bits, qp_const;
- int offset_cur[4], offset_pred[4], offset;
-
- /* horizontal */
- for (j = 16; j > 0; j--)
- {
- for (blk_x = 4; blk_x > 0; blk_x--)
- {
- /* calculate the residue first */
- r0 = *orgL++ - *pred++;
- r1 = *orgL++ - *pred++;
- r2 = *orgL++ - *pred++;
- r3 = *orgL++ - *pred++;
-
- r0 += r3; //ptr[0] + ptr[3];
- r3 = r0 - (r3 << 1); //ptr[0] - ptr[3];
- r1 += r2; //ptr[1] + ptr[2];
- r2 = r1 - (r2 << 1); //ptr[1] - ptr[2];
-
- *coef++ = r0 + r1;
- *coef++ = (r3 << 1) + r2;
- *coef++ = r0 - r1;
- *coef++ = r3 - (r2 << 1);
- }
- orgL += (org_pitch - 16);
- }
- pred -= 256;
- coef -= 256;
- /* vertical */
- for (blk_y = 4; blk_y > 0; blk_y--)
- {
- for (j = 16; j > 0; j--)
- {
- r0 = coef[0] + coef[48];
- r3 = coef[0] - coef[48];
- r1 = coef[16] + coef[32];
- r2 = coef[16] - coef[32];
-
- coef[0] = r0 + r1;
- coef[32] = r0 - r1;
- coef[16] = (r3 << 1) + r2;
- coef[48] = r3 - (r2 << 1);
-
- coef++;
- }
- coef += 48;
- }
-
- /* then perform DC transform */
- coef -= 256;
- for (j = 4; j > 0; j--)
- {
- r0 = coef[0] + coef[12];
- r3 = coef[0] - coef[12];
- r1 = coef[4] + coef[8];
- r2 = coef[4] - coef[8];
-
- coef[0] = r0 + r1;
- coef[8] = r0 - r1;
- coef[4] = r3 + r2;
- coef[12] = r3 - r2;
- coef += 64;
- }
- coef -= 256;
- for (j = 4; j > 0; j--)
- {
- r0 = coef[0] + coef[192];
- r3 = coef[0] - coef[192];
- r1 = coef[64] + coef[128];
- r2 = coef[64] - coef[128];
-
- coef[0] = (r0 + r1) >> 1;
- coef[128] = (r0 - r1) >> 1;
- coef[64] = (r3 + r2) >> 1;
- coef[192] = (r3 - r2) >> 1;
- coef += 4;
- }
-
- coef -= 16;
- // then quantize DC
- level = encvid->leveldc;
- run = encvid->rundc;
-
- Rq = video->QPy_mod_6;
- Qq = video->QPy_div_6;
- quant = quant_coef[Rq][0];
- q_bits = 15 + Qq;
- qp_const = encvid->qp_const;
-
- zero_run = 0;
- ncoeff = 0;
- for (k = 0; k < 16; k++) /* in zigzag scan order */
- {
- idx = ZIGZAG2RASTERDC[k];
- data = coef[idx];
- if (data > 0) // quant
- {
- lev = data * quant + (qp_const << 1);
- }
- else
- {
- lev = -data * quant + (qp_const << 1);
- }
- lev >>= (q_bits + 1);
- if (lev) // dequant
- {
- if (data > 0)
- {
- level[ncoeff] = lev;
- coef[idx] = lev;
- }
- else
- {
- level[ncoeff] = -lev;
- coef[idx] = -lev;
- }
- run[ncoeff++] = zero_run;
- zero_run = 0;
- }
- else
- {
- zero_run++;
- coef[idx] = 0;
- }
- }
-
- /* inverse transform DC */
- encvid->numcoefdc = ncoeff;
- if (ncoeff)
- {
- quant = dequant_coefres[Rq][0];
-
- for (j = 0; j < 4; j++)
- {
- m0 = coef[0] + coef[4];
- m1 = coef[0] - coef[4];
- m2 = coef[8] + coef[12];
- m3 = coef[8] - coef[12];
-
-
- coef[0] = m0 + m2;
- coef[4] = m0 - m2;
- coef[8] = m1 - m3;
- coef[12] = m1 + m3;
- coef += 64;
- }
-
- coef -= 256;
-
- if (Qq >= 2) /* this way should be faster than JM */
- { /* they use (((m4*scale)<<(QPy/6))+2)>>2 for both cases. */
- Qq -= 2;
- for (j = 0; j < 4; j++)
- {
- m0 = coef[0] + coef[64];
- m1 = coef[0] - coef[64];
- m2 = coef[128] + coef[192];
- m3 = coef[128] - coef[192];
-
- coef[0] = ((m0 + m2) * quant) << Qq;
- coef[64] = ((m0 - m2) * quant) << Qq;
- coef[128] = ((m1 - m3) * quant) << Qq;
- coef[192] = ((m1 + m3) * quant) << Qq;
- coef += 4;
- }
- Qq += 2; /* restore the value */
- }
- else
- {
- Qq = 2 - Qq;
- offset = 1 << (Qq - 1);
-
- for (j = 0; j < 4; j++)
- {
- m0 = coef[0] + coef[64];
- m1 = coef[0] - coef[64];
- m2 = coef[128] + coef[192];
- m3 = coef[128] - coef[192];
-
- coef[0] = (((m0 + m2) * quant + offset) >> Qq);
- coef[64] = (((m0 - m2) * quant + offset) >> Qq);
- coef[128] = (((m1 - m3) * quant + offset) >> Qq);
- coef[192] = (((m1 + m3) * quant + offset) >> Qq);
- coef += 4;
- }
- Qq = 2 - Qq; /* restore the value */
- }
- coef -= 16; /* back to the origin */
- }
-
- /* now zigzag scan ac coefs, quant, iquant and itrans */
- run = encvid->run[0];
- level = encvid->level[0];
-
- /* offset btw 4x4 block */
- offset_cur[0] = 0;
- offset_cur[1] = (pitch << 2) - 8;
-
- /* offset btw 8x8 block */
- offset_cur[2] = 8 - (pitch << 3);
- offset_cur[3] = -8;
-
- /* similarly for pred */
- offset_pred[0] = 0;
- offset_pred[1] = 56;
- offset_pred[2] = -120;
- offset_pred[3] = -8;
-
- currMB->CBP = 0;
-
- for (b8 = 0; b8 < 4; b8++)
- {
- for (b4 = 0; b4 < 4; b4++)
- {
-
- zero_run = 0;
- ncoeff = 0;
-
- for (k = 1; k < 16; k++)
- {
- idx = ZZ_SCAN_BLOCK[k]; /* map back to raster scan order */
- data = coef[idx];
- quant = quant_coef[Rq][k];
- if (data > 0)
- {
- lev = data * quant + qp_const;
- }
- else
- {
- lev = -data * quant + qp_const;
- }
- lev >>= q_bits;
- if (lev)
- { /* dequant */
- quant = dequant_coefres[Rq][k];
- if (data > 0)
- {
- level[ncoeff] = lev;
- coef[idx] = (lev * quant) << Qq;
- }
- else
- {
- level[ncoeff] = -lev;
- coef[idx] = (-lev * quant) << Qq;
- }
- run[ncoeff++] = zero_run;
- zero_run = 0;
- }
- else
- {
- zero_run++;
- coef[idx] = 0;
- }
- }
-
- currMB->nz_coeff[blkIdx2blkXY[b8][b4]] = ncoeff; /* in raster scan !!! */
- if (ncoeff)
- {
- currMB->CBP |= (1 << b8);
-
- // do inverse transform here
- for (j = 4; j > 0; j--)
- {
- r0 = coef[0] + coef[2];
- r1 = coef[0] - coef[2];
- r2 = (coef[1] >> 1) - coef[3];
- r3 = coef[1] + (coef[3] >> 1);
-
- coef[0] = r0 + r3;
- coef[1] = r1 + r2;
- coef[2] = r1 - r2;
- coef[3] = r0 - r3;
-
- coef += 16;
- }
- coef -= 64;
- for (j = 4; j > 0; j--)
- {
- r0 = coef[0] + coef[32];
- r1 = coef[0] - coef[32];
- r2 = (coef[16] >> 1) - coef[48];
- r3 = coef[16] + (coef[48] >> 1);
-
- r0 += r3;
- r3 = (r0 - (r3 << 1)); /* r0-r3 */
- r1 += r2;
- r2 = (r1 - (r2 << 1)); /* r1-r2 */
- r0 += 32;
- r1 += 32;
- r2 += 32;
- r3 += 32;
- r0 = pred[0] + (r0 >> 6);
- if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
- r1 = pred[16] + (r1 >> 6);
- if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
- r2 = pred[32] + (r2 >> 6);
- if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
- r3 = pred[48] + (r3 >> 6);
- if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
- *curL = r0;
- *(curL += pitch) = r1;
- *(curL += pitch) = r2;
- curL[pitch] = r3;
- curL -= (pitch << 1);
- curL++;
- pred++;
- coef++;
- }
- }
- else // do DC-only inverse
- {
- m0 = coef[0] + 32;
-
- for (j = 4; j > 0; j--)
- {
- r0 = pred[0] + (m0 >> 6);
- if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
- r1 = pred[16] + (m0 >> 6);
- if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
- r2 = pred[32] + (m0 >> 6);
- if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
- r3 = pred[48] + (m0 >> 6);
- if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
- *curL = r0;
- *(curL += pitch) = r1;
- *(curL += pitch) = r2;
- curL[pitch] = r3;
- curL -= (pitch << 1);
- curL++;
- pred++;
- }
- coef += 4;
- }
-
- run += 16; // follow coding order
- level += 16;
- curL += offset_cur[b4&1];
- pred += offset_pred[b4&1];
- coef += offset_pred[b4&1];
- }
-
- curL += offset_cur[2 + (b8&1)];
- pred += offset_pred[2 + (b8&1)];
- coef += offset_pred[2 + (b8&1)];
- }
-
- return ;
-}
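
The extra DC pass in dct_luma_16x16() above (the loops over coef[0], coef[4], ... right after the sixteen 4x4 transforms) is the 4x4 Hadamard transform applied to the luma DC coefficients, with the divide by two folded into the vertical loop as (r >> 1); in matrix form, with H symmetric so the pass order does not matter (notation is illustrative only):

\[ W_{DC} = \tfrac{1}{2}\, H\, X_{DC}\, H, \qquad H = \begin{pmatrix} 1 & 1 & 1 & 1 \\ 1 & 1 & -1 & -1 \\ 1 & -1 & -1 & 1 \\ 1 & -1 & 1 & -1 \end{pmatrix} \]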
-
-
-void dct_chroma(AVCEncObject *encvid, uint8 *curC, uint8 *orgC, int cr)
-{
- AVCCommonObj *video = encvid->common;
- AVCMacroblock *currMB = video->currMB;
- int org_pitch = (encvid->currInput->pitch) >> 1;
- int pitch = (video->currPic->pitch) >> 1;
- int pred_pitch = 16;
- int16 *coef = video->block + 256;
- uint8 *pred = video->pred_block;
- int j, blk_x, blk_y, k, idx, b4;
- int r0, r1, r2, r3, m0;
- int Qq, Rq, qp_const, q_bits, quant;
- int *level, *run, zero_run, ncoeff;
- int data, lev;
- int offset_cur[2], offset_pred[2], offset_coef[2];
- uint8 nz_temp[4];
- int coeff_cost;
-
- if (cr)
- {
- coef += 8;
- pred += 8;
- }
-
- if (currMB->mb_intra == 0) // inter mode
- {
- pred = curC;
- pred_pitch = pitch;
- }
-
- /* do 4x4 transform */
- /* horizontal */
- for (j = 8; j > 0; j--)
- {
- for (blk_x = 2; blk_x > 0; blk_x--)
- {
- /* calculate the residue first */
- r0 = *orgC++ - *pred++;
- r1 = *orgC++ - *pred++;
- r2 = *orgC++ - *pred++;
- r3 = *orgC++ - *pred++;
-
- r0 += r3; //ptr[0] + ptr[3];
- r3 = r0 - (r3 << 1); //ptr[0] - ptr[3];
- r1 += r2; //ptr[1] + ptr[2];
- r2 = r1 - (r2 << 1); //ptr[1] - ptr[2];
-
- *coef++ = r0 + r1;
- *coef++ = (r3 << 1) + r2;
- *coef++ = r0 - r1;
- *coef++ = r3 - (r2 << 1);
-
- }
- coef += 8; // coef pitch is 16
- pred += (pred_pitch - 8); // pred_pitch is 16
- orgC += (org_pitch - 8);
- }
- pred -= (pred_pitch << 3);
- coef -= 128;
- /* vertical */
- for (blk_y = 2; blk_y > 0; blk_y--)
- {
- for (j = 8; j > 0; j--)
- {
- r0 = coef[0] + coef[48];
- r3 = coef[0] - coef[48];
- r1 = coef[16] + coef[32];
- r2 = coef[16] - coef[32];
-
- coef[0] = r0 + r1;
- coef[32] = r0 - r1;
- coef[16] = (r3 << 1) + r2;
- coef[48] = r3 - (r2 << 1);
-
- coef++;
- }
- coef += 56;
- }
- /* then perform DC transform */
- coef -= 128;
-
- /* 2x2 transform of DC components*/
- r0 = coef[0];
- r1 = coef[4];
- r2 = coef[64];
- r3 = coef[68];
-
- coef[0] = r0 + r1 + r2 + r3;
- coef[4] = r0 - r1 + r2 - r3;
- coef[64] = r0 + r1 - r2 - r3;
- coef[68] = r0 - r1 - r2 + r3;
-
- Qq = video->QPc_div_6;
- Rq = video->QPc_mod_6;
- quant = quant_coef[Rq][0];
- q_bits = 15 + Qq;
- qp_const = encvid->qp_const_c;
-
- zero_run = 0;
- ncoeff = 0;
- run = encvid->runcdc + (cr << 2);
- level = encvid->levelcdc + (cr << 2);
-
- /* in zigzag scan order */
- for (k = 0; k < 4; k++)
- {
- idx = ((k >> 1) << 6) + ((k & 1) << 2);
- data = coef[idx];
- if (data > 0)
- {
- lev = data * quant + (qp_const << 1);
- }
- else
- {
- lev = -data * quant + (qp_const << 1);
- }
- lev >>= (q_bits + 1);
- if (lev)
- {
- if (data > 0)
- {
- level[ncoeff] = lev;
- coef[idx] = lev;
- }
- else
- {
- level[ncoeff] = -lev;
- coef[idx] = -lev;
- }
- run[ncoeff++] = zero_run;
- zero_run = 0;
- }
- else
- {
- zero_run++;
- coef[idx] = 0;
- }
- }
-
- encvid->numcoefcdc[cr] = ncoeff;
-
- if (ncoeff)
- {
- currMB->CBP |= (1 << 4); // DC present
- // do inverse transform
- quant = dequant_coefres[Rq][0];
-
- r0 = coef[0] + coef[4];
- r1 = coef[0] - coef[4];
- r2 = coef[64] + coef[68];
- r3 = coef[64] - coef[68];
-
- r0 += r2;
- r2 = r0 - (r2 << 1);
- r1 += r3;
- r3 = r1 - (r3 << 1);
-
- if (Qq >= 1)
- {
- Qq -= 1;
- coef[0] = (r0 * quant) << Qq;
- coef[4] = (r1 * quant) << Qq;
- coef[64] = (r2 * quant) << Qq;
- coef[68] = (r3 * quant) << Qq;
- Qq++;
- }
- else
- {
- coef[0] = (r0 * quant) >> 1;
- coef[4] = (r1 * quant) >> 1;
- coef[64] = (r2 * quant) >> 1;
- coef[68] = (r3 * quant) >> 1;
- }
- }
-
- /* now do AC zigzag scan, quant, iquant and itrans */
- if (cr)
- {
- run = encvid->run[20];
- level = encvid->level[20];
- }
- else
- {
- run = encvid->run[16];
- level = encvid->level[16];
- }
-
- /* offset btw 4x4 block */
- offset_cur[0] = 0;
- offset_cur[1] = (pitch << 2) - 8;
- offset_pred[0] = 0;
- offset_pred[1] = (pred_pitch << 2) - 8;
- offset_coef[0] = 0;
- offset_coef[1] = 56;
-
- coeff_cost = 0;
-
- for (b4 = 0; b4 < 4; b4++)
- {
- zero_run = 0;
- ncoeff = 0;
- for (k = 1; k < 16; k++) /* in zigzag scan order */
- {
- idx = ZZ_SCAN_BLOCK[k]; /* map back to raster scan order */
- data = coef[idx];
- quant = quant_coef[Rq][k];
- if (data > 0)
- {
- lev = data * quant + qp_const;
- }
- else
- {
- lev = -data * quant + qp_const;
- }
- lev >>= q_bits;
- if (lev)
- {
- /* for RD performance*/
- if (lev > 1)
- coeff_cost += MAX_VALUE; // set high cost, shall not be discarded
- else
- coeff_cost += COEFF_COST[DISABLE_THRESHOLDING][zero_run];
-
- /* dequant */
- quant = dequant_coefres[Rq][k];
- if (data > 0)
- {
- level[ncoeff] = lev;
- coef[idx] = (lev * quant) << Qq;
- }
- else
- {
- level[ncoeff] = -lev;
- coef[idx] = (-lev * quant) << Qq;
- }
- run[ncoeff++] = zero_run;
- zero_run = 0;
- }
- else
- {
- zero_run++;
- coef[idx] = 0;
- }
- }
-
- nz_temp[b4] = ncoeff; // raster scan
-
- // just advance the pointers for now, do IDCT later
- coef += 4;
- run += 16;
- level += 16;
- coef += offset_coef[b4&1];
- }
-
- /* rewind the pointers */
- coef -= 128;
-
- if (coeff_cost < _CHROMA_COEFF_COST_)
- {
- /* If it's not efficient to encode any blocks,
- just do DC only. */
- /* We can reset level and run also, but setting nz to zero should be enough. */
- currMB->nz_coeff[16+(cr<<1)] = 0;
- currMB->nz_coeff[17+(cr<<1)] = 0;
- currMB->nz_coeff[20+(cr<<1)] = 0;
- currMB->nz_coeff[21+(cr<<1)] = 0;
-
- for (b4 = 0; b4 < 4; b4++)
- {
- // do DC-only inverse
- m0 = coef[0] + 32;
-
- for (j = 4; j > 0; j--)
- {
- r0 = pred[0] + (m0 >> 6);
- if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
- r1 = *(pred += pred_pitch) + (m0 >> 6);
- if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
- r2 = pred[pred_pitch] + (m0 >> 6);
- if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
- r3 = pred[pred_pitch<<1] + (m0 >> 6);
- if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
- *curC = r0;
- *(curC += pitch) = r1;
- *(curC += pitch) = r2;
- curC[pitch] = r3;
- curC -= (pitch << 1);
- curC++;
- pred += (1 - pred_pitch);
- }
- coef += 4;
- curC += offset_cur[b4&1];
- pred += offset_pred[b4&1];
- coef += offset_coef[b4&1];
- }
- }
- else // not dropping anything, continue with the IDCT
- {
- for (b4 = 0; b4 < 4; b4++)
- {
- ncoeff = nz_temp[b4] ; // in raster scan
- currMB->nz_coeff[16+(b4&1)+(cr<<1)+((b4>>1)<<2)] = ncoeff; // in raster scan
-
- if (ncoeff) // do a check on the nonzero-coeff
- {
- currMB->CBP |= (2 << 4);
-
- // do inverse transform here
- for (j = 4; j > 0; j--)
- {
- r0 = coef[0] + coef[2];
- r1 = coef[0] - coef[2];
- r2 = (coef[1] >> 1) - coef[3];
- r3 = coef[1] + (coef[3] >> 1);
-
- coef[0] = r0 + r3;
- coef[1] = r1 + r2;
- coef[2] = r1 - r2;
- coef[3] = r0 - r3;
-
- coef += 16;
- }
- coef -= 64;
- for (j = 4; j > 0; j--)
- {
- r0 = coef[0] + coef[32];
- r1 = coef[0] - coef[32];
- r2 = (coef[16] >> 1) - coef[48];
- r3 = coef[16] + (coef[48] >> 1);
-
- r0 += r3;
- r3 = (r0 - (r3 << 1)); /* r0-r3 */
- r1 += r2;
- r2 = (r1 - (r2 << 1)); /* r1-r2 */
- r0 += 32;
- r1 += 32;
- r2 += 32;
- r3 += 32;
- r0 = pred[0] + (r0 >> 6);
- if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
- r1 = *(pred += pred_pitch) + (r1 >> 6);
- if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
- r2 = pred[pred_pitch] + (r2 >> 6);
- if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
- r3 = pred[pred_pitch<<1] + (r3 >> 6);
- if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
- *curC = r0;
- *(curC += pitch) = r1;
- *(curC += pitch) = r2;
- curC[pitch] = r3;
- curC -= (pitch << 1);
- curC++;
- pred += (1 - pred_pitch);
- coef++;
- }
- }
- else
- {
- // do DC-only inverse
- m0 = coef[0] + 32;
-
- for (j = 4; j > 0; j--)
- {
- r0 = pred[0] + (m0 >> 6);
- if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
- r1 = *(pred += pred_pitch) + (m0 >> 6);
- if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
- r2 = pred[pred_pitch] + (m0 >> 6);
- if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
- r3 = pred[pred_pitch<<1] + (m0 >> 6);
- if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
- *curC = r0;
- *(curC += pitch) = r1;
- *(curC += pitch) = r2;
- curC[pitch] = r3;
- curC -= (pitch << 1);
- curC++;
- pred += (1 - pred_pitch);
- }
- coef += 4;
- }
- curC += offset_cur[b4&1];
- pred += offset_pred[b4&1];
- coef += offset_coef[b4&1];
- }
- }
-
- return ;
-}
-
-
-/* only DC transform */
-int TransQuantIntra16DC(AVCEncObject *encvid)
-{
- AVCCommonObj *video = encvid->common;
- int16 *block = video->block;
- int *level = encvid->leveldc;
- int *run = encvid->rundc;
- int16 *ptr = block;
- int r0, r1, r2, r3, j;
- int Qq = video->QPy_div_6;
- int Rq = video->QPy_mod_6;
- int q_bits, qp_const, quant;
- int data, lev, zero_run;
- int k, ncoeff, idx;
-
- /* DC transform */
- /* horizontal */
- j = 4;
- while (j)
- {
- r0 = ptr[0] + ptr[12];
- r3 = ptr[0] - ptr[12];
- r1 = ptr[4] + ptr[8];
- r2 = ptr[4] - ptr[8];
-
- ptr[0] = r0 + r1;
- ptr[8] = r0 - r1;
- ptr[4] = r3 + r2;
- ptr[12] = r3 - r2;
- ptr += 64;
- j--;
- }
- /* vertical */
- ptr = block;
- j = 4;
- while (j)
- {
- r0 = ptr[0] + ptr[192];
- r3 = ptr[0] - ptr[192];
- r1 = ptr[64] + ptr[128];
- r2 = ptr[64] - ptr[128];
-
- ptr[0] = (r0 + r1) >> 1;
- ptr[128] = (r0 - r1) >> 1;
- ptr[64] = (r3 + r2) >> 1;
- ptr[192] = (r3 - r2) >> 1;
- ptr += 4;
- j--;
- }
-
- quant = quant_coef[Rq][0];
- q_bits = 15 + Qq;
- qp_const = (1 << q_bits) / 3; // intra
-
- zero_run = 0;
- ncoeff = 0;
-
- for (k = 0; k < 16; k++) /* in zigzag scan order */
- {
- idx = ZIGZAG2RASTERDC[k];
- data = block[idx];
- if (data > 0)
- {
- lev = data * quant + (qp_const << 1);
- }
- else
- {
- lev = -data * quant + (qp_const << 1);
- }
- lev >>= (q_bits + 1);
- if (lev)
- {
- if (data > 0)
- {
- level[ncoeff] = lev;
- block[idx] = lev;
- }
- else
- {
- level[ncoeff] = -lev;
- block[idx] = -lev;
- }
- run[ncoeff++] = zero_run;
- zero_run = 0;
- }
- else
- {
- zero_run++;
- block[idx] = 0;
- }
- }
- return ncoeff;
-}
-
-int TransQuantChromaDC(AVCEncObject *encvid, int16 *block, int slice_type, int cr)
-{
- AVCCommonObj *video = encvid->common;
- int *level, *run;
- int r0, r1, r2, r3;
- int Qq, Rq, q_bits, qp_const, quant;
- int data, lev, zero_run;
- int k, ncoeff, idx;
-
- level = encvid->levelcdc + (cr << 2); /* cb or cr */
- run = encvid->runcdc + (cr << 2);
-
- /* 2x2 transform of DC components*/
- r0 = block[0];
- r1 = block[4];
- r2 = block[64];
- r3 = block[68];
-
- block[0] = r0 + r1 + r2 + r3;
- block[4] = r0 - r1 + r2 - r3;
- block[64] = r0 + r1 - r2 - r3;
- block[68] = r0 - r1 - r2 + r3;
-
- Qq = video->QPc_div_6;
- Rq = video->QPc_mod_6;
- quant = quant_coef[Rq][0];
- q_bits = 15 + Qq;
- if (slice_type == AVC_I_SLICE)
- {
- qp_const = (1 << q_bits) / 3;
- }
- else
- {
- qp_const = (1 << q_bits) / 6;
- }
-
- zero_run = 0;
- ncoeff = 0;
-
- for (k = 0; k < 4; k++) /* in zigzag scan order */
- {
- idx = ((k >> 1) << 6) + ((k & 1) << 2);
- data = block[idx];
- if (data > 0)
- {
- lev = data * quant + (qp_const << 1);
- }
- else
- {
- lev = -data * quant + (qp_const << 1);
- }
- lev >>= (q_bits + 1);
- if (lev)
- {
- if (data > 0)
- {
- level[ncoeff] = lev;
- block[idx] = lev;
- }
- else
- {
- level[ncoeff] = -lev;
- block[idx] = -lev;
- }
- run[ncoeff++] = zero_run;
- zero_run = 0;
- }
- else
- {
- zero_run++;
- block[idx] = 0;
- }
- }
- return ncoeff;
-}
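
The four add/subtract lines on block[0], block[4], block[64] and block[68] are the 2x2 Hadamard transform of the chroma DC coefficients; in matrix form, with c00..c11 denoting those four values (notation is illustrative only):

\[ W_{DC} = \begin{pmatrix} 1 & 1 \\ 1 & -1 \end{pmatrix} \begin{pmatrix} c_{00} & c_{01} \\ c_{10} & c_{11} \end{pmatrix} \begin{pmatrix} 1 & 1 \\ 1 & -1 \end{pmatrix} \]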
-
-
diff --git a/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp b/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp
deleted file mode 100644
index 941ae5a..0000000
--- a/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp
+++ /dev/null
@@ -1,607 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-/* 3/29/01 fast half-pel search based on neighboring guess */
-/* value ranging from 0 to 4, high complexity (more accurate) to
- low complexity (less accurate) */
-#define HP_DISTANCE_TH 5 // 2 /* half-pel distance threshold */
-
-#define PREF_16_VEC 129 /* 1MV bias versus 4MVs*/
-
-#define CLIP_RESULT(x) if((uint)(x) > 0xFF){ \
- (x) = 0xFF & (~((x)>>31));}
-
-#define CLIP_UPPER16(x) if((uint)(x) >= 0x20000000){ \
- (x) = 0xFF0000 & (~((x)>>31));} \
- else { \
- (x) = ((x)>>5)&0xFF0000; \
- }
-
-/*=====================================================================
- Function: AVCFindHalfPelMB
- Date: 10/31/2007
- Purpose: Find half pel resolution MV surrounding the full-pel MV
-=====================================================================*/
-
-int AVCFindHalfPelMB(AVCEncObject *encvid, uint8 *cur, AVCMV *mot, uint8 *ncand,
- int xpos, int ypos, int hp_guess, int cmvx, int cmvy)
-{
- AVCPictureData *currPic = encvid->common->currPic;
- int lx = currPic->pitch;
- int d, dmin, satd_min;
- uint8* cand;
- int lambda_motion = encvid->lambda_motion;
- uint8 *mvbits = encvid->mvbits;
- int mvcost;
- /* list of candidate to go through for half-pel search*/
- uint8 *subpel_pred = (uint8*) encvid->subpel_pred; // all 16 sub-pel positions
- uint8 **hpel_cand = (uint8**) encvid->hpel_cand; /* half-pel position */
-
- int xh[9] = {0, 0, 2, 2, 2, 0, -2, -2, -2};
- int yh[9] = {0, -2, -2, 0, 2, 2, 2, 0, -2};
- int xq[8] = {0, 1, 1, 1, 0, -1, -1, -1};
- int yq[8] = { -1, -1, 0, 1, 1, 1, 0, -1};
- int h, hmin, q, qmin;
-
- OSCL_UNUSED_ARG(xpos);
- OSCL_UNUSED_ARG(ypos);
- OSCL_UNUSED_ARG(hp_guess);
-
- GenerateHalfPelPred(subpel_pred, ncand, lx);
-
- cur = encvid->currYMB; // pre-load current original MB
-
- cand = hpel_cand[0];
-
- // find cost for the current full-pel position
- dmin = SATD_MB(cand, cur, 65535); // get Hadamard transform SAD
- mvcost = MV_COST_S(lambda_motion, mot->x, mot->y, cmvx, cmvy);
- satd_min = dmin;
- dmin += mvcost;
- hmin = 0;
-
- /* find half-pel */
- for (h = 1; h < 9; h++)
- {
- d = SATD_MB(hpel_cand[h], cur, dmin);
- mvcost = MV_COST_S(lambda_motion, mot->x + xh[h], mot->y + yh[h], cmvx, cmvy);
- d += mvcost;
-
- if (d < dmin)
- {
- dmin = d;
- hmin = h;
- satd_min = d - mvcost;
- }
- }
-
- mot->sad = dmin;
- mot->x += xh[hmin];
- mot->y += yh[hmin];
- encvid->best_hpel_pos = hmin;
-
- /*** search for quarter-pel ****/
- GenerateQuartPelPred(encvid->bilin_base[hmin], &(encvid->qpel_cand[0][0]), hmin);
-
- encvid->best_qpel_pos = qmin = -1;
-
- for (q = 0; q < 8; q++)
- {
- d = SATD_MB(encvid->qpel_cand[q], cur, dmin);
- mvcost = MV_COST_S(lambda_motion, mot->x + xq[q], mot->y + yq[q], cmvx, cmvy);
- d += mvcost;
- if (d < dmin)
- {
- dmin = d;
- qmin = q;
- satd_min = d - mvcost;
- }
- }
-
- if (qmin != -1)
- {
- mot->sad = dmin;
- mot->x += xq[qmin];
- mot->y += yq[qmin];
- encvid->best_qpel_pos = qmin;
- }
-
- return satd_min;
-}
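
Both the half-pel and the quarter-pel loops above minimize the usual rate-constrained matching cost; written out, with R(.) the bit count looked up from the mvbits table and (p_x, p_y) = (cmvx, cmvy) the MV predictor (notation is illustrative only):

\[ J(\mathbf{m}) = \mathrm{SATD}\big(s, c(\mathbf{m})\big) + \lambda_{motion} \cdot \big( R(m_x - p_x) + R(m_y - p_y) \big) \]

The candidate with the smallest J wins; satd_min keeps the distortion part of that winner for the caller.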
-
-
-
-/** This function generates sub-pel prediction around the full-pel candidate.
-Each sub-pel position array is 20 pixels wide (for word-alignment) and 17 pixels tall. */
-/** The sub-pel positions are labeled in a spiral manner from the center. */
-
-void GenerateHalfPelPred(uint8* subpel_pred, uint8 *ncand, int lx)
-{
- /* let's do the straightforward way first */
- uint8 *ref;
- uint8 *dst;
- uint8 tmp8;
- int32 tmp32;
- int16 tmp_horz[18*22], *dst_16, *src_16;
- int a = 0, b = 0, c = 0, d = 0, e = 0, f = 0; // temp
- int i, j;
-
- /* first copy full-pel to the first array */
- /* to be optimized later based on byte-offset load */
- ref = ncand - 3 - lx - (lx << 1); /* move back (-3,-3) */
- dst = subpel_pred;
-
- dst -= 4; /* offset */
- for (j = 0; j < 22; j++) /* 24x22 */
- {
- i = 6;
- while (i > 0)
- {
- tmp32 = *ref++;
- tmp8 = *ref++;
- tmp32 |= (tmp8 << 8);
- tmp8 = *ref++;
- tmp32 |= (tmp8 << 16);
- tmp8 = *ref++;
- tmp32 |= (tmp8 << 24);
- *((uint32*)(dst += 4)) = tmp32;
- i--;
- }
- ref += (lx - 24);
- }
-
- /* from the first array, we do horizontal interp */
- ref = subpel_pred + 2;
- dst_16 = tmp_horz; /* 17 x 22 */
-
- for (j = 4; j > 0; j--)
- {
- for (i = 16; i > 0; i -= 4)
- {
- a = ref[-2];
- b = ref[-1];
- c = ref[0];
- d = ref[1];
- e = ref[2];
- f = ref[3];
- *dst_16++ = a + f - 5 * (b + e) + 20 * (c + d);
- a = ref[4];
- *dst_16++ = b + a - 5 * (c + f) + 20 * (d + e);
- b = ref[5];
- *dst_16++ = c + b - 5 * (d + a) + 20 * (e + f);
- c = ref[6];
- *dst_16++ = d + c - 5 * (e + b) + 20 * (f + a);
-
- ref += 4;
- }
- /* do the 17th column here */
- d = ref[3];
- *dst_16 = e + d - 5 * (f + c) + 20 * (a + b);
- dst_16 += 2; /* stride for tmp_horz is 18 */
- ref += 8; /* stride for ref is 24 */
- if (j == 3) // move 18 lines down
- {
- dst_16 += 324;//18*18;
- ref += 432;//18*24;
- }
- }
-
- ref -= 480;//20*24;
- dst_16 -= 360;//20*18;
- dst = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE; /* go to the 14th array 17x18*/
-
- for (j = 18; j > 0; j--)
- {
- for (i = 16; i > 0; i -= 4)
- {
- a = ref[-2];
- b = ref[-1];
- c = ref[0];
- d = ref[1];
- e = ref[2];
- f = ref[3];
- tmp32 = a + f - 5 * (b + e) + 20 * (c + d);
- *dst_16++ = tmp32;
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *dst++ = tmp32;
-
- a = ref[4];
- tmp32 = b + a - 5 * (c + f) + 20 * (d + e);
- *dst_16++ = tmp32;
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *dst++ = tmp32;
-
- b = ref[5];
- tmp32 = c + b - 5 * (d + a) + 20 * (e + f);
- *dst_16++ = tmp32;
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *dst++ = tmp32;
-
- c = ref[6];
- tmp32 = d + c - 5 * (e + b) + 20 * (f + a);
- *dst_16++ = tmp32;
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *dst++ = tmp32;
-
- ref += 4;
- }
- /* do the 17th column here */
- d = ref[3];
- tmp32 = e + d - 5 * (f + c) + 20 * (a + b);
- *dst_16 = tmp32;
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *dst = tmp32;
-
- dst += 8; /* stride for dst is 24 */
- dst_16 += 2; /* stride for tmp_horz is 18 */
- ref += 8; /* stride for ref is 24 */
- }
-
-
- /* Do middle point filtering*/
- src_16 = tmp_horz; /* 17 x 22 */
- dst = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE; /* 12th array 17x17*/
- dst -= 24; // offset
- for (i = 0; i < 17; i++)
- {
- for (j = 16; j > 0; j -= 4)
- {
- a = *src_16;
- b = *(src_16 += 18);
- c = *(src_16 += 18);
- d = *(src_16 += 18);
- e = *(src_16 += 18);
- f = *(src_16 += 18);
-
- tmp32 = a + f - 5 * (b + e) + 20 * (c + d);
- tmp32 = (tmp32 + 512) >> 10;
- CLIP_RESULT(tmp32)
- *(dst += 24) = tmp32;
-
- a = *(src_16 += 18);
- tmp32 = b + a - 5 * (c + f) + 20 * (d + e);
- tmp32 = (tmp32 + 512) >> 10;
- CLIP_RESULT(tmp32)
- *(dst += 24) = tmp32;
-
- b = *(src_16 += 18);
- tmp32 = c + b - 5 * (d + a) + 20 * (e + f);
- tmp32 = (tmp32 + 512) >> 10;
- CLIP_RESULT(tmp32)
- *(dst += 24) = tmp32;
-
- c = *(src_16 += 18);
- tmp32 = d + c - 5 * (e + b) + 20 * (f + a);
- tmp32 = (tmp32 + 512) >> 10;
- CLIP_RESULT(tmp32)
- *(dst += 24) = tmp32;
-
- src_16 -= (18 << 2);
- }
-
- d = src_16[90]; // 18*5
- tmp32 = e + d - 5 * (f + c) + 20 * (a + b);
- tmp32 = (tmp32 + 512) >> 10;
- CLIP_RESULT(tmp32)
- dst[24] = tmp32;
-
- src_16 -= ((18 << 4) - 1);
- dst -= ((24 << 4) - 1);
- }
-
- /* do vertical interpolation */
- ref = subpel_pred + 2;
- dst = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE; /* 10th array 18x17 */
- dst -= 24; // offset
-
- for (i = 2; i > 0; i--)
- {
- for (j = 16; j > 0; j -= 4)
- {
- a = *ref;
- b = *(ref += 24);
- c = *(ref += 24);
- d = *(ref += 24);
- e = *(ref += 24);
- f = *(ref += 24);
-
- tmp32 = a + f - 5 * (b + e) + 20 * (c + d);
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *(dst += 24) = tmp32; // 10th
-
- a = *(ref += 24);
- tmp32 = b + a - 5 * (c + f) + 20 * (d + e);
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *(dst += 24) = tmp32; // 10th
-
- b = *(ref += 24);
- tmp32 = c + b - 5 * (d + a) + 20 * (e + f);
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *(dst += 24) = tmp32; // 10th
-
- c = *(ref += 24);
- tmp32 = d + c - 5 * (e + b) + 20 * (f + a);
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *(dst += 24) = tmp32; // 10th
-
- ref -= (24 << 2);
- }
-
- d = ref[120]; // 24*5
- tmp32 = e + d - 5 * (f + c) + 20 * (a + b);
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- dst[24] = tmp32; // 10th
-
- dst -= ((24 << 4) - 1);
- ref -= ((24 << 4) - 1);
- }
-
- // note that using SIMD here doesn't help much, the cycle count stays almost the same
- // one can just use the above code and change the for(i=2 to for(i=18
- for (i = 16; i > 0; i -= 4)
- {
- for (j = 17; j > 0; j--)
- {
- a = *((uint32*)ref); /* load 4 bytes */
- b = (a >> 8) & 0xFF00FF; /* second and fourth byte */
- a &= 0xFF00FF;
-
- c = *((uint32*)(ref + 120));
- d = (c >> 8) & 0xFF00FF;
- c &= 0xFF00FF;
-
- a += c;
- b += d;
-
- e = *((uint32*)(ref + 72)); /* e, f */
- f = (e >> 8) & 0xFF00FF;
- e &= 0xFF00FF;
-
- c = *((uint32*)(ref + 48)); /* c, d */
- d = (c >> 8) & 0xFF00FF;
- c &= 0xFF00FF;
-
- c += e;
- d += f;
-
- a += 20 * c;
- b += 20 * d;
- a += 0x100010;
- b += 0x100010;
-
- e = *((uint32*)(ref += 24)); /* e, f */
- f = (e >> 8) & 0xFF00FF;
- e &= 0xFF00FF;
-
- c = *((uint32*)(ref + 72)); /* c, d */
- d = (c >> 8) & 0xFF00FF;
- c &= 0xFF00FF;
-
- c += e;
- d += f;
-
- a -= 5 * c;
- b -= 5 * d;
-
- c = a << 16;
- d = b << 16;
- CLIP_UPPER16(a)
- CLIP_UPPER16(c)
- CLIP_UPPER16(b)
- CLIP_UPPER16(d)
-
- a |= (c >> 16);
- b |= (d >> 16);
- // a>>=5;
- // b>>=5;
- /* clip */
- // msk |= b; msk|=a;
- // a &= 0xFF00FF;
- // b &= 0xFF00FF;
- a |= (b << 8); /* pack it back */
-
- *((uint16*)(dst += 24)) = a & 0xFFFF; //dst is not word-aligned.
- *((uint16*)(dst + 2)) = a >> 16;
-
- }
- dst -= 404; // 24*17-4
- ref -= 404;
- /* if(msk & 0xFF00FF00) // need clipping
- {
- VertInterpWClip(dst,ref); // re-do 4 column with clip
- }*/
- }
-
- return ;
-}
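
All of the interpolation above uses the 6-tap half-sample filter with taps (1, -5, 20, 20, -5, 1); for a sample interpolated in one direction the result, matching the (tmp32 + 16) >> 5 and CLIP_RESULT steps in the code, is

\[ b = \mathrm{Clip}_{[0,255]}\big( (E - 5F + 20G + 20H - 5I + J + 16) \gg 5 \big) \]

while the center (half-half) position filters the 16-bit intermediate values a second time and rounds with (+512) >> 10.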
-
-void VertInterpWClip(uint8 *dst, uint8 *ref)
-{
- int i, j;
- int a, b, c, d, e, f;
- int32 tmp32;
-
- dst -= 4;
- ref -= 4;
-
- for (i = 4; i > 0; i--)
- {
- for (j = 16; j > 0; j -= 4)
- {
- a = *ref;
- b = *(ref += 24);
- c = *(ref += 24);
- d = *(ref += 24);
- e = *(ref += 24);
- f = *(ref += 24);
-
- tmp32 = a + f - 5 * (b + e) + 20 * (c + d);
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *(dst += 24) = tmp32; // 10th
-
- a = *(ref += 24);
- tmp32 = b + a - 5 * (c + f) + 20 * (d + e);
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *(dst += 24) = tmp32; // 10th
-
- b = *(ref += 24);
- tmp32 = c + b - 5 * (d + a) + 20 * (e + f);
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *(dst += 24) = tmp32; // 10th
-
- c = *(ref += 24);
- tmp32 = d + c - 5 * (e + b) + 20 * (f + a);
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- *(dst += 24) = tmp32; // 10th
-
- ref -= (24 << 2);
- }
-
- d = ref[120]; // 24*5
- tmp32 = e + d - 5 * (f + c) + 20 * (a + b);
- tmp32 = (tmp32 + 16) >> 5;
- CLIP_RESULT(tmp32)
- dst[24] = tmp32; // 10th
-
- dst -= ((24 << 4) - 1);
- ref -= ((24 << 4) - 1);
- }
-
- return ;
-}
-
-
-void GenerateQuartPelPred(uint8 **bilin_base, uint8 *qpel_cand, int hpel_pos)
-{
- // for even value of hpel_pos, start with pattern 1, otherwise, start with pattern 2
- int i, j;
-
- uint8 *c1 = qpel_cand;
- uint8 *tl = bilin_base[0];
- uint8 *tr = bilin_base[1];
- uint8 *bl = bilin_base[2];
- uint8 *br = bilin_base[3];
- int a, b, c, d;
- int offset = 1 - (384 * 7);
-
- if (!(hpel_pos&1)) // diamond pattern
- {
- j = 16;
- while (j--)
- {
- i = 16;
- while (i--)
- {
- d = tr[24];
- a = *tr++;
- b = bl[1];
- c = *br++;
-
- *c1 = (c + a + 1) >> 1;
- *(c1 += 384) = (b + a + 1) >> 1; /* c2 */
- *(c1 += 384) = (b + c + 1) >> 1; /* c3 */
- *(c1 += 384) = (b + d + 1) >> 1; /* c4 */
-
- b = *bl++;
-
- *(c1 += 384) = (c + d + 1) >> 1; /* c5 */
- *(c1 += 384) = (b + d + 1) >> 1; /* c6 */
- *(c1 += 384) = (b + c + 1) >> 1; /* c7 */
- *(c1 += 384) = (b + a + 1) >> 1; /* c8 */
-
- c1 += offset;
- }
- // advance to the next line, pitch is 24
- tl += 8;
- tr += 8;
- bl += 8;
- br += 8;
- c1 += 8;
- }
- }
- else // star pattern
- {
- j = 16;
- while (j--)
- {
- i = 16;
- while (i--)
- {
- a = *br++;
- b = *tr++;
- c = tl[1];
- *c1 = (a + b + 1) >> 1;
- b = bl[1];
- *(c1 += 384) = (a + c + 1) >> 1; /* c2 */
- c = tl[25];
- *(c1 += 384) = (a + b + 1) >> 1; /* c3 */
- b = tr[23];
- *(c1 += 384) = (a + c + 1) >> 1; /* c4 */
- c = tl[24];
- *(c1 += 384) = (a + b + 1) >> 1; /* c5 */
- b = *bl++;
- *(c1 += 384) = (a + c + 1) >> 1; /* c6 */
- c = *tl++;
- *(c1 += 384) = (a + b + 1) >> 1; /* c7 */
- *(c1 += 384) = (a + c + 1) >> 1; /* c8 */
-
- c1 += offset;
- }
- // advance to the next line, pitch is 24
- tl += 8;
- tr += 8;
- bl += 8;
- br += 8;
- c1 += 8;
- }
- }
-
- return ;
-}
-
-
-/* assuming cand always has a pitch of 24 */
-int SATD_MB(uint8 *cand, uint8 *cur, int dmin)
-{
- int cost;
-
-
- dmin = (dmin << 16) | 24;
- cost = AVCSAD_Macroblock_C(cand, cur, dmin, NULL);
-
- return cost;
-}
-
-
-
-
-
diff --git a/media/libstagefright/codecs/avc/enc/src/header.cpp b/media/libstagefright/codecs/avc/enc/src/header.cpp
deleted file mode 100644
index 9acff9e..0000000
--- a/media/libstagefright/codecs/avc/enc/src/header.cpp
+++ /dev/null
@@ -1,917 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-#include "avcenc_api.h"
-
-/** see subclause 7.4.2.1 */
-/* No need to check the valid range; that is already done in SetEncodeParam().
-If we have to send another SPS, the ranges should be verified first before
-users call PVAVCEncodeSPS(). */
-AVCEnc_Status EncodeSPS(AVCEncObject *encvid, AVCEncBitstream *stream)
-{
- AVCCommonObj *video = encvid->common;
- AVCSeqParamSet *seqParam = video->currSeqParams;
- AVCVUIParams *vui = &(seqParam->vui_parameters);
- int i;
- AVCEnc_Status status = AVCENC_SUCCESS;
-
- //DEBUG_LOG(userData,AVC_LOGTYPE_INFO,"EncodeSPS",-1,-1);
-
- status = BitstreamWriteBits(stream, 8, seqParam->profile_idc);
- status = BitstreamWrite1Bit(stream, seqParam->constrained_set0_flag);
- status = BitstreamWrite1Bit(stream, seqParam->constrained_set1_flag);
- status = BitstreamWrite1Bit(stream, seqParam->constrained_set2_flag);
- status = BitstreamWrite1Bit(stream, seqParam->constrained_set3_flag);
- status = BitstreamWriteBits(stream, 4, 0); /* forbidden zero bits */
- if (status != AVCENC_SUCCESS) /* we can check after each write also */
- {
- return status;
- }
-
- status = BitstreamWriteBits(stream, 8, seqParam->level_idc);
- status = ue_v(stream, seqParam->seq_parameter_set_id);
- status = ue_v(stream, seqParam->log2_max_frame_num_minus4);
- status = ue_v(stream, seqParam->pic_order_cnt_type);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- if (seqParam->pic_order_cnt_type == 0)
- {
- status = ue_v(stream, seqParam->log2_max_pic_order_cnt_lsb_minus4);
- }
- else if (seqParam->pic_order_cnt_type == 1)
- {
- status = BitstreamWrite1Bit(stream, seqParam->delta_pic_order_always_zero_flag);
-        status = se_v(stream, seqParam->offset_for_non_ref_pic); /* up to 32 bits */
-        status = se_v(stream, seqParam->offset_for_top_to_bottom_field); /* up to 32 bits */
- status = ue_v(stream, seqParam->num_ref_frames_in_pic_order_cnt_cycle);
-
- for (i = 0; i < (int)(seqParam->num_ref_frames_in_pic_order_cnt_cycle); i++)
- {
-            status = se_v(stream, seqParam->offset_for_ref_frame[i]); /* up to 32 bits */
- }
- }
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- status = ue_v(stream, seqParam->num_ref_frames);
- status = BitstreamWrite1Bit(stream, seqParam->gaps_in_frame_num_value_allowed_flag);
- status = ue_v(stream, seqParam->pic_width_in_mbs_minus1);
- status = ue_v(stream, seqParam->pic_height_in_map_units_minus1);
- status = BitstreamWrite1Bit(stream, seqParam->frame_mbs_only_flag);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-    /* if frame_mbs_only_flag is 0, then write mb_adaptive_frame_field_flag here */
-
- status = BitstreamWrite1Bit(stream, seqParam->direct_8x8_inference_flag);
- status = BitstreamWrite1Bit(stream, seqParam->frame_cropping_flag);
- if (seqParam->frame_cropping_flag)
- {
- status = ue_v(stream, seqParam->frame_crop_left_offset);
- status = ue_v(stream, seqParam->frame_crop_right_offset);
- status = ue_v(stream, seqParam->frame_crop_top_offset);
- status = ue_v(stream, seqParam->frame_crop_bottom_offset);
- }
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- status = BitstreamWrite1Bit(stream, seqParam->vui_parameters_present_flag);
- if (seqParam->vui_parameters_present_flag)
- {
- /* not supported */
- //return AVCENC_SPS_FAIL;
- EncodeVUI(stream, vui);
- }
-
- return status;
-}
-
-
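EncodeSPS above emits most of its syntax elements through ue_v() and se_v(), which in H.264 (clause 9.1) are unsigned and signed Exp-Golomb codes; the real bitstream writer lives elsewhere in this library, so the following is only a rough, self-contained sketch of the value mapping, with a toy bit accumulator that is not part of this codebase.

#include <stdint.h>
#include <stdio.h>

/* Toy bit accumulator: collects the code into a 64-bit word, MSB first,
 * purely to show the Exp-Golomb mapping; the encoder above writes into
 * AVCEncBitstream via BitstreamWriteBits()/BitstreamWrite1Bit(). */
typedef struct { uint64_t bits; int nbits; } BitBuf;

static void put_bits(BitBuf *b, int n, uint32_t val)
{
    uint32_t mask = (n < 32) ? ((1u << n) - 1u) : 0xFFFFFFFFu;
    b->bits = (b->bits << n) | (val & mask);
    b->nbits += n;
}

/* ue(v): value k is coded as leadingZeroBits zeros, a 1, then the low
 * leadingZeroBits bits of (k + 1); total length 2*leadingZeroBits + 1. */
static void put_ue(BitBuf *b, uint32_t k)
{
    uint32_t info = k + 1;
    int len = 0;
    for (uint32_t t = info; t; t >>= 1) len += 2;
    put_bits(b, len - 1, info);
}

/* se(v): positive v maps to 2v - 1, non-positive v maps to -2v. */
static void put_se(BitBuf *b, int32_t v)
{
    put_ue(b, (v > 0) ? (uint32_t)(2 * v - 1) : (uint32_t)(-2 * v));
}

int main(void)
{
    BitBuf b = {0, 0};
    put_ue(&b, 4);      /* -> 00101 (five bits) */
    put_se(&b, -2);     /* -> ue(4) again       */
    printf("%d bits: 0x%llx\n", b.nbits, (unsigned long long)b.bits);
    return 0;
}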
-void EncodeVUI(AVCEncBitstream* stream, AVCVUIParams* vui)
-{
- int temp;
-
- temp = vui->aspect_ratio_info_present_flag;
- BitstreamWrite1Bit(stream, temp);
- if (temp)
- {
- BitstreamWriteBits(stream, 8, vui->aspect_ratio_idc);
- if (vui->aspect_ratio_idc == 255)
- {
- BitstreamWriteBits(stream, 16, vui->sar_width);
- BitstreamWriteBits(stream, 16, vui->sar_height);
- }
- }
- temp = vui->overscan_info_present_flag;
- BitstreamWrite1Bit(stream, temp);
- if (temp)
- {
- BitstreamWrite1Bit(stream, vui->overscan_appropriate_flag);
- }
- temp = vui->video_signal_type_present_flag;
- BitstreamWrite1Bit(stream, temp);
- if (temp)
- {
- BitstreamWriteBits(stream, 3, vui->video_format);
- BitstreamWrite1Bit(stream, vui->video_full_range_flag);
- temp = vui->colour_description_present_flag;
- BitstreamWrite1Bit(stream, temp);
- if (temp)
- {
- BitstreamWriteBits(stream, 8, vui->colour_primaries);
- BitstreamWriteBits(stream, 8, vui->transfer_characteristics);
- BitstreamWriteBits(stream, 8, vui->matrix_coefficients);
- }
- }
- temp = vui->chroma_location_info_present_flag;
- BitstreamWrite1Bit(stream, temp);
- if (temp)
- {
- ue_v(stream, vui->chroma_sample_loc_type_top_field);
- ue_v(stream, vui->chroma_sample_loc_type_bottom_field);
- }
-
- temp = vui->timing_info_present_flag;
- BitstreamWrite1Bit(stream, temp);
- if (temp)
- {
- BitstreamWriteBits(stream, 32, vui->num_units_in_tick);
- BitstreamWriteBits(stream, 32, vui->time_scale);
- BitstreamWrite1Bit(stream, vui->fixed_frame_rate_flag);
- }
-
- temp = vui->nal_hrd_parameters_present_flag;
- BitstreamWrite1Bit(stream, temp);
- if (temp)
- {
- EncodeHRD(stream, &(vui->nal_hrd_parameters));
- }
- temp = vui->vcl_hrd_parameters_present_flag;
- BitstreamWrite1Bit(stream, temp);
- if (temp)
- {
- EncodeHRD(stream, &(vui->vcl_hrd_parameters));
- }
- if (vui->nal_hrd_parameters_present_flag || vui->vcl_hrd_parameters_present_flag)
- {
- BitstreamWrite1Bit(stream, vui->low_delay_hrd_flag);
- }
- BitstreamWrite1Bit(stream, vui->pic_struct_present_flag);
- temp = vui->bitstream_restriction_flag;
- BitstreamWrite1Bit(stream, temp);
- if (temp)
- {
- BitstreamWrite1Bit(stream, vui->motion_vectors_over_pic_boundaries_flag);
- ue_v(stream, vui->max_bytes_per_pic_denom);
- ue_v(stream, vui->max_bits_per_mb_denom);
- ue_v(stream, vui->log2_max_mv_length_horizontal);
- ue_v(stream, vui->log2_max_mv_length_vertical);
- ue_v(stream, vui->max_dec_frame_reordering);
- ue_v(stream, vui->max_dec_frame_buffering);
- }
-
- return ;
-}
-
-
-void EncodeHRD(AVCEncBitstream* stream, AVCHRDParams* hrd)
-{
- int i;
-
- ue_v(stream, hrd->cpb_cnt_minus1);
- BitstreamWriteBits(stream, 4, hrd->bit_rate_scale);
- BitstreamWriteBits(stream, 4, hrd->cpb_size_scale);
- for (i = 0; i <= (int)hrd->cpb_cnt_minus1; i++)
- {
- ue_v(stream, hrd->bit_rate_value_minus1[i]);
- ue_v(stream, hrd->cpb_size_value_minus1[i]);
- ue_v(stream, hrd->cbr_flag[i]);
- }
- BitstreamWriteBits(stream, 5, hrd->initial_cpb_removal_delay_length_minus1);
- BitstreamWriteBits(stream, 5, hrd->cpb_removal_delay_length_minus1);
- BitstreamWriteBits(stream, 5, hrd->dpb_output_delay_length_minus1);
- BitstreamWriteBits(stream, 5, hrd->time_offset_length);
-
- return ;
-}
-
-
-
-/** see subclause 7.4.2.2 */
-/* No need to check the valid range; that is already done in SetEncodeParam().
-If we have to send another SPS, the ranges should be verified first before
-users call PVAVCEncodeSPS(). */
-AVCEnc_Status EncodePPS(AVCEncObject *encvid, AVCEncBitstream *stream)
-{
- AVCCommonObj *video = encvid->common;
- AVCEnc_Status status = AVCENC_SUCCESS;
- AVCPicParamSet *picParam = video->currPicParams;
- int i, iGroup, numBits;
- uint temp;
-
- status = ue_v(stream, picParam->pic_parameter_set_id);
- status = ue_v(stream, picParam->seq_parameter_set_id);
- status = BitstreamWrite1Bit(stream, picParam->entropy_coding_mode_flag);
- status = BitstreamWrite1Bit(stream, picParam->pic_order_present_flag);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- status = ue_v(stream, picParam->num_slice_groups_minus1);
- if (picParam->num_slice_groups_minus1 > 0)
- {
- status = ue_v(stream, picParam->slice_group_map_type);
- if (picParam->slice_group_map_type == 0)
- {
- for (iGroup = 0; iGroup <= (int)picParam->num_slice_groups_minus1; iGroup++)
- {
- status = ue_v(stream, picParam->run_length_minus1[iGroup]);
- }
- }
- else if (picParam->slice_group_map_type == 2)
- {
- for (iGroup = 0; iGroup < (int)picParam->num_slice_groups_minus1; iGroup++)
- {
- status = ue_v(stream, picParam->top_left[iGroup]);
- status = ue_v(stream, picParam->bottom_right[iGroup]);
- }
- }
- else if (picParam->slice_group_map_type == 3 ||
- picParam->slice_group_map_type == 4 ||
- picParam->slice_group_map_type == 5)
- {
- status = BitstreamWrite1Bit(stream, picParam->slice_group_change_direction_flag);
- status = ue_v(stream, picParam->slice_group_change_rate_minus1);
- }
- else /*if(picParam->slice_group_map_type == 6)*/
- {
- status = ue_v(stream, picParam->pic_size_in_map_units_minus1);
-
- numBits = 0;/* ceil(log2(num_slice_groups_minus1+1)) bits */
- i = picParam->num_slice_groups_minus1;
- while (i > 0)
- {
- numBits++;
- i >>= 1;
- }
-
- for (i = 0; i <= (int)picParam->pic_size_in_map_units_minus1; i++)
- {
- status = BitstreamWriteBits(stream, numBits, picParam->slice_group_id[i]);
- }
- }
- }
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- status = ue_v(stream, picParam->num_ref_idx_l0_active_minus1);
- status = ue_v(stream, picParam->num_ref_idx_l1_active_minus1);
- status = BitstreamWrite1Bit(stream, picParam->weighted_pred_flag);
- status = BitstreamWriteBits(stream, 2, picParam->weighted_bipred_idc);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- status = se_v(stream, picParam->pic_init_qp_minus26);
- status = se_v(stream, picParam->pic_init_qs_minus26);
- status = se_v(stream, picParam->chroma_qp_index_offset);
-
- temp = picParam->deblocking_filter_control_present_flag << 2;
- temp |= (picParam->constrained_intra_pred_flag << 1);
- temp |= picParam->redundant_pic_cnt_present_flag;
-
- status = BitstreamWriteBits(stream, 3, temp);
-
- return status;
-}
-
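EncodePPS above sizes the fixed-length slice_group_id field with a shift loop instead of a log2() call: the width is the bit length of num_slice_groups_minus1, i.e. Ceil(Log2(num_slice_groups_minus1 + 1)) as the inline comment notes. A small standalone restatement with a worked value; the function name is illustrative only.

#include <assert.h>

/* Bit width needed to represent values 0..n inclusive: the bit length of n,
 * which equals Ceil(Log2(n + 1)) for n >= 1 and is 0 when n == 0. This mirrors
 * the numBits loop used for slice_group_id in EncodePPS. */
static int bit_width(unsigned int n)
{
    int bits = 0;
    while (n > 0) { bits++; n >>= 1; }
    return bits;
}

int main(void)
{
    /* e.g. 7 slice groups -> num_slice_groups_minus1 == 6 -> 3-bit ids 0..6 */
    assert(bit_width(6) == 3);
    assert(bit_width(1) == 1);
    assert(bit_width(0) == 0);
    return 0;
}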
-/** see subclause 7.4.3 */
-AVCEnc_Status EncodeSliceHeader(AVCEncObject *encvid, AVCEncBitstream *stream)
-{
- AVCCommonObj *video = encvid->common;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- AVCPicParamSet *currPPS = video->currPicParams;
- AVCSeqParamSet *currSPS = video->currSeqParams;
- AVCEnc_Status status = AVCENC_SUCCESS;
- int slice_type, temp, i;
- int num_bits;
-
- num_bits = (stream->write_pos << 3) - stream->bit_left;
-
- status = ue_v(stream, sliceHdr->first_mb_in_slice);
-
- slice_type = video->slice_type;
-
- if (video->mbNum == 0) /* first mb in frame */
- {
- status = ue_v(stream, sliceHdr->slice_type);
- }
- else
- {
- status = ue_v(stream, slice_type);
- }
-
- status = ue_v(stream, sliceHdr->pic_parameter_set_id);
-
- status = BitstreamWriteBits(stream, currSPS->log2_max_frame_num_minus4 + 4, sliceHdr->frame_num);
-
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
- /* if frame_mbs_only_flag is 0, encode field_pic_flag, bottom_field_flag here */
-
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- status = ue_v(stream, sliceHdr->idr_pic_id);
- }
-
- if (currSPS->pic_order_cnt_type == 0)
- {
- status = BitstreamWriteBits(stream, currSPS->log2_max_pic_order_cnt_lsb_minus4 + 4,
- sliceHdr->pic_order_cnt_lsb);
-
- if (currPPS->pic_order_present_flag && !sliceHdr->field_pic_flag)
- {
- status = se_v(stream, sliceHdr->delta_pic_order_cnt_bottom); /* 32 bits */
- }
- }
- if (currSPS->pic_order_cnt_type == 1 && !currSPS->delta_pic_order_always_zero_flag)
- {
- status = se_v(stream, sliceHdr->delta_pic_order_cnt[0]); /* 32 bits */
- if (currPPS->pic_order_present_flag && !sliceHdr->field_pic_flag)
- {
- status = se_v(stream, sliceHdr->delta_pic_order_cnt[1]); /* 32 bits */
- }
- }
-
- if (currPPS->redundant_pic_cnt_present_flag)
- {
- status = ue_v(stream, sliceHdr->redundant_pic_cnt);
- }
-
- if (slice_type == AVC_B_SLICE)
- {
- status = BitstreamWrite1Bit(stream, sliceHdr->direct_spatial_mv_pred_flag);
- }
-
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- if (slice_type == AVC_P_SLICE || slice_type == AVC_SP_SLICE || slice_type == AVC_B_SLICE)
- {
- status = BitstreamWrite1Bit(stream, sliceHdr->num_ref_idx_active_override_flag);
- if (sliceHdr->num_ref_idx_active_override_flag)
- {
- /* we shouldn't enter this part at all */
- status = ue_v(stream, sliceHdr->num_ref_idx_l0_active_minus1);
- if (slice_type == AVC_B_SLICE)
- {
- status = ue_v(stream, sliceHdr->num_ref_idx_l1_active_minus1);
- }
- }
- }
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- /* ref_pic_list_reordering() */
- status = ref_pic_list_reordering(video, stream, sliceHdr, slice_type);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- if ((currPPS->weighted_pred_flag && (slice_type == AVC_P_SLICE || slice_type == AVC_SP_SLICE)) ||
- (currPPS->weighted_bipred_idc == 1 && slice_type == AVC_B_SLICE))
- {
- // pred_weight_table(); // not supported !!
- return AVCENC_PRED_WEIGHT_TAB_FAIL;
- }
-
- if (video->nal_ref_idc != 0)
- {
- status = dec_ref_pic_marking(video, stream, sliceHdr);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
- }
-
- if (currPPS->entropy_coding_mode_flag && slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE)
- {
- return AVCENC_CABAC_FAIL;
- /* ue_v(stream,&(sliceHdr->cabac_init_idc));
- if(sliceHdr->cabac_init_idc > 2){
- // not supported !!!!
- }*/
- }
-
- status = se_v(stream, sliceHdr->slice_qp_delta);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- if (slice_type == AVC_SP_SLICE || slice_type == AVC_SI_SLICE)
- {
- if (slice_type == AVC_SP_SLICE)
- {
- status = BitstreamWrite1Bit(stream, sliceHdr->sp_for_switch_flag);
-            /* if sp_for_switch_flag is 0, P macroblocks in an SP slice are decoded using
-            the SP decoding process for non-switching pictures in 8.6.1 */
-            /* else, P macroblocks in an SP slice are decoded using the SP and SI decoding
-            process for switching pictures in 8.6.2 */
- }
- status = se_v(stream, sliceHdr->slice_qs_delta);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
- }
-
- if (currPPS->deblocking_filter_control_present_flag)
- {
-
- status = ue_v(stream, sliceHdr->disable_deblocking_filter_idc);
-
- if (sliceHdr->disable_deblocking_filter_idc != 1)
- {
- status = se_v(stream, sliceHdr->slice_alpha_c0_offset_div2);
-
- status = se_v(stream, sliceHdr->slice_beta_offset_div_2);
- }
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
- }
-
- if (currPPS->num_slice_groups_minus1 > 0 && currPPS->slice_group_map_type >= 3
- && currPPS->slice_group_map_type <= 5)
- {
- /* Ceil(Log2(PicSizeInMapUnits/(float)SliceGroupChangeRate + 1)) */
- temp = video->PicSizeInMapUnits / video->SliceGroupChangeRate;
- if (video->PicSizeInMapUnits % video->SliceGroupChangeRate)
- {
- temp++;
- }
- i = 0;
- while (temp > 1)
- {
- temp >>= 1;
- i++;
- }
-
- BitstreamWriteBits(stream, i, sliceHdr->slice_group_change_cycle);
- }
-
-
- encvid->rateCtrl->NumberofHeaderBits += (stream->write_pos << 3) - stream->bit_left - num_bits;
-
- return AVCENC_SUCCESS;
-}
-
-/** see subclause 7.4.3.1 */
-AVCEnc_Status ref_pic_list_reordering(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type)
-{
- (void)(video);
- int i;
- AVCEnc_Status status = AVCENC_SUCCESS;
-
- if (slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE)
- {
- status = BitstreamWrite1Bit(stream, sliceHdr->ref_pic_list_reordering_flag_l0);
- if (sliceHdr->ref_pic_list_reordering_flag_l0)
- {
- i = 0;
- do
- {
- status = ue_v(stream, sliceHdr->reordering_of_pic_nums_idc_l0[i]);
- if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 ||
- sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1)
- {
- status = ue_v(stream, sliceHdr->abs_diff_pic_num_minus1_l0[i]);
- /* this check should be in InitSlice(), if we ever use it */
- /*if(sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 &&
- sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum/2 -1)
- {
- return AVCENC_REF_PIC_REORDER_FAIL; // out of range
- }
- if(sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1 &&
- sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum/2 -2)
- {
- return AVCENC_REF_PIC_REORDER_FAIL; // out of range
- }*/
- }
- else if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 2)
- {
- status = ue_v(stream, sliceHdr->long_term_pic_num_l0[i]);
- }
- i++;
- }
- while (sliceHdr->reordering_of_pic_nums_idc_l0[i] != 3
- && i <= (int)sliceHdr->num_ref_idx_l0_active_minus1 + 1) ;
- }
- }
- if (slice_type == AVC_B_SLICE)
- {
- status = BitstreamWrite1Bit(stream, sliceHdr->ref_pic_list_reordering_flag_l1);
- if (sliceHdr->ref_pic_list_reordering_flag_l1)
- {
- i = 0;
- do
- {
- status = ue_v(stream, sliceHdr->reordering_of_pic_nums_idc_l1[i]);
- if (sliceHdr->reordering_of_pic_nums_idc_l1[i] == 0 ||
- sliceHdr->reordering_of_pic_nums_idc_l1[i] == 1)
- {
- status = ue_v(stream, sliceHdr->abs_diff_pic_num_minus1_l1[i]);
- /* This check should be in InitSlice() if we ever use it
- if(sliceHdr->reordering_of_pic_nums_idc_l1[i] == 0 &&
- sliceHdr->abs_diff_pic_num_minus1_l1[i] > video->MaxPicNum/2 -1)
- {
- return AVCENC_REF_PIC_REORDER_FAIL; // out of range
- }
- if(sliceHdr->reordering_of_pic_nums_idc_l1[i] == 1 &&
- sliceHdr->abs_diff_pic_num_minus1_l1[i] > video->MaxPicNum/2 -2)
- {
- return AVCENC_REF_PIC_REORDER_FAIL; // out of range
- }*/
- }
- else if (sliceHdr->reordering_of_pic_nums_idc_l1[i] == 2)
- {
- status = ue_v(stream, sliceHdr->long_term_pic_num_l1[i]);
- }
- i++;
- }
- while (sliceHdr->reordering_of_pic_nums_idc_l1[i] != 3
- && i <= (int)sliceHdr->num_ref_idx_l1_active_minus1 + 1) ;
- }
- }
-
- return status;
-}
-
-/** see subclause 7.4.3.3 */
-AVCEnc_Status dec_ref_pic_marking(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr)
-{
- int i;
- AVCEnc_Status status = AVCENC_SUCCESS;
-
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- status = BitstreamWrite1Bit(stream, sliceHdr->no_output_of_prior_pics_flag);
- status = BitstreamWrite1Bit(stream, sliceHdr->long_term_reference_flag);
- if (sliceHdr->long_term_reference_flag == 0) /* used for short-term */
- {
-            video->MaxLongTermFrameIdx = -1; /* no long-term frame index */
- }
- else /* used for long-term */
- {
- video->MaxLongTermFrameIdx = 0;
- video->LongTermFrameIdx = 0;
- }
- }
- else
- {
- status = BitstreamWrite1Bit(stream, sliceHdr->adaptive_ref_pic_marking_mode_flag); /* default to zero */
- if (sliceHdr->adaptive_ref_pic_marking_mode_flag)
- {
- i = 0;
- do
- {
- status = ue_v(stream, sliceHdr->memory_management_control_operation[i]);
- if (sliceHdr->memory_management_control_operation[i] == 1 ||
- sliceHdr->memory_management_control_operation[i] == 3)
- {
- status = ue_v(stream, sliceHdr->difference_of_pic_nums_minus1[i]);
- }
- if (sliceHdr->memory_management_control_operation[i] == 2)
- {
- status = ue_v(stream, sliceHdr->long_term_pic_num[i]);
- }
- if (sliceHdr->memory_management_control_operation[i] == 3 ||
- sliceHdr->memory_management_control_operation[i] == 6)
- {
- status = ue_v(stream, sliceHdr->long_term_frame_idx[i]);
- }
- if (sliceHdr->memory_management_control_operation[i] == 4)
- {
- status = ue_v(stream, sliceHdr->max_long_term_frame_idx_plus1[i]);
- }
- i++;
- }
- while (sliceHdr->memory_management_control_operation[i] != 0 && i < MAX_DEC_REF_PIC_MARKING);
- if (i >= MAX_DEC_REF_PIC_MARKING && sliceHdr->memory_management_control_operation[i] != 0)
- {
- return AVCENC_DEC_REF_PIC_MARK_FAIL; /* we're screwed!!, not enough memory */
- }
- }
- }
-
- return status;
-}
-
-/* see subclause 8.2.1 Decoding process for picture order count.
-See also PostPOC() for initialization of some variables. */
-AVCEnc_Status InitPOC(AVCEncObject *encvid)
-{
- AVCCommonObj *video = encvid->common;
- AVCSeqParamSet *currSPS = video->currSeqParams;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- AVCFrameIO *currInput = encvid->currInput;
- int i;
-
- switch (currSPS->pic_order_cnt_type)
- {
- case 0: /* POC MODE 0 , subclause 8.2.1.1 */
- /* encoding part */
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- encvid->dispOrdPOCRef = currInput->disp_order;
- }
- while (currInput->disp_order < encvid->dispOrdPOCRef)
- {
- encvid->dispOrdPOCRef -= video->MaxPicOrderCntLsb;
- }
- sliceHdr->pic_order_cnt_lsb = currInput->disp_order - encvid->dispOrdPOCRef;
- while (sliceHdr->pic_order_cnt_lsb >= video->MaxPicOrderCntLsb)
- {
- sliceHdr->pic_order_cnt_lsb -= video->MaxPicOrderCntLsb;
- }
- /* decoding part */
- /* Calculate the MSBs of current picture */
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- video->prevPicOrderCntMsb = 0;
- video->prevPicOrderCntLsb = 0;
- }
- if (sliceHdr->pic_order_cnt_lsb < video->prevPicOrderCntLsb &&
- (video->prevPicOrderCntLsb - sliceHdr->pic_order_cnt_lsb) >= (video->MaxPicOrderCntLsb / 2))
- video->PicOrderCntMsb = video->prevPicOrderCntMsb + video->MaxPicOrderCntLsb;
- else if (sliceHdr->pic_order_cnt_lsb > video->prevPicOrderCntLsb &&
- (sliceHdr->pic_order_cnt_lsb - video->prevPicOrderCntLsb) > (video->MaxPicOrderCntLsb / 2))
- video->PicOrderCntMsb = video->prevPicOrderCntMsb - video->MaxPicOrderCntLsb;
- else
- video->PicOrderCntMsb = video->prevPicOrderCntMsb;
-
- /* JVT-I010 page 81 is different from JM7.3 */
- if (!sliceHdr->field_pic_flag || !sliceHdr->bottom_field_flag)
- {
- video->PicOrderCnt = video->TopFieldOrderCnt = video->PicOrderCntMsb + sliceHdr->pic_order_cnt_lsb;
- }
-
- if (!sliceHdr->field_pic_flag)
- {
- video->BottomFieldOrderCnt = video->TopFieldOrderCnt + sliceHdr->delta_pic_order_cnt_bottom;
- }
- else if (sliceHdr->bottom_field_flag)
- {
- video->PicOrderCnt = video->BottomFieldOrderCnt = video->PicOrderCntMsb + sliceHdr->pic_order_cnt_lsb;
- }
-
- if (!sliceHdr->field_pic_flag)
- {
- video->PicOrderCnt = AVC_MIN(video->TopFieldOrderCnt, video->BottomFieldOrderCnt);
- }
-
- if (video->currPicParams->pic_order_present_flag && !sliceHdr->field_pic_flag)
- {
- sliceHdr->delta_pic_order_cnt_bottom = 0; /* defaulted to zero */
- }
-
- break;
- case 1: /* POC MODE 1, subclause 8.2.1.2 */
- /* calculate FrameNumOffset */
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- encvid->dispOrdPOCRef = currInput->disp_order; /* reset the reference point */
- video->prevFrameNumOffset = 0;
- video->FrameNumOffset = 0;
- }
- else if (video->prevFrameNum > sliceHdr->frame_num)
- {
- video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;
- }
- else
- {
- video->FrameNumOffset = video->prevFrameNumOffset;
- }
- /* calculate absFrameNum */
- if (currSPS->num_ref_frames_in_pic_order_cnt_cycle)
- {
- video->absFrameNum = video->FrameNumOffset + sliceHdr->frame_num;
- }
- else
- {
- video->absFrameNum = 0;
- }
-
- if (video->absFrameNum > 0 && video->nal_ref_idc == 0)
- {
- video->absFrameNum--;
- }
-
- /* derive picOrderCntCycleCnt and frameNumInPicOrderCntCycle */
- if (video->absFrameNum > 0)
- {
- video->picOrderCntCycleCnt = (video->absFrameNum - 1) / currSPS->num_ref_frames_in_pic_order_cnt_cycle;
- video->frameNumInPicOrderCntCycle = (video->absFrameNum - 1) % currSPS->num_ref_frames_in_pic_order_cnt_cycle;
- }
- /* derive expectedDeltaPerPicOrderCntCycle, this value can be computed up front. */
- video->expectedDeltaPerPicOrderCntCycle = 0;
- for (i = 0; i < (int)currSPS->num_ref_frames_in_pic_order_cnt_cycle; i++)
- {
- video->expectedDeltaPerPicOrderCntCycle += currSPS->offset_for_ref_frame[i];
- }
- /* derive expectedPicOrderCnt */
- if (video->absFrameNum)
- {
- video->expectedPicOrderCnt = video->picOrderCntCycleCnt * video->expectedDeltaPerPicOrderCntCycle;
- for (i = 0; i <= video->frameNumInPicOrderCntCycle; i++)
- {
- video->expectedPicOrderCnt += currSPS->offset_for_ref_frame[i];
- }
- }
- else
- {
- video->expectedPicOrderCnt = 0;
- }
-
- if (video->nal_ref_idc == 0)
- {
- video->expectedPicOrderCnt += currSPS->offset_for_non_ref_pic;
- }
- /* derive TopFieldOrderCnt and BottomFieldOrderCnt */
- /* encoding part */
- if (!currSPS->delta_pic_order_always_zero_flag)
- {
- sliceHdr->delta_pic_order_cnt[0] = currInput->disp_order - encvid->dispOrdPOCRef - video->expectedPicOrderCnt;
-
- if (video->currPicParams->pic_order_present_flag && !sliceHdr->field_pic_flag)
- {
- sliceHdr->delta_pic_order_cnt[1] = sliceHdr->delta_pic_order_cnt[0]; /* should be calculated from currInput->bottom_field->disp_order */
- }
- else
- {
- sliceHdr->delta_pic_order_cnt[1] = 0;
- }
- }
- else
- {
- sliceHdr->delta_pic_order_cnt[0] = sliceHdr->delta_pic_order_cnt[1] = 0;
- }
-
- if (sliceHdr->field_pic_flag == 0)
- {
- video->TopFieldOrderCnt = video->expectedPicOrderCnt + sliceHdr->delta_pic_order_cnt[0];
- video->BottomFieldOrderCnt = video->TopFieldOrderCnt + currSPS->offset_for_top_to_bottom_field + sliceHdr->delta_pic_order_cnt[1];
-
- video->PicOrderCnt = AVC_MIN(video->TopFieldOrderCnt, video->BottomFieldOrderCnt);
- }
- else if (sliceHdr->bottom_field_flag == 0)
- {
- video->TopFieldOrderCnt = video->expectedPicOrderCnt + sliceHdr->delta_pic_order_cnt[0];
- video->PicOrderCnt = video->TopFieldOrderCnt;
- }
- else
- {
- video->BottomFieldOrderCnt = video->expectedPicOrderCnt + currSPS->offset_for_top_to_bottom_field + sliceHdr->delta_pic_order_cnt[0];
- video->PicOrderCnt = video->BottomFieldOrderCnt;
- }
- break;
-
-
- case 2: /* POC MODE 2, subclause 8.2.1.3 */
- /* decoding order must be the same as display order */
- /* we don't check for that. The decoder will just output in decoding order. */
- /* Check for 2 consecutive non-reference frame */
- if (video->nal_ref_idc == 0)
- {
- if (encvid->dispOrdPOCRef == 1)
- {
- return AVCENC_CONSECUTIVE_NONREF;
- }
- encvid->dispOrdPOCRef = 1; /* act as a flag for non ref */
- }
- else
- {
- encvid->dispOrdPOCRef = 0;
- }
-
-
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- video->FrameNumOffset = 0;
- }
- else if (video->prevFrameNum > sliceHdr->frame_num)
- {
- video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;
- }
- else
- {
- video->FrameNumOffset = video->prevFrameNumOffset;
- }
- /* derive tempPicOrderCnt, we just use PicOrderCnt */
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- video->PicOrderCnt = 0;
- }
- else if (video->nal_ref_idc == 0)
- {
- video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num) - 1;
- }
- else
- {
- video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num);
- }
- /* derive TopFieldOrderCnt and BottomFieldOrderCnt */
- if (sliceHdr->field_pic_flag == 0)
- {
- video->TopFieldOrderCnt = video->BottomFieldOrderCnt = video->PicOrderCnt;
- }
- else if (sliceHdr->bottom_field_flag)
- {
- video->BottomFieldOrderCnt = video->PicOrderCnt;
- }
- else
- {
- video->TopFieldOrderCnt = video->PicOrderCnt;
- }
- break;
- default:
- return AVCENC_POC_FAIL;
- }
-
- return AVCENC_SUCCESS;
-}
-
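For pic_order_cnt_type 0, InitPOC above mirrors the decoder-side MSB derivation of subclause 8.2.1.1: only pic_order_cnt_lsb is transmitted, and the MSB is stepped up or down by MaxPicOrderCntLsb when the LSB is detected to have wrapped relative to the previous reference picture. A compact restatement of just that step, assuming max_lsb is the power of two 1 << (log2_max_pic_order_cnt_lsb_minus4 + 4); the function name is illustrative only.

/* Reconstruct PicOrderCntMsb from the transmitted LSB, as in subclause
 * 8.2.1.1: if the LSB dropped or jumped by at least half the LSB range,
 * step the MSB by max_lsb in the corresponding direction. */
static int derive_poc_msb(int lsb, int prev_lsb, int prev_msb, int max_lsb)
{
    if (lsb < prev_lsb && (prev_lsb - lsb) >= (max_lsb / 2))
        return prev_msb + max_lsb;           /* wrapped forward  */
    if (lsb > prev_lsb && (lsb - prev_lsb) > (max_lsb / 2))
        return prev_msb - max_lsb;           /* wrapped backward */
    return prev_msb;
}

/* Example: with max_lsb == 16, an LSB sequence 14 -> 2 after prev_msb == 0
 * gives msb == 16, so PicOrderCnt = msb + lsb = 18. */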
-/** see subclause 8.2.1 */
-AVCEnc_Status PostPOC(AVCCommonObj *video)
-{
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- AVCSeqParamSet *currSPS = video->currSeqParams;
-
- video->prevFrameNum = sliceHdr->frame_num;
-
- switch (currSPS->pic_order_cnt_type)
- {
- case 0: /* subclause 8.2.1.1 */
- if (video->mem_mgr_ctrl_eq_5)
- {
- video->prevPicOrderCntMsb = 0;
- video->prevPicOrderCntLsb = video->TopFieldOrderCnt;
- }
- else
- {
- video->prevPicOrderCntMsb = video->PicOrderCntMsb;
- video->prevPicOrderCntLsb = sliceHdr->pic_order_cnt_lsb;
- }
- break;
- case 1: /* subclause 8.2.1.2 and 8.2.1.3 */
- case 2:
- if (video->mem_mgr_ctrl_eq_5)
- {
- video->prevFrameNumOffset = 0;
- }
- else
- {
- video->prevFrameNumOffset = video->FrameNumOffset;
- }
- break;
- }
-
- return AVCENC_SUCCESS;
-}
-
diff --git a/media/libstagefright/codecs/avc/enc/src/init.cpp b/media/libstagefright/codecs/avc/enc/src/init.cpp
deleted file mode 100644
index 6e1413a..0000000
--- a/media/libstagefright/codecs/avc/enc/src/init.cpp
+++ /dev/null
@@ -1,895 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-#include "avcenc_api.h"
-
-#define LOG2_MAX_FRAME_NUM_MINUS4 12 /* 12 default */
-#define SLICE_GROUP_CHANGE_CYCLE 1 /* default */
-
-/* initialized variables to be used in SPS*/
-AVCEnc_Status SetEncodeParam(AVCHandle* avcHandle, AVCEncParams* encParam,
- void* extSPS, void* extPPS)
-{
- AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
- AVCCommonObj *video = encvid->common;
- AVCSeqParamSet *seqParam = video->currSeqParams;
- AVCPicParamSet *picParam = video->currPicParams;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- AVCEnc_Status status;
- void *userData = avcHandle->userData;
- int ii, maxFrameNum;
-
- AVCSeqParamSet* extS = NULL;
- AVCPicParamSet* extP = NULL;
-
- if (extSPS) extS = (AVCSeqParamSet*) extSPS;
- if (extPPS) extP = (AVCPicParamSet*) extPPS;
-
- /* This part sets the default values of the encoding options this
- library supports in seqParam, picParam and sliceHdr structures and
- also copy the values from the encParam into the above 3 structures.
-
- Some parameters will be assigned later when we encode SPS or PPS such as
- the seq_parameter_id or pic_parameter_id. Also some of the slice parameters
- have to be re-assigned per slice basis such as frame_num, slice_type,
- first_mb_in_slice, pic_order_cnt_lsb, slice_qp_delta, slice_group_change_cycle */
-
-    /* profile_idc, constrained_setx_flag and level_idc are set by the VerifyProfile()
-    and VerifyLevel() functions later. */
-
- encvid->fullsearch_enable = encParam->fullsearch;
-
- encvid->outOfBandParamSet = ((encParam->out_of_band_param_set == AVC_ON) ? TRUE : FALSE);
-
-    /* parameters derived from the encParam that are used in SPS */
- if (extS)
- {
- video->MaxPicOrderCntLsb = 1 << (extS->log2_max_pic_order_cnt_lsb_minus4 + 4);
- video->PicWidthInMbs = extS->pic_width_in_mbs_minus1 + 1;
- video->PicHeightInMapUnits = extS->pic_height_in_map_units_minus1 + 1 ;
- video->FrameHeightInMbs = (2 - extS->frame_mbs_only_flag) * video->PicHeightInMapUnits ;
- }
- else
- {
- video->MaxPicOrderCntLsb = 1 << (encParam->log2_max_poc_lsb_minus_4 + 4);
- video->PicWidthInMbs = (encParam->width + 15) >> 4; /* round it to multiple of 16 */
- video->FrameHeightInMbs = (encParam->height + 15) >> 4; /* round it to multiple of 16 */
- video->PicHeightInMapUnits = video->FrameHeightInMbs;
- }
-
- video->PicWidthInSamplesL = video->PicWidthInMbs * 16 ;
- if (video->PicWidthInSamplesL + 32 > 0xFFFF)
- {
- return AVCENC_NOT_SUPPORTED; // we use 2-bytes for pitch
- }
-
- video->PicWidthInSamplesC = video->PicWidthInMbs * 8 ;
- video->PicHeightInMbs = video->FrameHeightInMbs;
- video->PicSizeInMapUnits = video->PicWidthInMbs * video->PicHeightInMapUnits ;
- video->PicHeightInSamplesL = video->PicHeightInMbs * 16;
- video->PicHeightInSamplesC = video->PicHeightInMbs * 8;
- video->PicSizeInMbs = video->PicWidthInMbs * video->PicHeightInMbs;
-
- if (!extS && !extP)
- {
- maxFrameNum = (encParam->idr_period == -1) ? (1 << 16) : encParam->idr_period;
- ii = 0;
- while (maxFrameNum > 0)
- {
- ii++;
- maxFrameNum >>= 1;
- }
- if (ii < 4) ii = 4;
- else if (ii > 16) ii = 16;
-
- seqParam->log2_max_frame_num_minus4 = ii - 4;//LOG2_MAX_FRAME_NUM_MINUS4; /* default */
-
- video->MaxFrameNum = 1 << ii; //(LOG2_MAX_FRAME_NUM_MINUS4 + 4); /* default */
- video->MaxPicNum = video->MaxFrameNum;
-
- /************* set the SPS *******************/
- seqParam->seq_parameter_set_id = 0; /* start with zero */
- /* POC */
- seqParam->pic_order_cnt_type = encParam->poc_type; /* POC type */
- if (encParam->poc_type == 0)
- {
- if (/*encParam->log2_max_poc_lsb_minus_4<0 || (no need, it's unsigned)*/
- encParam->log2_max_poc_lsb_minus_4 > 12)
- {
- return AVCENC_INVALID_POC_LSB;
- }
- seqParam->log2_max_pic_order_cnt_lsb_minus4 = encParam->log2_max_poc_lsb_minus_4;
- }
- else if (encParam->poc_type == 1)
- {
- seqParam->delta_pic_order_always_zero_flag = encParam->delta_poc_zero_flag;
- seqParam->offset_for_non_ref_pic = encParam->offset_poc_non_ref;
- seqParam->offset_for_top_to_bottom_field = encParam->offset_top_bottom;
- seqParam->num_ref_frames_in_pic_order_cnt_cycle = encParam->num_ref_in_cycle;
- if (encParam->offset_poc_ref == NULL)
- {
- return AVCENC_ENCPARAM_MEM_FAIL;
- }
- for (ii = 0; ii < encParam->num_ref_frame; ii++)
- {
- seqParam->offset_for_ref_frame[ii] = encParam->offset_poc_ref[ii];
- }
- }
- /* number of reference frame */
- if (encParam->num_ref_frame > 16 || encParam->num_ref_frame < 0)
- {
- return AVCENC_INVALID_NUM_REF;
- }
- seqParam->num_ref_frames = encParam->num_ref_frame; /* num reference frame range 0...16*/
- seqParam->gaps_in_frame_num_value_allowed_flag = FALSE;
- seqParam->pic_width_in_mbs_minus1 = video->PicWidthInMbs - 1;
- seqParam->pic_height_in_map_units_minus1 = video->PicHeightInMapUnits - 1;
- seqParam->frame_mbs_only_flag = TRUE;
- seqParam->mb_adaptive_frame_field_flag = FALSE;
- seqParam->direct_8x8_inference_flag = FALSE; /* default */
- seqParam->frame_cropping_flag = FALSE;
- seqParam->frame_crop_bottom_offset = 0;
- seqParam->frame_crop_left_offset = 0;
- seqParam->frame_crop_right_offset = 0;
- seqParam->frame_crop_top_offset = 0;
- seqParam->vui_parameters_present_flag = FALSE; /* default */
- }
- else if (extS) // use external SPS and PPS
- {
- seqParam->seq_parameter_set_id = extS->seq_parameter_set_id;
- seqParam->log2_max_frame_num_minus4 = extS->log2_max_frame_num_minus4;
- video->MaxFrameNum = 1 << (extS->log2_max_frame_num_minus4 + 4);
- video->MaxPicNum = video->MaxFrameNum;
- if (encParam->idr_period > (int)(video->MaxFrameNum) || (encParam->idr_period == -1))
- {
- encParam->idr_period = (int)video->MaxFrameNum;
- }
-
- seqParam->pic_order_cnt_type = extS->pic_order_cnt_type;
- if (seqParam->pic_order_cnt_type == 0)
- {
- if (/*extS->log2_max_pic_order_cnt_lsb_minus4<0 || (no need it's unsigned)*/
- extS->log2_max_pic_order_cnt_lsb_minus4 > 12)
- {
- return AVCENC_INVALID_POC_LSB;
- }
- seqParam->log2_max_pic_order_cnt_lsb_minus4 = extS->log2_max_pic_order_cnt_lsb_minus4;
- }
- else if (seqParam->pic_order_cnt_type == 1)
- {
- seqParam->delta_pic_order_always_zero_flag = extS->delta_pic_order_always_zero_flag;
- seqParam->offset_for_non_ref_pic = extS->offset_for_non_ref_pic;
- seqParam->offset_for_top_to_bottom_field = extS->offset_for_top_to_bottom_field;
- seqParam->num_ref_frames_in_pic_order_cnt_cycle = extS->num_ref_frames_in_pic_order_cnt_cycle;
- for (ii = 0; ii < (int) extS->num_ref_frames; ii++)
- {
- seqParam->offset_for_ref_frame[ii] = extS->offset_for_ref_frame[ii];
- }
- }
- /* number of reference frame */
- if (extS->num_ref_frames > 16 /*|| extS->num_ref_frames<0 (no need, it's unsigned)*/)
- {
- return AVCENC_INVALID_NUM_REF;
- }
- seqParam->num_ref_frames = extS->num_ref_frames; /* num reference frame range 0...16*/
- seqParam->gaps_in_frame_num_value_allowed_flag = extS->gaps_in_frame_num_value_allowed_flag;
- seqParam->pic_width_in_mbs_minus1 = extS->pic_width_in_mbs_minus1;
- seqParam->pic_height_in_map_units_minus1 = extS->pic_height_in_map_units_minus1;
- seqParam->frame_mbs_only_flag = extS->frame_mbs_only_flag;
- if (extS->frame_mbs_only_flag != TRUE)
- {
- return AVCENC_NOT_SUPPORTED;
- }
- seqParam->mb_adaptive_frame_field_flag = extS->mb_adaptive_frame_field_flag;
- if (extS->mb_adaptive_frame_field_flag != FALSE)
- {
- return AVCENC_NOT_SUPPORTED;
- }
-
- seqParam->direct_8x8_inference_flag = extS->direct_8x8_inference_flag;
- seqParam->frame_cropping_flag = extS->frame_cropping_flag ;
- if (extS->frame_cropping_flag != FALSE)
- {
- return AVCENC_NOT_SUPPORTED;
- }
-
- seqParam->frame_crop_bottom_offset = 0;
- seqParam->frame_crop_left_offset = 0;
- seqParam->frame_crop_right_offset = 0;
- seqParam->frame_crop_top_offset = 0;
- seqParam->vui_parameters_present_flag = extS->vui_parameters_present_flag;
- if (extS->vui_parameters_present_flag)
- {
- memcpy(&(seqParam->vui_parameters), &(extS->vui_parameters), sizeof(AVCVUIParams));
- }
- }
- else
- {
- return AVCENC_NOT_SUPPORTED;
- }
-
- /***************** now PPS ******************************/
- if (!extP && !extS)
- {
- picParam->pic_parameter_set_id = (uint)(-1); /* start with zero */
- picParam->seq_parameter_set_id = (uint)(-1); /* start with zero */
- picParam->entropy_coding_mode_flag = 0; /* default to CAVLC */
- picParam->pic_order_present_flag = 0; /* default for now, will need it for B-slice */
- /* FMO */
- if (encParam->num_slice_group < 1 || encParam->num_slice_group > MAX_NUM_SLICE_GROUP)
- {
- return AVCENC_INVALID_NUM_SLICEGROUP;
- }
- picParam->num_slice_groups_minus1 = encParam->num_slice_group - 1;
-
- if (picParam->num_slice_groups_minus1 > 0)
- {
- picParam->slice_group_map_type = encParam->fmo_type;
- switch (encParam->fmo_type)
- {
- case 0:
- for (ii = 0; ii <= (int)picParam->num_slice_groups_minus1; ii++)
- {
- picParam->run_length_minus1[ii] = encParam->run_length_minus1[ii];
- }
- break;
- case 2:
- for (ii = 0; ii < (int)picParam->num_slice_groups_minus1; ii++)
- {
- picParam->top_left[ii] = encParam->top_left[ii];
- picParam->bottom_right[ii] = encParam->bottom_right[ii];
- }
- break;
- case 3:
- case 4:
- case 5:
- if (encParam->change_dir_flag == AVC_ON)
- {
- picParam->slice_group_change_direction_flag = TRUE;
- }
- else
- {
- picParam->slice_group_change_direction_flag = FALSE;
- }
- if (/*encParam->change_rate_minus1 < 0 || (no need it's unsigned) */
- encParam->change_rate_minus1 > video->PicSizeInMapUnits - 1)
- {
- return AVCENC_INVALID_CHANGE_RATE;
- }
- picParam->slice_group_change_rate_minus1 = encParam->change_rate_minus1;
- video->SliceGroupChangeRate = picParam->slice_group_change_rate_minus1 + 1;
- break;
- case 6:
- picParam->pic_size_in_map_units_minus1 = video->PicSizeInMapUnits - 1;
-
- /* allocate picParam->slice_group_id */
- picParam->slice_group_id = (uint*)avcHandle->CBAVC_Malloc(userData, sizeof(uint) * video->PicSizeInMapUnits, DEFAULT_ATTR);
- if (picParam->slice_group_id == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- if (encParam->slice_group == NULL)
- {
- return AVCENC_ENCPARAM_MEM_FAIL;
- }
- for (ii = 0; ii < (int)video->PicSizeInMapUnits; ii++)
- {
- picParam->slice_group_id[ii] = encParam->slice_group[ii];
- }
- break;
- default:
- return AVCENC_INVALID_FMO_TYPE;
- }
- }
- picParam->num_ref_idx_l0_active_minus1 = encParam->num_ref_frame - 1; /* assume frame only */
- picParam->num_ref_idx_l1_active_minus1 = 0; /* default value */
- picParam->weighted_pred_flag = 0; /* no weighted prediction supported */
- picParam->weighted_bipred_idc = 0; /* range 0,1,2 */
- if (/*picParam->weighted_bipred_idc < 0 || (no need, it's unsigned) */
- picParam->weighted_bipred_idc > 2)
- {
- return AVCENC_WEIGHTED_BIPRED_FAIL;
- }
- picParam->pic_init_qp_minus26 = 0; /* default, will be changed at slice level anyway */
- if (picParam->pic_init_qp_minus26 < -26 || picParam->pic_init_qp_minus26 > 25)
- {
- return AVCENC_INIT_QP_FAIL; /* out of range */
- }
- picParam->pic_init_qs_minus26 = 0;
- if (picParam->pic_init_qs_minus26 < -26 || picParam->pic_init_qs_minus26 > 25)
- {
- return AVCENC_INIT_QS_FAIL; /* out of range */
- }
-
- picParam->chroma_qp_index_offset = 0; /* default to zero for now */
- if (picParam->chroma_qp_index_offset < -12 || picParam->chroma_qp_index_offset > 12)
- {
- return AVCENC_CHROMA_QP_FAIL; /* out of range */
- }
- /* deblocking */
- picParam->deblocking_filter_control_present_flag = (encParam->db_filter == AVC_ON) ? TRUE : FALSE ;
- /* constrained intra prediction */
- picParam->constrained_intra_pred_flag = (encParam->constrained_intra_pred == AVC_ON) ? TRUE : FALSE;
- picParam->redundant_pic_cnt_present_flag = 0; /* default */
- }
- else if (extP)// external PPS
- {
- picParam->pic_parameter_set_id = extP->pic_parameter_set_id - 1; /* to be increased by one */
- picParam->seq_parameter_set_id = extP->seq_parameter_set_id;
- picParam->entropy_coding_mode_flag = extP->entropy_coding_mode_flag;
- if (extP->entropy_coding_mode_flag != 0) /* default to CAVLC */
- {
- return AVCENC_NOT_SUPPORTED;
- }
- picParam->pic_order_present_flag = extP->pic_order_present_flag; /* default for now, will need it for B-slice */
- if (extP->pic_order_present_flag != 0)
- {
- return AVCENC_NOT_SUPPORTED;
- }
- /* FMO */
- if (/*(extP->num_slice_groups_minus1<0) || (no need it's unsigned) */
- (extP->num_slice_groups_minus1 > MAX_NUM_SLICE_GROUP - 1))
- {
- return AVCENC_INVALID_NUM_SLICEGROUP;
- }
- picParam->num_slice_groups_minus1 = extP->num_slice_groups_minus1;
-
- if (picParam->num_slice_groups_minus1 > 0)
- {
- picParam->slice_group_map_type = extP->slice_group_map_type;
- switch (extP->slice_group_map_type)
- {
- case 0:
- for (ii = 0; ii <= (int)extP->num_slice_groups_minus1; ii++)
- {
- picParam->run_length_minus1[ii] = extP->run_length_minus1[ii];
- }
- break;
- case 2:
- for (ii = 0; ii < (int)picParam->num_slice_groups_minus1; ii++)
- {
- picParam->top_left[ii] = extP->top_left[ii];
- picParam->bottom_right[ii] = extP->bottom_right[ii];
- }
- break;
- case 3:
- case 4:
- case 5:
- picParam->slice_group_change_direction_flag = extP->slice_group_change_direction_flag;
- if (/*extP->slice_group_change_rate_minus1 < 0 || (no need, it's unsigned) */
- extP->slice_group_change_rate_minus1 > video->PicSizeInMapUnits - 1)
- {
- return AVCENC_INVALID_CHANGE_RATE;
- }
- picParam->slice_group_change_rate_minus1 = extP->slice_group_change_rate_minus1;
- video->SliceGroupChangeRate = picParam->slice_group_change_rate_minus1 + 1;
- break;
- case 6:
- if (extP->pic_size_in_map_units_minus1 != video->PicSizeInMapUnits - 1)
- {
- return AVCENC_NOT_SUPPORTED;
- }
-
- picParam->pic_size_in_map_units_minus1 = extP->pic_size_in_map_units_minus1;
-
- /* allocate picParam->slice_group_id */
- picParam->slice_group_id = (uint*)avcHandle->CBAVC_Malloc(userData, sizeof(uint) * video->PicSizeInMapUnits, DEFAULT_ATTR);
- if (picParam->slice_group_id == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- if (extP->slice_group_id == NULL)
- {
- return AVCENC_ENCPARAM_MEM_FAIL;
- }
- for (ii = 0; ii < (int)video->PicSizeInMapUnits; ii++)
- {
- picParam->slice_group_id[ii] = extP->slice_group_id[ii];
- }
- break;
- default:
- return AVCENC_INVALID_FMO_TYPE;
- }
- }
- picParam->num_ref_idx_l0_active_minus1 = extP->num_ref_idx_l0_active_minus1;
- picParam->num_ref_idx_l1_active_minus1 = extP->num_ref_idx_l1_active_minus1; /* default value */
- if (picParam->num_ref_idx_l1_active_minus1 != 0)
- {
- return AVCENC_NOT_SUPPORTED;
- }
-
- if (extP->weighted_pred_flag)
- {
- return AVCENC_NOT_SUPPORTED;
- }
-
- picParam->weighted_pred_flag = 0; /* no weighted prediction supported */
- picParam->weighted_bipred_idc = extP->weighted_bipred_idc; /* range 0,1,2 */
- if (/*picParam->weighted_bipred_idc < 0 || (no need, it's unsigned) */
- picParam->weighted_bipred_idc > 2)
- {
- return AVCENC_WEIGHTED_BIPRED_FAIL;
- }
- picParam->pic_init_qp_minus26 = extP->pic_init_qp_minus26; /* default, will be changed at slice level anyway */
- if (picParam->pic_init_qp_minus26 < -26 || picParam->pic_init_qp_minus26 > 25)
- {
- return AVCENC_INIT_QP_FAIL; /* out of range */
- }
- picParam->pic_init_qs_minus26 = extP->pic_init_qs_minus26;
- if (picParam->pic_init_qs_minus26 < -26 || picParam->pic_init_qs_minus26 > 25)
- {
- return AVCENC_INIT_QS_FAIL; /* out of range */
- }
-
- picParam->chroma_qp_index_offset = extP->chroma_qp_index_offset; /* default to zero for now */
- if (picParam->chroma_qp_index_offset < -12 || picParam->chroma_qp_index_offset > 12)
- {
- return AVCENC_CHROMA_QP_FAIL; /* out of range */
- }
- /* deblocking */
- picParam->deblocking_filter_control_present_flag = extP->deblocking_filter_control_present_flag;
- /* constrained intra prediction */
- picParam->constrained_intra_pred_flag = extP->constrained_intra_pred_flag;
- if (extP->redundant_pic_cnt_present_flag != 0)
- {
- return AVCENC_NOT_SUPPORTED;
- }
- picParam->redundant_pic_cnt_present_flag = extP->redundant_pic_cnt_present_flag; /* default */
- }
- else
- {
- return AVCENC_NOT_SUPPORTED;
- }
-
- /****************** now set up some SliceHeader parameters ***********/
- if (picParam->deblocking_filter_control_present_flag == TRUE)
- {
- /* these values only present when db_filter is ON */
- if (encParam->disable_db_idc > 2)
- {
- return AVCENC_INVALID_DEBLOCK_IDC; /* out of range */
- }
- sliceHdr->disable_deblocking_filter_idc = encParam->disable_db_idc;
-
- if (encParam->alpha_offset < -6 || encParam->alpha_offset > 6)
- {
- return AVCENC_INVALID_ALPHA_OFFSET;
- }
- sliceHdr->slice_alpha_c0_offset_div2 = encParam->alpha_offset;
-
- if (encParam->beta_offset < -6 || encParam->beta_offset > 6)
- {
- return AVCENC_INVALID_BETA_OFFSET;
- }
- sliceHdr->slice_beta_offset_div_2 = encParam->beta_offset;
- }
- if (encvid->outOfBandParamSet == TRUE)
- {
- sliceHdr->idr_pic_id = 0;
- }
- else
- {
- sliceHdr->idr_pic_id = (uint)(-1); /* start with zero */
- }
- sliceHdr->field_pic_flag = FALSE;
- sliceHdr->bottom_field_flag = FALSE; /* won't be used anyway */
- video->MbaffFrameFlag = (seqParam->mb_adaptive_frame_field_flag && !sliceHdr->field_pic_flag);
-
- /* the rest will be set in InitSlice() */
-
- /* now the rate control and performance related parameters */
- rateCtrl->scdEnable = (encParam->auto_scd == AVC_ON) ? TRUE : FALSE;
- rateCtrl->idrPeriod = encParam->idr_period + 1;
- rateCtrl->intraMBRate = encParam->intramb_refresh;
- rateCtrl->dpEnable = (encParam->data_par == AVC_ON) ? TRUE : FALSE;
-
- rateCtrl->subPelEnable = (encParam->sub_pel == AVC_ON) ? TRUE : FALSE;
- rateCtrl->mvRange = encParam->search_range;
-
- rateCtrl->subMBEnable = (encParam->submb_pred == AVC_ON) ? TRUE : FALSE;
- rateCtrl->rdOptEnable = (encParam->rdopt_mode == AVC_ON) ? TRUE : FALSE;
- rateCtrl->bidirPred = (encParam->bidir_pred == AVC_ON) ? TRUE : FALSE;
-
- rateCtrl->rcEnable = (encParam->rate_control == AVC_ON) ? TRUE : FALSE;
- rateCtrl->initQP = encParam->initQP;
- rateCtrl->initQP = AVC_CLIP3(0, 51, rateCtrl->initQP);
-
- rateCtrl->bitRate = encParam->bitrate;
- rateCtrl->cpbSize = encParam->CPB_size;
- rateCtrl->initDelayOffset = (rateCtrl->bitRate * encParam->init_CBP_removal_delay / 1000);
-
- if (encParam->frame_rate == 0)
- {
- return AVCENC_INVALID_FRAMERATE;
- }
-
- rateCtrl->frame_rate = (OsclFloat)(encParam->frame_rate * 1.0 / 1000);
-// rateCtrl->srcInterval = encParam->src_interval;
- rateCtrl->first_frame = 1; /* set this flag for the first time */
-
-    /* constrained_setx_flag will be set inside VerifyProfile() called below. */
- if (!extS && !extP)
- {
- seqParam->profile_idc = encParam->profile;
- seqParam->constrained_set0_flag = FALSE;
- seqParam->constrained_set1_flag = FALSE;
- seqParam->constrained_set2_flag = FALSE;
- seqParam->constrained_set3_flag = FALSE;
- seqParam->level_idc = encParam->level;
- }
- else
- {
- seqParam->profile_idc = extS->profile_idc;
- seqParam->constrained_set0_flag = extS->constrained_set0_flag;
- seqParam->constrained_set1_flag = extS->constrained_set1_flag;
- seqParam->constrained_set2_flag = extS->constrained_set2_flag;
- seqParam->constrained_set3_flag = extS->constrained_set3_flag;
- seqParam->level_idc = extS->level_idc;
- }
-
-
- status = VerifyProfile(encvid, seqParam, picParam);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- status = VerifyLevel(encvid, seqParam, picParam);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
-
- return AVCENC_SUCCESS;
-}
-
-/* verify the profile setting */
-AVCEnc_Status VerifyProfile(AVCEncObject *encvid, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam)
-{
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- AVCEnc_Status status = AVCENC_SUCCESS;
-
- if (seqParam->profile_idc == 0) /* find profile for this setting */
- {
- /* find the right profile for it */
- if (seqParam->direct_8x8_inference_flag == TRUE &&
- picParam->entropy_coding_mode_flag == FALSE &&
- picParam->num_slice_groups_minus1 <= 7 /*&&
- picParam->num_slice_groups_minus1>=0 (no need, it's unsigned) */)
- {
- seqParam->profile_idc = AVC_EXTENDED;
- seqParam->constrained_set2_flag = TRUE;
- }
-
- if (rateCtrl->dpEnable == FALSE &&
- picParam->num_slice_groups_minus1 == 0 &&
- picParam->redundant_pic_cnt_present_flag == FALSE)
- {
- seqParam->profile_idc = AVC_MAIN;
- seqParam->constrained_set1_flag = TRUE;
- }
-
- if (rateCtrl->bidirPred == FALSE &&
- rateCtrl->dpEnable == FALSE &&
- seqParam->frame_mbs_only_flag == TRUE &&
- picParam->weighted_pred_flag == FALSE &&
- picParam->weighted_bipred_idc == 0 &&
- picParam->entropy_coding_mode_flag == FALSE &&
- picParam->num_slice_groups_minus1 <= 7 /*&&
- picParam->num_slice_groups_minus1>=0 (no need, it's unsigned)*/)
- {
- seqParam->profile_idc = AVC_BASELINE;
- seqParam->constrained_set0_flag = TRUE;
- }
-
- if (seqParam->profile_idc == 0) /* still zero */
- {
- return AVCENC_PROFILE_NOT_SUPPORTED;
- }
- }
-
- /* check the list of supported profile by this library */
- switch (seqParam->profile_idc)
- {
- case AVC_BASELINE:
- if (rateCtrl->bidirPred == TRUE ||
- rateCtrl->dpEnable == TRUE ||
- seqParam->frame_mbs_only_flag != TRUE ||
- picParam->weighted_pred_flag == TRUE ||
- picParam->weighted_bipred_idc != 0 ||
- picParam->entropy_coding_mode_flag == TRUE ||
- picParam->num_slice_groups_minus1 > 7 /*||
- picParam->num_slice_groups_minus1<0 (no need, it's unsigned) */)
- {
- status = AVCENC_TOOLS_NOT_SUPPORTED;
- }
- break;
-
- case AVC_MAIN:
- case AVC_EXTENDED:
- status = AVCENC_PROFILE_NOT_SUPPORTED;
- }
-
- return status;
-}
-
-/* verify the level setting */
-AVCEnc_Status VerifyLevel(AVCEncObject *encvid, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam)
-{
- (void)(picParam);
-
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- AVCCommonObj *video = encvid->common;
- int mb_per_sec, ii;
- int lev_idx;
- int dpb_size;
-
- mb_per_sec = (int)(video->PicSizeInMbs * rateCtrl->frame_rate + 0.5);
- dpb_size = (seqParam->num_ref_frames * video->PicSizeInMbs * 3) >> 6;
-
- if (seqParam->level_idc == 0) /* find level for this setting */
- {
- for (ii = 0; ii < MAX_LEVEL_IDX; ii++)
- {
- if (mb_per_sec <= MaxMBPS[ii] &&
- video->PicSizeInMbs <= (uint)MaxFS[ii] &&
- rateCtrl->bitRate <= (int32)MaxBR[ii]*1000 &&
- rateCtrl->cpbSize <= (int32)MaxCPB[ii]*1000 &&
- rateCtrl->mvRange <= MaxVmvR[ii] &&
- dpb_size <= MaxDPBX2[ii]*512)
- {
- seqParam->level_idc = mapIdx2Lev[ii];
- break;
- }
- }
- if (seqParam->level_idc == 0)
- {
- return AVCENC_LEVEL_NOT_SUPPORTED;
- }
- }
-
- /* check if this level is supported by this library */
- lev_idx = mapLev2Idx[seqParam->level_idc];
- if (seqParam->level_idc == AVC_LEVEL1_B)
- {
- seqParam->constrained_set3_flag = 1;
- }
-
-
- if (lev_idx == 255) /* not defined */
- {
- return AVCENC_LEVEL_NOT_SUPPORTED;
- }
-
- /* check if the encoding setting complies with the level */
- if (mb_per_sec > MaxMBPS[lev_idx] ||
- video->PicSizeInMbs > (uint)MaxFS[lev_idx] ||
- rateCtrl->bitRate > (int32)MaxBR[lev_idx]*1000 ||
- rateCtrl->cpbSize > (int32)MaxCPB[lev_idx]*1000 ||
- rateCtrl->mvRange > MaxVmvR[lev_idx])
- {
- return AVCENC_LEVEL_FAIL;
- }
-
- return AVCENC_SUCCESS;
-}
-
-/* initialize variables at the beginning of each frame */
-/* determine the picture type */
-/* encode POC */
-/* maybe we should do more stuff here. MotionEstimation+SCD and generate a new SPS and PPS */
-AVCEnc_Status InitFrame(AVCEncObject *encvid)
-{
- AVCStatus ret;
- AVCEnc_Status status;
- AVCCommonObj *video = encvid->common;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
-
-    /* look for the next frame in coding_order and look for an available picture
-       in the DPB. Note: video->currFS->PicOrderCnt, currFS->FrameNum and currPic->PicNum
-       are set to the wrong numbers in this function (they are correct for the decoder). */
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- // call init DPB in here.
- ret = AVCConfigureSequence(encvid->avcHandle, video, TRUE);
- if (ret != AVC_SUCCESS)
- {
- return AVCENC_FAIL;
- }
- }
-
- /* flexible macroblock ordering (every frame)*/
- /* populate video->mapUnitToSliceGroupMap and video->MbToSliceGroupMap */
- /* It changes once per each PPS. */
- FMOInit(video);
-
- ret = DPBInitBuffer(encvid->avcHandle, video); // get new buffer
-
- if (ret != AVC_SUCCESS)
- {
- return (AVCEnc_Status)ret; // AVCENC_PICTURE_READY, FAIL
- }
-
- DPBInitPic(video, 0); /* 0 is dummy */
-
- /************* determine picture type IDR or non-IDR ***********/
- video->currPicType = AVC_FRAME;
- video->slice_data_partitioning = FALSE;
- encvid->currInput->is_reference = 1; /* default to all frames */
- video->nal_ref_idc = 1; /* need to set this for InitPOC */
- video->currPic->isReference = TRUE;
-
- /************* set frame_num ********************/
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- video->prevFrameNum = video->MaxFrameNum;
- video->PrevRefFrameNum = 0;
- sliceHdr->frame_num = 0;
- }
-    /* otherwise, it is set to the previous reference frame access unit's frame_num in decoding order,
-    see the end of PVAVCDecodeSlice() */
-    /* There is also a restriction on frame_num, see page 59 of JVT-I1010.doc. */
-    /* Basically, frame_num can't be repeated unless it's for opposite fields or non-reference fields */
- else
- {
- sliceHdr->frame_num = (video->PrevRefFrameNum + 1) % video->MaxFrameNum;
- }
- video->CurrPicNum = sliceHdr->frame_num; /* for field_pic_flag = 0 */
- //video->CurrPicNum = 2*sliceHdr->frame_num + 1; /* for field_pic_flag = 1 */
-
- /* assign pic_order_cnt, video->PicOrderCnt */
- status = InitPOC(encvid);
- if (status != AVCENC_SUCCESS) /* unrecoverable failure */
- {
- return status;
- }
-
- /* Initialize refListIdx for this picture */
- RefListInit(video);
-
- /************* motion estimation and scene analysis ************/
- // note: consider moving this to MB-based MV search for comparison
- // use sub-optimal QP for mv search
- AVCMotionEstimation(encvid); /* AVCENC_SUCCESS or AVCENC_NEW_IDR */
-
- /* after this point, the picture type will be fixed to either IDR or non-IDR */
- video->currFS->PicOrderCnt = video->PicOrderCnt;
- video->currFS->FrameNum = video->sliceHdr->frame_num;
- video->currPic->PicNum = video->CurrPicNum;
- video->mbNum = 0; /* start from zero MB */
- encvid->currSliceGroup = 0; /* start from slice group #0 */
- encvid->numIntraMB = 0; /* reset this counter */
-
- if (video->nal_unit_type == AVC_NALTYPE_IDR)
- {
- RCInitGOP(encvid);
-
- /* calculate picture QP */
- RCInitFrameQP(encvid);
-
- return AVCENC_NEW_IDR;
- }
-
- /* calculate picture QP */
- RCInitFrameQP(encvid); /* get QP after MV search */
-
- return AVCENC_SUCCESS;
-}
-
-/* initialize variables for this slice */
-AVCEnc_Status InitSlice(AVCEncObject *encvid)
-{
- AVCCommonObj *video = encvid->common;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- AVCPicParamSet *currPPS = video->currPicParams;
- AVCSeqParamSet *currSPS = video->currSeqParams;
- int slice_type = video->slice_type;
-
- sliceHdr->first_mb_in_slice = video->mbNum;
- if (video->mbNum) // not first slice of a frame
- {
- video->sliceHdr->slice_type = (AVCSliceType)slice_type;
- }
-
- /* sliceHdr->slice_type already set in InitFrame */
-
- sliceHdr->pic_parameter_set_id = video->currPicParams->pic_parameter_set_id;
-
- /* sliceHdr->frame_num already set in InitFrame */
-
- if (!currSPS->frame_mbs_only_flag) /* we shouldn't need this check */
- {
- sliceHdr->field_pic_flag = sliceHdr->bottom_field_flag = FALSE;
- return AVCENC_TOOLS_NOT_SUPPORTED;
- }
-
- /* sliceHdr->idr_pic_id already set in PVAVCEncodeNAL
-
- sliceHdr->pic_order_cnt_lsb already set in InitFrame..InitPOC
- sliceHdr->delta_pic_order_cnt_bottom already set in InitPOC
-
- sliceHdr->delta_pic_order_cnt[0] already set in InitPOC
- sliceHdr->delta_pic_order_cnt[1] already set in InitPOC
- */
-
- sliceHdr->redundant_pic_cnt = 0; /* default if(currPPS->redundant_pic_cnt_present_flag), range 0..127 */
- sliceHdr->direct_spatial_mv_pred_flag = 0; // default if(slice_type == AVC_B_SLICE)
-
- sliceHdr->num_ref_idx_active_override_flag = FALSE; /* default, if(slice_type== P,SP or B)*/
- sliceHdr->num_ref_idx_l0_active_minus1 = 0; /* default, if (num_ref_idx_active_override_flag) */
- sliceHdr->num_ref_idx_l1_active_minus1 = 0; /* default, if above and B_slice */
- /* the above 2 values range from 0..15 for frame picture and 0..31 for field picture */
-
- /* ref_pic_list_reordering(), currently we don't do anything */
- sliceHdr->ref_pic_list_reordering_flag_l0 = FALSE; /* default */
- sliceHdr->ref_pic_list_reordering_flag_l1 = FALSE; /* default */
- /* if the above are TRUE, some other params must be set */
-
- if ((currPPS->weighted_pred_flag && (slice_type == AVC_P_SLICE || slice_type == AVC_SP_SLICE)) ||
- (currPPS->weighted_bipred_idc == 1 && slice_type == AVC_B_SLICE))
- {
- // pred_weight_table(); // not supported !!
- return AVCENC_TOOLS_NOT_SUPPORTED;
- }
-
- /* dec_ref_pic_marking(), this will be done later*/
- sliceHdr->no_output_of_prior_pics_flag = FALSE; /* default */
- sliceHdr->long_term_reference_flag = FALSE; /* for IDR frame, do not make it long term */
- sliceHdr->adaptive_ref_pic_marking_mode_flag = FALSE; /* default */
- /* other params are not set here because they are not used */
-
- sliceHdr->cabac_init_idc = 0; /* default, if entropy_coding_mode_flag && slice_type==I or SI, range 0..2 */
- sliceHdr->slice_qp_delta = 0; /* default for now */
- sliceHdr->sp_for_switch_flag = FALSE; /* default, if slice_type == SP */
- sliceHdr->slice_qs_delta = 0; /* default, if slice_type == SP or SI */
-
- /* derived variables from encParam */
- /* deblocking filter */
- video->FilterOffsetA = video->FilterOffsetB = 0;
- if (currPPS->deblocking_filter_control_present_flag == TRUE)
- {
- video->FilterOffsetA = sliceHdr->slice_alpha_c0_offset_div2 << 1;
- video->FilterOffsetB = sliceHdr->slice_beta_offset_div_2 << 1;
- }
-
- /* flexible macroblock ordering */
- /* populate video->mapUnitToSliceGroupMap and video->MbToSliceGroupMap */
- /* We already call it at the end of PVAVCEncInitialize(). It changes once per PPS. */
- if (video->currPicParams->num_slice_groups_minus1 > 0 && video->currPicParams->slice_group_map_type >= 3
- && video->currPicParams->slice_group_map_type <= 5)
- {
- sliceHdr->slice_group_change_cycle = SLICE_GROUP_CHANGE_CYCLE; /* default; it is unclear how this should be set */
-
- video->MapUnitsInSliceGroup0 =
- AVC_MIN(sliceHdr->slice_group_change_cycle * video->SliceGroupChangeRate, video->PicSizeInMapUnits);
-
- FMOInit(video);
- }
-
- /* calculate SliceQPy first */
- /* calculate QSy first */
-
- sliceHdr->slice_qp_delta = video->QPy - 26 - currPPS->pic_init_qp_minus26;
- //sliceHdr->slice_qs_delta = video->QSy - 26 - currPPS->pic_init_qs_minus26;
-
- return AVCENC_SUCCESS;
-}
-
diff --git a/media/libstagefright/codecs/avc/enc/src/intra_est.cpp b/media/libstagefright/codecs/avc/enc/src/intra_est.cpp
deleted file mode 100644
index e397805..0000000
--- a/media/libstagefright/codecs/avc/enc/src/intra_est.cpp
+++ /dev/null
@@ -1,2199 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-
-#define TH_I4 0 /* threshold biasing toward I16 mode instead of I4 mode */
-#define TH_Intra 0 /* threshold biasing toward INTER mode instead of intra mode */
-
-#define FIXED_INTRAPRED_MODE AVC_I16
-#define FIXED_I16_MODE AVC_I16_DC
-#define FIXED_I4_MODE AVC_I4_Diagonal_Down_Left
-#define FIXED_INTRA_CHROMA_MODE AVC_IC_DC
-
-#define CLIP_RESULT(x) if((uint)(x) > 0xFF){ \
- (x) = 0xFF & (~((x)>>31));}
-
-
-bool IntraDecisionABE(AVCEncObject *encvid, int min_cost, uint8 *curL, int picPitch)
-{
- AVCCommonObj *video = encvid->common;
- AVCFrameIO *currInput = encvid->currInput;
- int orgPitch = currInput->pitch;
- int x_pos = (video->mb_x) << 4;
- int y_pos = (video->mb_y) << 4;
- uint8 *orgY = currInput->YCbCr[0] + y_pos * orgPitch + x_pos;
- int j;
- uint8 *topL, *leftL, *orgY_2, *orgY_3;
- int temp, SBE, offset;
- OsclFloat ABE;
- bool intra = true;
-
- if (((x_pos >> 4) != (int)video->PicWidthInMbs - 1) &&
- ((y_pos >> 4) != (int)video->PicHeightInMbs - 1) &&
- video->intraAvailA &&
- video->intraAvailB)
- {
- SBE = 0;
- /* top neighbor */
- topL = curL - picPitch;
- /* left neighbor */
- leftL = curL - 1;
- orgY_2 = orgY - orgPitch;
-
- for (j = 0; j < 16; j++)
- {
- temp = *topL++ - orgY[j];
- SBE += ((temp >= 0) ? temp : -temp);
- temp = *(leftL += picPitch) - *(orgY_2 += orgPitch);
- SBE += ((temp >= 0) ? temp : -temp);
- }
-
- /* calculate chroma */
- offset = (y_pos >> 2) * picPitch + (x_pos >> 1);
- topL = video->currPic->Scb + offset;
- orgY_2 = currInput->YCbCr[1] + offset + (y_pos >> 2) * (orgPitch - picPitch);
-
- leftL = topL - 1;
- topL -= (picPitch >> 1);
- orgY_3 = orgY_2 - (orgPitch >> 1);
- for (j = 0; j < 8; j++)
- {
- temp = *topL++ - orgY_2[j];
- SBE += ((temp >= 0) ? temp : -temp);
- temp = *(leftL += (picPitch >> 1)) - *(orgY_3 += (orgPitch >> 1));
- SBE += ((temp >= 0) ? temp : -temp);
- }
-
- topL = video->currPic->Scr + offset;
- orgY_2 = currInput->YCbCr[2] + offset + (y_pos >> 2) * (orgPitch - picPitch);
-
- leftL = topL - 1;
- topL -= (picPitch >> 1);
- orgY_3 = orgY_2 - (orgPitch >> 1);
- for (j = 0; j < 8; j++)
- {
- temp = *topL++ - orgY_2[j];
- SBE += ((temp >= 0) ? temp : -temp);
- temp = *(leftL += (picPitch >> 1)) - *(orgY_3 += (orgPitch >> 1));
- SBE += ((temp >= 0) ? temp : -temp);
- }
-
- /* compare mincost/384 and SBE/64 */
- ABE = SBE / 64.0;
- if (ABE*0.8 >= min_cost / 384.0)
- {
- intra = false;
- }
- }
-
- return intra;
-}
-
-/* search for the MB mode */
-/* assuming that this is done inside the encoding loop,
-there is no need to call InitNeighborAvailability */
-
-void MBIntraSearch(AVCEncObject *encvid, int mbnum, uint8 *curL, int picPitch)
-{
- AVCCommonObj *video = encvid->common;
- AVCFrameIO *currInput = encvid->currInput;
- AVCMacroblock *currMB = video->currMB;
- int min_cost;
- uint8 *orgY;
- int x_pos = (video->mb_x) << 4;
- int y_pos = (video->mb_y) << 4;
- uint32 *saved_inter;
- int j;
- int orgPitch = currInput->pitch;
- bool intra = true;
-
- currMB->CBP = 0;
-
- /* first do motion vector and variable block size search */
- min_cost = encvid->min_cost[mbnum];
-
- /* now perform intra prediction search */
- /* need to add the check for encvid->intraSearch[video->mbNum] to skip intra
- if it's not worth checking. */
- if (video->slice_type == AVC_P_SLICE)
- {
- /* Decide whether intra search is necessary or not */
- /* We do this inside the encoding loop so that the neighboring pixels are the
- actual reconstructed pixels. */
- intra = IntraDecisionABE(encvid, min_cost, curL, picPitch);
- }
-
- if (intra == true || video->slice_type == AVC_I_SLICE)
- {
- orgY = currInput->YCbCr[0] + y_pos * orgPitch + x_pos;
-
- /* i16 mode search */
- /* generate all the predictions */
- intrapred_luma_16x16(encvid);
-
- /* evaluate them one by one */
- find_cost_16x16(encvid, orgY, &min_cost);
-
- if (video->slice_type == AVC_P_SLICE)
- {
- /* save current inter prediction */
- saved_inter = encvid->subpel_pred; /* reuse existing buffer */
- j = 16;
- curL -= 4;
- picPitch -= 16;
- while (j--)
- {
- *saved_inter++ = *((uint32*)(curL += 4));
- *saved_inter++ = *((uint32*)(curL += 4));
- *saved_inter++ = *((uint32*)(curL += 4));
- *saved_inter++ = *((uint32*)(curL += 4));
- curL += picPitch;
- }
-
- }
-
- /* i4 mode search */
- mb_intra4x4_search(encvid, &min_cost);
-
- encvid->min_cost[mbnum] = min_cost; /* update min_cost */
- }
-
-
- if (currMB->mb_intra)
- {
- chroma_intra_search(encvid);
-
- /* need to set this in order for the MBInterPrediction to work!! */
- memset(currMB->mvL0, 0, sizeof(int32)*16);
- currMB->ref_idx_L0[0] = currMB->ref_idx_L0[1] =
- currMB->ref_idx_L0[2] = currMB->ref_idx_L0[3] = -1;
- }
- else if (video->slice_type == AVC_P_SLICE && intra == true)
- {
- /* restore current inter prediction */
- saved_inter = encvid->subpel_pred; /* reuse existing buffer */
- j = 16;
- curL -= ((picPitch + 16) << 4);
- while (j--)
- {
- *((uint32*)(curL += 4)) = *saved_inter++;
- *((uint32*)(curL += 4)) = *saved_inter++;
- *((uint32*)(curL += 4)) = *saved_inter++;
- *((uint32*)(curL += 4)) = *saved_inter++;
- curL += picPitch;
- }
- }
-
- return ;
-}
-
-/* generate all the prediction values */
-void intrapred_luma_16x16(AVCEncObject *encvid)
-{
- AVCCommonObj *video = encvid->common;
- AVCPictureData *currPic = video->currPic;
-
- int x_pos = (video->mb_x) << 4;
- int y_pos = (video->mb_y) << 4;
- int pitch = currPic->pitch;
-
- int offset = y_pos * pitch + x_pos;
-
- uint8 *pred, *top, *left;
- uint8 *curL = currPic->Sl + offset; /* point to reconstructed frame */
- uint32 word1, word2, word3, word4;
- uint32 sum = 0;
-
- int a_16, b, c, factor_c;
- uint8 *comp_ref_x0, *comp_ref_x1, *comp_ref_y0, *comp_ref_y1;
- int H = 0, V = 0, tmp, value;
- int i;
-
- if (video->intraAvailB)
- {
- //get vertical prediction mode
- top = curL - pitch;
-
- pred = encvid->pred_i16[AVC_I16_Vertical] - 16;
-
- word1 = *((uint32*)(top)); /* read 4 bytes from top */
- word2 = *((uint32*)(top + 4)); /* read 4 bytes from top */
- word3 = *((uint32*)(top + 8)); /* read 4 bytes from top */
- word4 = *((uint32*)(top + 12)); /* read 4 bytes from top */
-
- for (i = 0; i < 16; i++)
- {
- *((uint32*)(pred += 16)) = word1;
- *((uint32*)(pred + 4)) = word2;
- *((uint32*)(pred + 8)) = word3;
- *((uint32*)(pred + 12)) = word4;
-
- }
-
- sum = word1 & 0xFF00FF;
- word1 = (word1 >> 8) & 0xFF00FF;
- sum += word1;
- word1 = (word2 & 0xFF00FF);
- sum += word1;
- word2 = (word2 >> 8) & 0xFF00FF;
- sum += word2;
- word1 = (word3 & 0xFF00FF);
- sum += word1;
- word3 = (word3 >> 8) & 0xFF00FF;
- sum += word3;
- word1 = (word4 & 0xFF00FF);
- sum += word1;
- word4 = (word4 >> 8) & 0xFF00FF;
- sum += word4;
-
- sum += (sum >> 16);
- sum &= 0xFFFF;
-
- if (!video->intraAvailA)
- {
- sum = (sum + 8) >> 4;
- }
- }
-
- if (video->intraAvailA)
- {
- // get horizontal mode
- left = curL - 1 - pitch;
-
- pred = encvid->pred_i16[AVC_I16_Horizontal] - 16;
-
- for (i = 0; i < 16; i++)
- {
- word1 = *(left += pitch);
- sum += word1;
-
- word1 = (word1 << 8) | word1;
- word1 = (word1 << 16) | word1; /* make it 4 */
-
- *(uint32*)(pred += 16) = word1;
- *(uint32*)(pred + 4) = word1;
- *(uint32*)(pred + 8) = word1;
- *(uint32*)(pred + 12) = word1;
- }
-
- if (!video->intraAvailB)
- {
- sum = (sum + 8) >> 4;
- }
- else
- {
- sum = (sum + 16) >> 5;
- }
- }
-
- // get DC mode
- if (!video->intraAvailA && !video->intraAvailB)
- {
- sum = 0x80808080;
- }
- else
- {
- sum = (sum << 8) | sum;
- sum = (sum << 16) | sum;
- }
-
- pred = encvid->pred_i16[AVC_I16_DC] - 16;
- for (i = 0; i < 16; i++)
- {
- *((uint32*)(pred += 16)) = sum;
- *((uint32*)(pred + 4)) = sum;
- *((uint32*)(pred + 8)) = sum;
- *((uint32*)(pred + 12)) = sum;
- }
-
- // get plane mode
- if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
- {
- pred = encvid->pred_i16[AVC_I16_Plane] - 16;
-
- comp_ref_x0 = curL - pitch + 8;
- comp_ref_x1 = curL - pitch + 6;
- comp_ref_y0 = curL - 1 + (pitch << 3);
- comp_ref_y1 = curL - 1 + 6 * pitch;
-
- for (i = 1; i < 8; i++)
- {
- H += i * (*comp_ref_x0++ - *comp_ref_x1--);
- V += i * (*comp_ref_y0 - *comp_ref_y1);
- comp_ref_y0 += pitch;
- comp_ref_y1 -= pitch;
- }
-
- H += i * (*comp_ref_x0++ - curL[-pitch-1]);
- V += i * (*comp_ref_y0 - *comp_ref_y1);
-
-
- a_16 = ((*(curL - pitch + 15) + *(curL - 1 + 15 * pitch)) << 4) + 16;
- b = (5 * H + 32) >> 6;
- c = (5 * V + 32) >> 6;
-
- tmp = 0;
- for (i = 0; i < 16; i++)
- {
- factor_c = a_16 + c * (tmp++ - 7);
- factor_c -= 7 * b;
-
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = (word1) | (value << 8);
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = (word1) | (value << 16);
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = (word1) | (value << 24);
- *((uint32*)(pred += 16)) = word1;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = (word1) | (value << 8);
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = (word1) | (value << 16);
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = (word1) | (value << 24);
- *((uint32*)(pred + 4)) = word1;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = (word1) | (value << 8);
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = (word1) | (value << 16);
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = (word1) | (value << 24);
- *((uint32*)(pred + 8)) = word1;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = (word1) | (value << 8);
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- word1 = (word1) | (value << 16);
- value = factor_c >> 5;
- CLIP_RESULT(value)
- word1 = (word1) | (value << 24);
- *((uint32*)(pred + 12)) = word1;
- }
- }
-
- return ;
-}
-
-
-/* evaluate each prediction mode of I16 */
-void find_cost_16x16(AVCEncObject *encvid, uint8 *orgY, int *min_cost)
-{
- AVCCommonObj *video = encvid->common;
- AVCMacroblock *currMB = video->currMB;
- int cost;
- int org_pitch = encvid->currInput->pitch;
-
- /* evaluate vertical mode */
- if (video->intraAvailB)
- {
- cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_Vertical], *min_cost);
- if (cost < *min_cost)
- {
- *min_cost = cost;
- currMB->mbMode = AVC_I16;
- currMB->mb_intra = 1;
- currMB->i16Mode = AVC_I16_Vertical;
- }
- }
-
-
- /* evaluate horizontal mode */
- if (video->intraAvailA)
- {
- cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_Horizontal], *min_cost);
- if (cost < *min_cost)
- {
- *min_cost = cost;
- currMB->mbMode = AVC_I16;
- currMB->mb_intra = 1;
- currMB->i16Mode = AVC_I16_Horizontal;
- }
- }
-
- /* evaluate DC mode */
- cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_DC], *min_cost);
- if (cost < *min_cost)
- {
- *min_cost = cost;
- currMB->mbMode = AVC_I16;
- currMB->mb_intra = 1;
- currMB->i16Mode = AVC_I16_DC;
- }
-
- /* evaluate plane mode */
- if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
- {
- cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_Plane], *min_cost);
- if (cost < *min_cost)
- {
- *min_cost = cost;
- currMB->mbMode = AVC_I16;
- currMB->mb_intra = 1;
- currMB->i16Mode = AVC_I16_Plane;
- }
- }
-
- return ;
-}
-
-
-int cost_i16(uint8 *org, int org_pitch, uint8 *pred, int min_cost)
-{
-
- int cost;
- int j, k;
- int16 res[256], *pres; // residue
- int m0, m1, m2, m3;
-
- // calculate SATD
- org_pitch -= 16;
- pres = res;
- // horizontal transform
- for (j = 0; j < 16; j++)
- {
- k = 4;
- while (k > 0)
- {
- m0 = org[0] - pred[0];
- m3 = org[3] - pred[3];
- m0 += m3;
- m3 = m0 - (m3 << 1);
- m1 = org[1] - pred[1];
- m2 = org[2] - pred[2];
- m1 += m2;
- m2 = m1 - (m2 << 1);
- pres[0] = m0 + m1;
- pres[2] = m0 - m1;
- pres[1] = m2 + m3;
- pres[3] = m3 - m2;
-
- org += 4;
- pres += 4;
- pred += 4;
- k--;
- }
- org += org_pitch;
- }
- /* vertical transform */
- cost = 0;
- for (j = 0; j < 4; j++)
- {
- pres = res + (j << 6);
- k = 16;
- while (k > 0)
- {
- m0 = pres[0];
- m3 = pres[3<<4];
- m0 += m3;
- m3 = m0 - (m3 << 1);
- m1 = pres[1<<4];
- m2 = pres[2<<4];
- m1 += m2;
- m2 = m1 - (m2 << 1);
- pres[0] = m0 = m0 + m1;
-
- if (k&0x3) // only sum up non DC values.
- {
- cost += ((m0 > 0) ? m0 : -m0);
- }
-
- m1 = m0 - (m1 << 1);
- cost += ((m1 > 0) ? m1 : -m1);
- m3 = m2 + m3;
- cost += ((m3 > 0) ? m3 : -m3);
- m2 = m3 - (m2 << 1);
- cost += ((m2 > 0) ? m2 : -m2);
-
- pres++;
- k--;
- }
- if ((cost >> 1) > min_cost) /* early drop out */
- {
- return (cost >> 1);
- }
- }
-
- /* Hadamard of the DC coefficient */
- pres = res;
- k = 4;
- while (k > 0)
- {
- m0 = pres[0];
- m3 = pres[3<<2];
- m0 >>= 2;
- m0 += (m3 >> 2);
- m3 = m0 - (m3 >> 1);
- m1 = pres[1<<2];
- m2 = pres[2<<2];
- m1 >>= 2;
- m1 += (m2 >> 2);
- m2 = m1 - (m2 >> 1);
- pres[0] = (m0 + m1);
- pres[2<<2] = (m0 - m1);
- pres[1<<2] = (m2 + m3);
- pres[3<<2] = (m3 - m2);
- pres += (4 << 4);
- k--;
- }
-
- pres = res;
- k = 4;
- while (k > 0)
- {
- m0 = pres[0];
- m3 = pres[3<<6];
- m0 += m3;
- m3 = m0 - (m3 << 1);
- m1 = pres[1<<6];
- m2 = pres[2<<6];
- m1 += m2;
- m2 = m1 - (m2 << 1);
- m0 = m0 + m1;
- cost += ((m0 >= 0) ? m0 : -m0);
- m1 = m0 - (m1 << 1);
- cost += ((m1 >= 0) ? m1 : -m1);
- m3 = m2 + m3;
- cost += ((m3 >= 0) ? m3 : -m3);
- m2 = m3 - (m2 << 1);
- cost += ((m2 >= 0) ? m2 : -m2);
- pres += 4;
-
- if ((cost >> 1) > min_cost) /* early drop out */
- {
- return (cost >> 1);
- }
-
- k--;
- }
-
- return (cost >> 1);
-}
-
-
-void mb_intra4x4_search(AVCEncObject *encvid, int *min_cost)
-{
- AVCCommonObj *video = encvid->common;
- AVCMacroblock *currMB = video->currMB;
- AVCPictureData *currPic = video->currPic;
- AVCFrameIO *currInput = encvid->currInput;
- int pitch = currPic->pitch;
- int org_pitch = currInput->pitch;
- int offset;
- uint8 *curL, *comp, *org4, *org8;
- int y = video->mb_y << 4;
- int x = video->mb_x << 4;
-
- int b8, b4, cost4x4, blkidx;
- int cost = 0;
- int numcoef;
- int dummy = 0;
- int mb_intra = currMB->mb_intra; // save the original value
-
- offset = y * pitch + x;
-
- curL = currPic->Sl + offset;
- org8 = currInput->YCbCr[0] + y * org_pitch + x;
- video->pred_pitch = 4;
-
- cost = (int)(6.0 * encvid->lambda_mode + 0.4999);
- cost <<= 2;
-
- currMB->mb_intra = 1; // temporarily set this to one to enable the IDCT
- // operation inside dct_luma
-
- for (b8 = 0; b8 < 4; b8++)
- {
- comp = curL;
- org4 = org8;
-
- for (b4 = 0; b4 < 4; b4++)
- {
- blkidx = blkIdx2blkXY[b8][b4];
- cost4x4 = blk_intra4x4_search(encvid, blkidx, comp, org4);
- cost += cost4x4;
- if (cost > *min_cost)
- {
- currMB->mb_intra = mb_intra; // restore the value
- return ;
- }
-
- /* do residue, Xfrm, Q, invQ, invXfrm, recon and save the DCT coefs.*/
- video->pred_block = encvid->pred_i4[currMB->i4Mode[blkidx]];
- numcoef = dct_luma(encvid, blkidx, comp, org4, &dummy);
- currMB->nz_coeff[blkidx] = numcoef;
- if (numcoef)
- {
- video->cbp4x4 |= (1 << blkidx);
- currMB->CBP |= (1 << b8);
- }
-
- if (b4&1)
- {
- comp += ((pitch << 2) - 4);
- org4 += ((org_pitch << 2) - 4);
- }
- else
- {
- comp += 4;
- org4 += 4;
- }
- }
-
- if (b8&1)
- {
- curL += ((pitch << 3) - 8);
- org8 += ((org_pitch << 3) - 8);
- }
- else
- {
- curL += 8;
- org8 += 8;
- }
- }
-
- currMB->mb_intra = mb_intra; // restore the value
-
- if (cost < *min_cost)
- {
- *min_cost = cost;
- currMB->mbMode = AVC_I4;
- currMB->mb_intra = 1;
- }
-
- return ;
-}
-
-
-/* search for i4 mode for a 4x4 block */
-int blk_intra4x4_search(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org)
-{
- AVCCommonObj *video = encvid->common;
- AVCNeighborAvailability availability;
- AVCMacroblock *currMB = video->currMB;
- bool top_left = FALSE;
- int pitch = video->currPic->pitch;
- uint8 mode_avail[AVCNumI4PredMode];
- uint32 temp, DC;
- uint8 *pred;
- int org_pitch = encvid->currInput->pitch;
- uint16 min_cost, cost;
-
- int P_x, Q_x, R_x, P_y, Q_y, R_y, D, D0, D1;
- int P0, Q0, R0, S0, P1, Q1, R1, P2, Q2;
- uint8 P_A, P_B, P_C, P_D, P_E, P_F, P_G, P_H, P_I, P_J, P_K, P_L, P_X;
- int r0, r1, r2, r3, r4, r5, r6, r7;
- int x0, x1, x2, x3, x4, x5;
- uint32 temp1, temp2;
-
- int ipmode, mostProbableMode;
- int fixedcost = 4 * encvid->lambda_mode;
- int min_sad = 0x7FFF;
-
- availability.left = TRUE;
- availability.top = TRUE;
- if (blkidx <= 3) /* top row block (!block_y) */
- { /* check availability up */
- availability.top = video->intraAvailB ;
- }
- if (!(blkidx&0x3)) /* left column block (!block_x)*/
- { /* check availability left */
- availability.left = video->intraAvailA ;
- }
- availability.top_right = BlkTopRight[blkidx];
-
- if (availability.top_right == 2)
- {
- availability.top_right = video->intraAvailB;
- }
- else if (availability.top_right == 3)
- {
- availability.top_right = video->intraAvailC;
- }
-
- if (availability.top == TRUE)
- {
- temp = *(uint32*)(cur - pitch);
- P_A = temp & 0xFF;
- P_B = (temp >> 8) & 0xFF;
- P_C = (temp >> 16) & 0xFF;
- P_D = (temp >> 24) & 0xFF;
- }
- else
- {
- P_A = P_B = P_C = P_D = 128;
- }
-
- if (availability.top_right == TRUE)
- {
- temp = *(uint32*)(cur - pitch + 4);
- P_E = temp & 0xFF;
- P_F = (temp >> 8) & 0xFF;
- P_G = (temp >> 16) & 0xFF;
- P_H = (temp >> 24) & 0xFF;
- }
- else
- {
- P_E = P_F = P_G = P_H = 128;
- }
-
- if (availability.left == TRUE)
- {
- cur--;
- P_I = *cur;
- P_J = *(cur += pitch);
- P_K = *(cur += pitch);
- P_L = *(cur + pitch);
- cur -= (pitch << 1);
- cur++;
- }
- else
- {
- P_I = P_J = P_K = P_L = 128;
- }
-
- /* check if top-left pixel is available */
- if (((blkidx > 3) && (blkidx&0x3)) || ((blkidx > 3) && video->intraAvailA)
- || ((blkidx&0x3) && video->intraAvailB)
- || (video->intraAvailA && video->intraAvailD && video->intraAvailB))
- {
- top_left = TRUE;
- P_X = *(cur - pitch - 1);
- }
- else
- {
- P_X = 128;
- }
-
- //===== INTRA PREDICTION FOR 4x4 BLOCK =====
- /* vertical */
- mode_avail[AVC_I4_Vertical] = 0;
- if (availability.top)
- {
- mode_avail[AVC_I4_Vertical] = 1;
- pred = encvid->pred_i4[AVC_I4_Vertical];
-
- temp = (P_D << 24) | (P_C << 16) | (P_B << 8) | P_A ;
- *((uint32*)pred) = temp; /* write 4 at a time */
- *((uint32*)(pred += 4)) = temp;
- *((uint32*)(pred += 4)) = temp;
- *((uint32*)(pred += 4)) = temp;
- }
- /* horizontal */
- mode_avail[AVC_I4_Horizontal] = 0;
- mode_avail[AVC_I4_Horizontal_Up] = 0;
- if (availability.left)
- {
- mode_avail[AVC_I4_Horizontal] = 1;
- pred = encvid->pred_i4[AVC_I4_Horizontal];
-
- temp = P_I | (P_I << 8);
- temp = temp | (temp << 16);
- *((uint32*)pred) = temp;
- temp = P_J | (P_J << 8);
- temp = temp | (temp << 16);
- *((uint32*)(pred += 4)) = temp;
- temp = P_K | (P_K << 8);
- temp = temp | (temp << 16);
- *((uint32*)(pred += 4)) = temp;
- temp = P_L | (P_L << 8);
- temp = temp | (temp << 16);
- *((uint32*)(pred += 4)) = temp;
-
- mode_avail[AVC_I4_Horizontal_Up] = 1;
- pred = encvid->pred_i4[AVC_I4_Horizontal_Up];
-
- Q0 = (P_J + P_K + 1) >> 1;
- Q1 = (P_J + (P_K << 1) + P_L + 2) >> 2;
- P0 = ((P_I + P_J + 1) >> 1);
- P1 = ((P_I + (P_J << 1) + P_K + 2) >> 2);
-
- temp = P0 | (P1 << 8); // [P0 P1 Q0 Q1]
- temp |= (Q0 << 16); // [Q0 Q1 R0 D0]
- temp |= (Q1 << 24); // [R0 D0 D1 D1]
- *((uint32*)pred) = temp; // [D1 D1 D1 D1]
-
- D0 = (P_K + 3 * P_L + 2) >> 2;
- R0 = (P_K + P_L + 1) >> 1;
-
- temp = Q0 | (Q1 << 8);
- temp |= (R0 << 16);
- temp |= (D0 << 24);
- *((uint32*)(pred += 4)) = temp;
-
- D1 = P_L;
-
- temp = R0 | (D0 << 8);
- temp |= (D1 << 16);
- temp |= (D1 << 24);
- *((uint32*)(pred += 4)) = temp;
-
- temp = D1 | (D1 << 8);
- temp |= (temp << 16);
- *((uint32*)(pred += 4)) = temp;
- }
- /* DC */
- mode_avail[AVC_I4_DC] = 1;
- pred = encvid->pred_i4[AVC_I4_DC];
- if (availability.left)
- {
- DC = P_I + P_J + P_K + P_L;
-
- if (availability.top)
- {
- DC = (P_A + P_B + P_C + P_D + DC + 4) >> 3;
- }
- else
- {
- DC = (DC + 2) >> 2;
-
- }
- }
- else if (availability.top)
- {
- DC = (P_A + P_B + P_C + P_D + 2) >> 2;
-
- }
- else
- {
- DC = 128;
- }
-
- temp = DC | (DC << 8);
- temp = temp | (temp << 16);
- *((uint32*)pred) = temp;
- *((uint32*)(pred += 4)) = temp;
- *((uint32*)(pred += 4)) = temp;
- *((uint32*)(pred += 4)) = temp;
-
- /* Down-left */
- mode_avail[AVC_I4_Diagonal_Down_Left] = 0;
-
- if (availability.top)
- {
- mode_avail[AVC_I4_Diagonal_Down_Left] = 1;
-
- pred = encvid->pred_i4[AVC_I4_Diagonal_Down_Left];
-
- r0 = P_A;
- r1 = P_B;
- r2 = P_C;
- r3 = P_D;
-
- r0 += (r1 << 1);
- r0 += r2;
- r0 += 2;
- r0 >>= 2;
- r1 += (r2 << 1);
- r1 += r3;
- r1 += 2;
- r1 >>= 2;
-
- if (availability.top_right)
- {
- r4 = P_E;
- r5 = P_F;
- r6 = P_G;
- r7 = P_H;
-
- r2 += (r3 << 1);
- r2 += r4;
- r2 += 2;
- r2 >>= 2;
- r3 += (r4 << 1);
- r3 += r5;
- r3 += 2;
- r3 >>= 2;
- r4 += (r5 << 1);
- r4 += r6;
- r4 += 2;
- r4 >>= 2;
- r5 += (r6 << 1);
- r5 += r7;
- r5 += 2;
- r5 >>= 2;
- r6 += (3 * r7);
- r6 += 2;
- r6 >>= 2;
- temp = r0 | (r1 << 8);
- temp |= (r2 << 16);
- temp |= (r3 << 24);
- *((uint32*)pred) = temp;
-
- temp = (temp >> 8) | (r4 << 24);
- *((uint32*)(pred += 4)) = temp;
-
- temp = (temp >> 8) | (r5 << 24);
- *((uint32*)(pred += 4)) = temp;
-
- temp = (temp >> 8) | (r6 << 24);
- *((uint32*)(pred += 4)) = temp;
- }
- else
- {
- r2 += (r3 * 3);
- r2 += 2;
- r2 >>= 2;
- r3 = ((r3 << 2) + 2);
- r3 >>= 2;
-
- temp = r0 | (r1 << 8);
- temp |= (r2 << 16);
- temp |= (r3 << 24);
- *((uint32*)pred) = temp;
-
- temp = (temp >> 8) | (r3 << 24);
- *((uint32*)(pred += 4)) = temp;
-
- temp = (temp >> 8) | (r3 << 24);
- *((uint32*)(pred += 4)) = temp;
-
- temp = (temp >> 8) | (r3 << 24);
- *((uint32*)(pred += 4)) = temp;
-
- }
- }
-
- /* Down Right */
- mode_avail[AVC_I4_Diagonal_Down_Right] = 0;
- /* Diagonal Vertical Right */
- mode_avail[AVC_I4_Vertical_Right] = 0;
- /* Horizontal Down */
- mode_avail[AVC_I4_Horizontal_Down] = 0;
-
- if (top_left == TRUE)
- {
- /* Down Right */
- mode_avail[AVC_I4_Diagonal_Down_Right] = 1;
- pred = encvid->pred_i4[AVC_I4_Diagonal_Down_Right];
-
- Q_x = (P_A + 2 * P_B + P_C + 2) >> 2;
- R_x = (P_B + 2 * P_C + P_D + 2) >> 2;
- P_x = (P_X + 2 * P_A + P_B + 2) >> 2;
- D = (P_A + 2 * P_X + P_I + 2) >> 2;
- P_y = (P_X + 2 * P_I + P_J + 2) >> 2;
- Q_y = (P_I + 2 * P_J + P_K + 2) >> 2;
- R_y = (P_J + 2 * P_K + P_L + 2) >> 2;
-
- /* we can pack these */
- temp = D | (P_x << 8); //[D P_x Q_x R_x]
- //[P_y D P_x Q_x]
- temp |= (Q_x << 16); //[Q_y P_y D P_x]
- temp |= (R_x << 24); //[R_y Q_y P_y D ]
- *((uint32*)pred) = temp;
-
- temp = P_y | (D << 8);
- temp |= (P_x << 16);
- temp |= (Q_x << 24);
- *((uint32*)(pred += 4)) = temp;
-
- temp = Q_y | (P_y << 8);
- temp |= (D << 16);
- temp |= (P_x << 24);
- *((uint32*)(pred += 4)) = temp;
-
- temp = R_y | (Q_y << 8);
- temp |= (P_y << 16);
- temp |= (D << 24);
- *((uint32*)(pred += 4)) = temp;
-
-
- /* Diagonal Vertical Right */
- mode_avail[AVC_I4_Vertical_Right] = 1;
- pred = encvid->pred_i4[AVC_I4_Vertical_Right];
-
- Q0 = P_A + P_B + 1;
- R0 = P_B + P_C + 1;
- S0 = P_C + P_D + 1;
- P0 = P_X + P_A + 1;
- D = (P_I + 2 * P_X + P_A + 2) >> 2;
-
- P1 = (P0 + Q0) >> 2;
- Q1 = (Q0 + R0) >> 2;
- R1 = (R0 + S0) >> 2;
-
- P0 >>= 1;
- Q0 >>= 1;
- R0 >>= 1;
- S0 >>= 1;
-
- P2 = (P_X + 2 * P_I + P_J + 2) >> 2;
- Q2 = (P_I + 2 * P_J + P_K + 2) >> 2;
-
- temp = P0 | (Q0 << 8); //[P0 Q0 R0 S0]
- //[D P1 Q1 R1]
- temp |= (R0 << 16); //[P2 P0 Q0 R0]
- temp |= (S0 << 24); //[Q2 D P1 Q1]
- *((uint32*)pred) = temp;
-
- temp = D | (P1 << 8);
- temp |= (Q1 << 16);
- temp |= (R1 << 24);
- *((uint32*)(pred += 4)) = temp;
-
- temp = P2 | (P0 << 8);
- temp |= (Q0 << 16);
- temp |= (R0 << 24);
- *((uint32*)(pred += 4)) = temp;
-
- temp = Q2 | (D << 8);
- temp |= (P1 << 16);
- temp |= (Q1 << 24);
- *((uint32*)(pred += 4)) = temp;
-
-
- /* Horizontal Down */
- mode_avail[AVC_I4_Horizontal_Down] = 1;
- pred = encvid->pred_i4[AVC_I4_Horizontal_Down];
-
-
- Q2 = (P_A + 2 * P_B + P_C + 2) >> 2;
- P2 = (P_X + 2 * P_A + P_B + 2) >> 2;
- D = (P_I + 2 * P_X + P_A + 2) >> 2;
- P0 = P_X + P_I + 1;
- Q0 = P_I + P_J + 1;
- R0 = P_J + P_K + 1;
- S0 = P_K + P_L + 1;
-
- P1 = (P0 + Q0) >> 2;
- Q1 = (Q0 + R0) >> 2;
- R1 = (R0 + S0) >> 2;
-
- P0 >>= 1;
- Q0 >>= 1;
- R0 >>= 1;
- S0 >>= 1;
-
-
- /* we can pack these */
- temp = P0 | (D << 8); //[P0 D P2 Q2]
- //[Q0 P1 P0 D ]
- temp |= (P2 << 16); //[R0 Q1 Q0 P1]
- temp |= (Q2 << 24); //[S0 R1 R0 Q1]
- *((uint32*)pred) = temp;
-
- temp = Q0 | (P1 << 8);
- temp |= (P0 << 16);
- temp |= (D << 24);
- *((uint32*)(pred += 4)) = temp;
-
- temp = R0 | (Q1 << 8);
- temp |= (Q0 << 16);
- temp |= (P1 << 24);
- *((uint32*)(pred += 4)) = temp;
-
- temp = S0 | (R1 << 8);
- temp |= (R0 << 16);
- temp |= (Q1 << 24);
- *((uint32*)(pred += 4)) = temp;
-
- }
-
- /* vertical left */
- mode_avail[AVC_I4_Vertical_Left] = 0;
- if (availability.top)
- {
- mode_avail[AVC_I4_Vertical_Left] = 1;
- pred = encvid->pred_i4[AVC_I4_Vertical_Left];
-
- x0 = P_A + P_B + 1;
- x1 = P_B + P_C + 1;
- x2 = P_C + P_D + 1;
- if (availability.top_right)
- {
- x3 = P_D + P_E + 1;
- x4 = P_E + P_F + 1;
- x5 = P_F + P_G + 1;
- }
- else
- {
- x3 = x4 = x5 = (P_D << 1) + 1;
- }
-
- temp1 = (x0 >> 1);
- temp1 |= ((x1 >> 1) << 8);
- temp1 |= ((x2 >> 1) << 16);
- temp1 |= ((x3 >> 1) << 24);
-
- *((uint32*)pred) = temp1;
-
- temp2 = ((x0 + x1) >> 2);
- temp2 |= (((x1 + x2) >> 2) << 8);
- temp2 |= (((x2 + x3) >> 2) << 16);
- temp2 |= (((x3 + x4) >> 2) << 24);
-
- *((uint32*)(pred += 4)) = temp2;
-
- temp1 = (temp1 >> 8) | ((x4 >> 1) << 24); /* rotate out old value */
- *((uint32*)(pred += 4)) = temp1;
-
- temp2 = (temp2 >> 8) | (((x4 + x5) >> 2) << 24); /* rotate out old value */
- *((uint32*)(pred += 4)) = temp2;
- }
-
- //===== LOOP OVER ALL 4x4 INTRA PREDICTION MODES =====
- // can re-order the search here instead of going in order
-
- // find most probable mode
- encvid->mostProbableI4Mode[blkidx] = mostProbableMode = FindMostProbableI4Mode(video, blkidx);
-
- min_cost = 0xFFFF;
-
- for (ipmode = 0; ipmode < AVCNumI4PredMode; ipmode++)
- {
- if (mode_avail[ipmode] == TRUE)
- {
- cost = (ipmode == mostProbableMode) ? 0 : fixedcost;
- pred = encvid->pred_i4[ipmode];
-
- cost_i4(org, org_pitch, pred, &cost);
-
- if (cost < min_cost)
- {
- currMB->i4Mode[blkidx] = (AVCIntra4x4PredMode)ipmode;
- min_cost = cost;
- min_sad = cost - ((ipmode == mostProbableMode) ? 0 : fixedcost);
- }
- }
- }
-
- if (blkidx == 0)
- {
- encvid->i4_sad = min_sad;
- }
- else
- {
- encvid->i4_sad += min_sad;
- }
-
- return min_cost;
-}
-
-int FindMostProbableI4Mode(AVCCommonObj *video, int blkidx)
-{
- int dcOnlyPredictionFlag;
- AVCMacroblock *currMB = video->currMB;
- int intra4x4PredModeA, intra4x4PredModeB, predIntra4x4PredMode;
-
-
- dcOnlyPredictionFlag = 0;
- if (blkidx&0x3)
- {
- intra4x4PredModeA = currMB->i4Mode[blkidx-1]; // block to the left
- }
- else /* for blk 0, 4, 8, 12 */
- {
- if (video->intraAvailA)
- {
- if (video->mblock[video->mbAddrA].mbMode == AVC_I4)
- {
- intra4x4PredModeA = video->mblock[video->mbAddrA].i4Mode[blkidx + 3];
- }
- else
- {
- intra4x4PredModeA = AVC_I4_DC;
- }
- }
- else
- {
- dcOnlyPredictionFlag = 1;
- goto PRED_RESULT_READY; // skip below
- }
- }
-
- if (blkidx >> 2)
- {
- intra4x4PredModeB = currMB->i4Mode[blkidx-4]; // block above
- }
- else /* block 0, 1, 2, 3 */
- {
- if (video->intraAvailB)
- {
- if (video->mblock[video->mbAddrB].mbMode == AVC_I4)
- {
- intra4x4PredModeB = video->mblock[video->mbAddrB].i4Mode[blkidx+12];
- }
- else
- {
- intra4x4PredModeB = AVC_I4_DC;
- }
- }
- else
- {
- dcOnlyPredictionFlag = 1;
- }
- }
-
-PRED_RESULT_READY:
- if (dcOnlyPredictionFlag)
- {
- intra4x4PredModeA = intra4x4PredModeB = AVC_I4_DC;
- }
-
- predIntra4x4PredMode = AVC_MIN(intra4x4PredModeA, intra4x4PredModeB);
-
- return predIntra4x4PredMode;
-}
-
-void cost_i4(uint8 *org, int org_pitch, uint8 *pred, uint16 *cost)
-{
- int k;
- int16 res[16], *pres;
- int m0, m1, m2, m3, tmp1;
- int satd = 0;
-
- pres = res;
- // horizontal transform
- k = 4;
- while (k > 0)
- {
- m0 = org[0] - pred[0];
- m3 = org[3] - pred[3];
- m0 += m3;
- m3 = m0 - (m3 << 1);
- m1 = org[1] - pred[1];
- m2 = org[2] - pred[2];
- m1 += m2;
- m2 = m1 - (m2 << 1);
- pres[0] = m0 + m1;
- pres[2] = m0 - m1;
- pres[1] = m2 + m3;
- pres[3] = m3 - m2;
-
- org += org_pitch;
- pres += 4;
- pred += 4;
- k--;
- }
- /* vertical transform */
- pres = res;
- k = 4;
- while (k > 0)
- {
- m0 = pres[0];
- m3 = pres[12];
- m0 += m3;
- m3 = m0 - (m3 << 1);
- m1 = pres[4];
- m2 = pres[8];
- m1 += m2;
- m2 = m1 - (m2 << 1);
- pres[0] = m0 + m1;
- pres[8] = m0 - m1;
- pres[4] = m2 + m3;
- pres[12] = m3 - m2;
-
- pres++;
- k--;
-
- }
-
- pres = res;
- k = 4;
- while (k > 0)
- {
- tmp1 = *pres++;
- satd += ((tmp1 >= 0) ? tmp1 : -tmp1);
- tmp1 = *pres++;
- satd += ((tmp1 >= 0) ? tmp1 : -tmp1);
- tmp1 = *pres++;
- satd += ((tmp1 >= 0) ? tmp1 : -tmp1);
- tmp1 = *pres++;
- satd += ((tmp1 >= 0) ? tmp1 : -tmp1);
- k--;
- }
-
- satd = (satd + 1) >> 1;
- *cost += satd;
-
- return ;
-}
-
-void chroma_intra_search(AVCEncObject *encvid)
-{
- AVCCommonObj *video = encvid->common;
- AVCPictureData *currPic = video->currPic;
-
- int x_pos = video->mb_x << 3;
- int y_pos = video->mb_y << 3;
- int pitch = currPic->pitch >> 1;
- int offset = y_pos * pitch + x_pos;
-
- uint8 *comp_ref_x, *comp_ref_y, *pred;
- int sum_x0, sum_x1, sum_y0, sum_y1;
- int pred_0[2], pred_1[2], pred_2[2], pred_3[2];
- uint32 pred_a, pred_b, pred_c, pred_d;
- int i, j, component;
- int a_16, b, c, factor_c, topleft;
- int H, V, value;
- uint8 *comp_ref_x0, *comp_ref_x1, *comp_ref_y0, *comp_ref_y1;
-
- uint8 *curCb = currPic->Scb + offset;
- uint8 *curCr = currPic->Scr + offset;
-
- uint8 *orgCb, *orgCr;
- AVCFrameIO *currInput = encvid->currInput;
- AVCMacroblock *currMB = video->currMB;
- int org_pitch;
- int cost, mincost;
-
- /* evaluate DC mode */
- if (video->intraAvailB & video->intraAvailA)
- {
- comp_ref_x = curCb - pitch;
- comp_ref_y = curCb - 1;
-
- for (i = 0; i < 2; i++)
- {
- pred_a = *((uint32*)comp_ref_x);
- comp_ref_x += 4;
- pred_b = (pred_a >> 8) & 0xFF00FF;
- pred_a &= 0xFF00FF;
- pred_a += pred_b;
- pred_a += (pred_a >> 16);
- sum_x0 = pred_a & 0xFFFF;
-
- pred_a = *((uint32*)comp_ref_x);
- pred_b = (pred_a >> 8) & 0xFF00FF;
- pred_a &= 0xFF00FF;
- pred_a += pred_b;
- pred_a += (pred_a >> 16);
- sum_x1 = pred_a & 0xFFFF;
-
- pred_1[i] = (sum_x1 + 2) >> 2;
-
- sum_y0 = *comp_ref_y;
- sum_y0 += *(comp_ref_y += pitch);
- sum_y0 += *(comp_ref_y += pitch);
- sum_y0 += *(comp_ref_y += pitch);
-
- sum_y1 = *(comp_ref_y += pitch);
- sum_y1 += *(comp_ref_y += pitch);
- sum_y1 += *(comp_ref_y += pitch);
- sum_y1 += *(comp_ref_y += pitch);
-
- pred_2[i] = (sum_y1 + 2) >> 2;
-
- pred_0[i] = (sum_y0 + sum_x0 + 4) >> 3;
- pred_3[i] = (sum_y1 + sum_x1 + 4) >> 3;
-
- comp_ref_x = curCr - pitch;
- comp_ref_y = curCr - 1;
- }
- }
-
- else if (video->intraAvailA)
- {
- comp_ref_y = curCb - 1;
- for (i = 0; i < 2; i++)
- {
- sum_y0 = *comp_ref_y;
- sum_y0 += *(comp_ref_y += pitch);
- sum_y0 += *(comp_ref_y += pitch);
- sum_y0 += *(comp_ref_y += pitch);
-
- sum_y1 = *(comp_ref_y += pitch);
- sum_y1 += *(comp_ref_y += pitch);
- sum_y1 += *(comp_ref_y += pitch);
- sum_y1 += *(comp_ref_y += pitch);
-
- pred_0[i] = pred_1[i] = (sum_y0 + 2) >> 2;
- pred_2[i] = pred_3[i] = (sum_y1 + 2) >> 2;
-
- comp_ref_y = curCr - 1;
- }
- }
- else if (video->intraAvailB)
- {
- comp_ref_x = curCb - pitch;
- for (i = 0; i < 2; i++)
- {
- pred_a = *((uint32*)comp_ref_x);
- comp_ref_x += 4;
- pred_b = (pred_a >> 8) & 0xFF00FF;
- pred_a &= 0xFF00FF;
- pred_a += pred_b;
- pred_a += (pred_a >> 16);
- sum_x0 = pred_a & 0xFFFF;
-
- pred_a = *((uint32*)comp_ref_x);
- pred_b = (pred_a >> 8) & 0xFF00FF;
- pred_a &= 0xFF00FF;
- pred_a += pred_b;
- pred_a += (pred_a >> 16);
- sum_x1 = pred_a & 0xFFFF;
-
- pred_0[i] = pred_2[i] = (sum_x0 + 2) >> 2;
- pred_1[i] = pred_3[i] = (sum_x1 + 2) >> 2;
-
- comp_ref_x = curCr - pitch;
- }
- }
- else
- {
- pred_0[0] = pred_0[1] = pred_1[0] = pred_1[1] =
- pred_2[0] = pred_2[1] = pred_3[0] = pred_3[1] = 128;
- }
-
- pred = encvid->pred_ic[AVC_IC_DC];
-
- pred_a = pred_0[0];
- pred_b = pred_1[0];
- pred_a |= (pred_a << 8);
- pred_a |= (pred_a << 16);
- pred_b |= (pred_b << 8);
- pred_b |= (pred_b << 16);
-
- pred_c = pred_0[1];
- pred_d = pred_1[1];
- pred_c |= (pred_c << 8);
- pred_c |= (pred_c << 16);
- pred_d |= (pred_d << 8);
- pred_d |= (pred_d << 16);
-
-
- for (j = 0; j < 4; j++) /* 4 lines */
- {
- *((uint32*)pred) = pred_a;
- *((uint32*)(pred + 4)) = pred_b;
- *((uint32*)(pred + 8)) = pred_c;
- *((uint32*)(pred + 12)) = pred_d;
- pred += 16; /* move to the next line */
- }
-
- pred_a = pred_2[0];
- pred_b = pred_3[0];
- pred_a |= (pred_a << 8);
- pred_a |= (pred_a << 16);
- pred_b |= (pred_b << 8);
- pred_b |= (pred_b << 16);
-
- pred_c = pred_2[1];
- pred_d = pred_3[1];
- pred_c |= (pred_c << 8);
- pred_c |= (pred_c << 16);
- pred_d |= (pred_d << 8);
- pred_d |= (pred_d << 16);
-
- for (j = 0; j < 4; j++) /* 4 lines */
- {
- *((uint32*)pred) = pred_a;
- *((uint32*)(pred + 4)) = pred_b;
- *((uint32*)(pred + 8)) = pred_c;
- *((uint32*)(pred + 12)) = pred_d;
- pred += 16; /* move to the next line */
- }
-
- /* predict horizontal mode */
- if (video->intraAvailA)
- {
- comp_ref_y = curCb - 1;
- comp_ref_x = curCr - 1;
- pred = encvid->pred_ic[AVC_IC_Horizontal];
-
- for (i = 4; i < 6; i++)
- {
- for (j = 0; j < 4; j++)
- {
- pred_a = *comp_ref_y;
- comp_ref_y += pitch;
- pred_a |= (pred_a << 8);
- pred_a |= (pred_a << 16);
- *((uint32*)pred) = pred_a;
- *((uint32*)(pred + 4)) = pred_a;
-
- pred_a = *comp_ref_x;
- comp_ref_x += pitch;
- pred_a |= (pred_a << 8);
- pred_a |= (pred_a << 16);
- *((uint32*)(pred + 8)) = pred_a;
- *((uint32*)(pred + 12)) = pred_a;
-
- pred += 16;
- }
- }
- }
-
- /* vertical mode */
- if (video->intraAvailB)
- {
- comp_ref_x = curCb - pitch;
- comp_ref_y = curCr - pitch;
- pred = encvid->pred_ic[AVC_IC_Vertical];
-
- pred_a = *((uint32*)comp_ref_x);
- pred_b = *((uint32*)(comp_ref_x + 4));
- pred_c = *((uint32*)comp_ref_y);
- pred_d = *((uint32*)(comp_ref_y + 4));
-
- for (j = 0; j < 8; j++)
- {
- *((uint32*)pred) = pred_a;
- *((uint32*)(pred + 4)) = pred_b;
- *((uint32*)(pred + 8)) = pred_c;
- *((uint32*)(pred + 12)) = pred_d;
- pred += 16;
- }
- }
-
- /* Intra_Chroma_Plane */
- if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
- {
- comp_ref_x = curCb - pitch;
- comp_ref_y = curCb - 1;
- topleft = curCb[-pitch-1];
-
- pred = encvid->pred_ic[AVC_IC_Plane];
- for (component = 0; component < 2; component++)
- {
- H = V = 0;
- comp_ref_x0 = comp_ref_x + 4;
- comp_ref_x1 = comp_ref_x + 2;
- comp_ref_y0 = comp_ref_y + (pitch << 2);
- comp_ref_y1 = comp_ref_y + (pitch << 1);
- for (i = 1; i < 4; i++)
- {
- H += i * (*comp_ref_x0++ - *comp_ref_x1--);
- V += i * (*comp_ref_y0 - *comp_ref_y1);
- comp_ref_y0 += pitch;
- comp_ref_y1 -= pitch;
- }
- H += i * (*comp_ref_x0++ - topleft);
- V += i * (*comp_ref_y0 - *comp_ref_y1);
-
- a_16 = ((*(comp_ref_x + 7) + *(comp_ref_y + 7 * pitch)) << 4) + 16;
- b = (17 * H + 16) >> 5;
- c = (17 * V + 16) >> 5;
-
- pred_a = 0;
- for (i = 4; i < 6; i++)
- {
- for (j = 0; j < 4; j++)
- {
- factor_c = a_16 + c * (pred_a++ - 3);
-
- factor_c -= 3 * b;
-
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- pred_b = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- pred_b |= (value << 8);
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- pred_b |= (value << 16);
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- pred_b |= (value << 24);
- *((uint32*)pred) = pred_b;
-
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- pred_b = value;
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- pred_b |= (value << 8);
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- pred_b |= (value << 16);
- value = factor_c >> 5;
- factor_c += b;
- CLIP_RESULT(value)
- pred_b |= (value << 24);
- *((uint32*)(pred + 4)) = pred_b;
- pred += 16;
- }
- }
-
- pred -= 120; /* point to cr */
- comp_ref_x = curCr - pitch;
- comp_ref_y = curCr - 1;
- topleft = curCr[-pitch-1];
- }
- }
-
- /* now evaluate it */
-
- org_pitch = (currInput->pitch) >> 1;
- offset = x_pos + y_pos * org_pitch;
-
- orgCb = currInput->YCbCr[1] + offset;
- orgCr = currInput->YCbCr[2] + offset;
-
- mincost = 0x7fffffff;
- cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_DC], mincost);
- if (cost < mincost)
- {
- mincost = cost;
- currMB->intra_chroma_pred_mode = AVC_IC_DC;
- }
-
- if (video->intraAvailA)
- {
- cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_Horizontal], mincost);
- if (cost < mincost)
- {
- mincost = cost;
- currMB->intra_chroma_pred_mode = AVC_IC_Horizontal;
- }
- }
-
- if (video->intraAvailB)
- {
- cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_Vertical], mincost);
- if (cost < mincost)
- {
- mincost = cost;
- currMB->intra_chroma_pred_mode = AVC_IC_Vertical;
- }
- }
-
- if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
- {
- cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_Plane], mincost);
- if (cost < mincost)
- {
- mincost = cost;
- currMB->intra_chroma_pred_mode = AVC_IC_Plane;
- }
- }
-
-
- return ;
-}
-
-
-int SATDChroma(uint8 *orgCb, uint8 *orgCr, int org_pitch, uint8 *pred, int min_cost)
-{
- int cost;
- /* first take difference between orgCb, orgCr and pred */
- int16 res[128], *pres; // residue
- int m0, m1, m2, m3, tmp1;
- int j, k;
-
- pres = res;
- org_pitch -= 8;
- // horizontal transform
- for (j = 0; j < 8; j++)
- {
- k = 2;
- while (k > 0)
- {
- m0 = orgCb[0] - pred[0];
- m3 = orgCb[3] - pred[3];
- m0 += m3;
- m3 = m0 - (m3 << 1);
- m1 = orgCb[1] - pred[1];
- m2 = orgCb[2] - pred[2];
- m1 += m2;
- m2 = m1 - (m2 << 1);
- pres[0] = m0 + m1;
- pres[2] = m0 - m1;
- pres[1] = m2 + m3;
- pres[3] = m3 - m2;
-
- orgCb += 4;
- pres += 4;
- pred += 4;
- k--;
- }
- orgCb += org_pitch;
- k = 2;
- while (k > 0)
- {
- m0 = orgCr[0] - pred[0];
- m3 = orgCr[3] - pred[3];
- m0 += m3;
- m3 = m0 - (m3 << 1);
- m1 = orgCr[1] - pred[1];
- m2 = orgCr[2] - pred[2];
- m1 += m2;
- m2 = m1 - (m2 << 1);
- pres[0] = m0 + m1;
- pres[2] = m0 - m1;
- pres[1] = m2 + m3;
- pres[3] = m3 - m2;
-
- orgCr += 4;
- pres += 4;
- pred += 4;
- k--;
- }
- orgCr += org_pitch;
- }
-
- /* vertical transform */
- for (j = 0; j < 2; j++)
- {
- pres = res + (j << 6);
- k = 16;
- while (k > 0)
- {
- m0 = pres[0];
- m3 = pres[3<<4];
- m0 += m3;
- m3 = m0 - (m3 << 1);
- m1 = pres[1<<4];
- m2 = pres[2<<4];
- m1 += m2;
- m2 = m1 - (m2 << 1);
- pres[0] = m0 + m1;
- pres[2<<4] = m0 - m1;
- pres[1<<4] = m2 + m3;
- pres[3<<4] = m3 - m2;
-
- pres++;
- k--;
- }
- }
-
- /* now sum of absolute value */
- pres = res;
- cost = 0;
- k = 128;
- while (k > 0)
- {
- tmp1 = *pres++;
- cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
- tmp1 = *pres++;
- cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
- tmp1 = *pres++;
- cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
- tmp1 = *pres++;
- cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
- tmp1 = *pres++;
- cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
- tmp1 = *pres++;
- cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
- tmp1 = *pres++;
- cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
- tmp1 = *pres++;
- cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
- k -= 8;
- if (cost > min_cost) /* early drop out */
- {
- return cost;
- }
- }
-
- return cost;
-}
-
-
-
-///////////////////////////////// old code, unused
-/* find the best intra mode based on original (unencoded) frame */
-/* output is
- currMB->mb_intra, currMB->mbMode,
- currMB->i16Mode (if currMB->mbMode == AVC_I16)
- currMB->i4Mode[..] (if currMB->mbMode == AVC_I4) */
-
-#ifdef FIXED_INTRAPRED_MODE
-void MBIntraSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum)
-{
- (void)(mbNum);
-
- AVCCommonObj *video = encvid->common;
- int indx, block_x, block_y;
-
- video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;
-
- if (!video->currPicParams->constrained_intra_pred_flag)
- {
- video->intraAvailA = video->mbAvailA;
- video->intraAvailB = video->mbAvailB;
- video->intraAvailC = video->mbAvailC;
- video->intraAvailD = video->mbAvailD;
- }
- else
- {
- if (video->mbAvailA)
- {
- video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;
- }
- if (video->mbAvailB)
- {
- video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;
- }
- if (video->mbAvailC)
- {
- video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;
- }
- if (video->mbAvailD)
- {
- video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;
- }
- }
-
- currMB->mb_intra = TRUE;
- currMB->mbMode = FIXED_INTRAPRED_MODE;
-
- if (currMB->mbMode == AVC_I16)
- {
- currMB->i16Mode = FIXED_I16_MODE;
-
- if (FIXED_I16_MODE == AVC_I16_Vertical && !video->intraAvailB)
- {
- currMB->i16Mode = AVC_I16_DC;
- }
-
- if (FIXED_I16_MODE == AVC_I16_Horizontal && !video->intraAvailA)
- {
- currMB->i16Mode = AVC_I16_DC;
- }
-
- if (FIXED_I16_MODE == AVC_I16_Plane && !(video->intraAvailA && video->intraAvailB && video->intraAvailD))
- {
- currMB->i16Mode = AVC_I16_DC;
- }
- }
- else //if(currMB->mbMode == AVC_I4)
- {
- for (indx = 0; indx < 16; indx++)
- {
- block_x = blkIdx2blkX[indx];
- block_y = blkIdx2blkY[indx];
-
- currMB->i4Mode[(block_y<<2)+block_x] = FIXED_I4_MODE;
-
- if (FIXED_I4_MODE == AVC_I4_Vertical && !(block_y > 0 || video->intraAvailB))
- {
- currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
- }
-
- if (FIXED_I4_MODE == AVC_I4_Horizontal && !(block_x || video->intraAvailA))
- {
- currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
- }
-
- if (FIXED_I4_MODE == AVC_I4_Diagonal_Down_Left &&
- (block_y == 0 && !video->intraAvailB))
- {
- currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
- }
-
- if (FIXED_I4_MODE == AVC_I4_Diagonal_Down_Right &&
- !((block_y && block_x)
- || (block_y && video->intraAvailA)
- || (block_x && video->intraAvailB)
- || (video->intraAvailA && video->intraAvailD && video->intraAvailB)))
- {
- currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
- }
-
- if (FIXED_I4_MODE == AVC_I4_Vertical_Right &&
- !((block_y && block_x)
- || (block_y && video->intraAvailA)
- || (block_x && video->intraAvailB)
- || (video->intraAvailA && video->intraAvailD && video->intraAvailB)))
- {
- currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
- }
-
- if (FIXED_I4_MODE == AVC_I4_Horizontal_Down &&
- !((block_y && block_x)
- || (block_y && video->intraAvailA)
- || (block_x && video->intraAvailB)
- || (video->intraAvailA && video->intraAvailD && video->intraAvailB)))
- {
- currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
- }
-
- if (FIXED_I4_MODE == AVC_I4_Vertical_Left &&
- (block_y == 0 && !video->intraAvailB))
- {
- currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
- }
-
- if (FIXED_I4_MODE == AVC_I4_Horizontal_Up && !(block_x || video->intraAvailA))
- {
- currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
- }
- }
- }
-
- currMB->intra_chroma_pred_mode = FIXED_INTRA_CHROMA_MODE;
-
- if (FIXED_INTRA_CHROMA_MODE == AVC_IC_Horizontal && !(video->intraAvailA))
- {
- currMB->intra_chroma_pred_mode = AVC_IC_DC;
- }
-
- if (FIXED_INTRA_CHROMA_MODE == AVC_IC_Vertical && !(video->intraAvailB))
- {
- currMB->intra_chroma_pred_mode = AVC_IC_DC;
- }
-
- if (FIXED_INTRA_CHROMA_MODE == AVC_IC_Plane && !(video->intraAvailA && video->intraAvailB && video->intraAvailD))
- {
- currMB->intra_chroma_pred_mode = AVC_IC_DC;
- }
-
- /* also reset the motion vectors */
- /* set MV and Ref_Idx codes of Intra blocks in P-slices */
- memset(currMB->mvL0, 0, sizeof(int32)*16);
- currMB->ref_idx_L0[0] = -1;
- currMB->ref_idx_L0[1] = -1;
- currMB->ref_idx_L0[2] = -1;
- currMB->ref_idx_L0[3] = -1;
-
- // output from this function: currMB->mbMode should be set to AVC_I4, AVC_I16,
- // or another value in the AVCMBMode enum, along with mbType, mb_intra and intra_chroma_pred_mode
- return ;
-}
-#else // faster combined prediction+SAD calculation
-void MBIntraSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum)
-{
- AVCCommonObj *video = encvid->common;
- AVCFrameIO *currInput = encvid->currInput;
- uint8 *curL, *curCb, *curCr;
- uint8 *comp, *pred_block;
- int block_x, block_y, offset;
- uint sad, sad4, sadI4, sadI16;
- int component, SubBlock_indx, temp;
- int pitch = video->currPic->pitch;
-
- /* calculate the cost of each intra prediction mode and compare to the
- inter mode */
- /* full search for all intra prediction */
- offset = (video->mb_y << 4) * pitch + (video->mb_x << 4);
- curL = currInput->YCbCr[0] + offset;
- pred_block = video->pred_block + 84;
-
- /* Assuming that InitNeighborAvailability has been called prior to this function */
- video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;
-
- if (!video->currPicParams->constrained_intra_pred_flag)
- {
- video->intraAvailA = video->mbAvailA;
- video->intraAvailB = video->mbAvailB;
- video->intraAvailC = video->mbAvailC;
- video->intraAvailD = video->mbAvailD;
- }
- else
- {
- if (video->mbAvailA)
- {
- video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;
- }
- if (video->mbAvailB)
- {
- video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;
- }
- if (video->mbAvailC)
- {
- video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;
- }
- if (video->mbAvailD)
- {
- video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;
- }
- }
-
- /* currently we're doing an exhaustive search; a smarter search will be used later */
-
- /* I16 modes */
- curL = currInput->YCbCr[0] + offset;
- video->pintra_pred_top = curL - pitch;
- video->pintra_pred_left = curL - 1;
- if (video->mb_y)
- {
- video->intra_pred_topleft = *(curL - pitch - 1);
- }
-
- /* Intra_16x16_Vertical */
- sadI16 = 65536;
- /* check availability of top */
- if (video->intraAvailB)
- {
- sad = SAD_I16_Vert(video, curL, sadI16);
-
- if (sad < sadI16)
- {
- sadI16 = sad;
- currMB->i16Mode = AVC_I16_Vertical;
- }
- }
- /* Intra_16x16_Horizontal */
- /* check availability of left */
- if (video->intraAvailA)
- {
- sad = SAD_I16_HorzDC(video, curL, AVC_I16_Horizontal, sadI16);
-
- if (sad < sadI16)
- {
- sadI16 = sad;
- currMB->i16Mode = AVC_I16_Horizontal;
- }
- }
-
- /* Intra_16x16_DC, default mode */
- sad = SAD_I16_HorzDC(video, curL, AVC_I16_DC, sadI16);
- if (sad < sadI16)
- {
- sadI16 = sad;
- currMB->i16Mode = AVC_I16_DC;
- }
-
- /* Intra_16x16_Plane */
- if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
- {
- sad = SAD_I16_Plane(video, curL, sadI16);
-
- if (sad < sadI16)
- {
- sadI16 = sad;
- currMB->i16Mode = AVC_I16_Plane;
- }
- }
-
- sadI16 >>= 1; /* before comparison */
-
- /* selection between intra4, intra16 or inter mode */
- if (sadI16 < encvid->min_cost)
- {
- currMB->mb_intra = TRUE;
- currMB->mbMode = AVC_I16;
- encvid->min_cost = sadI16;
- }
-
- if (currMB->mb_intra) /* only do the chrominance search when intra is decided */
- {
- /* Note that we might be able to guess the type of prediction from
- the luma prediction type */
-
- /* now search for the best chroma intra prediction */
- offset = (offset >> 2) + (video->mb_x << 2);
- curCb = currInput->YCbCr[1] + offset;
- curCr = currInput->YCbCr[2] + offset;
-
- pitch >>= 1;
- video->pintra_pred_top_cb = curCb - pitch;
- video->pintra_pred_left_cb = curCb - 1;
- video->pintra_pred_top_cr = curCr - pitch;
- video->pintra_pred_left_cr = curCr - 1;
-
- if (video->mb_y)
- {
- video->intra_pred_topleft_cb = *(curCb - pitch - 1);
- video->intra_pred_topleft_cr = *(curCr - pitch - 1);
- }
-
- /* Intra_Chroma_DC */
- sad4 = SAD_Chroma_DC(video, curCb, curCr, 65536);
- currMB->intra_chroma_pred_mode = AVC_IC_DC;
-
- /* Intra_Chroma_Horizontal */
- if (video->intraAvailA)
- {
- /* check availability of left */
- sad = SAD_Chroma_Horz(video, curCb, curCr, sad4);
- if (sad < sad4)
- {
- sad4 = sad;
- currMB->intra_chroma_pred_mode = AVC_IC_Horizontal;
- }
- }
-
- /* Intra_Chroma_Vertical */
- if (video->intraAvailB)
- {
- /* check availability of top */
- sad = SAD_Chroma_Vert(video, curCb, curCr, sad4);
-
- if (sad < sad4)
- {
- sad4 = sad;
- currMB->intra_chroma_pred_mode = AVC_IC_Vertical;
- }
- }
-
- /* Intra_Chroma_Plane */
- if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
- {
- /* check availability of top and left */
- Intra_Chroma_Plane(video, pitch);
-
- sad = SADChroma(pred_block + 452, curCb, curCr, pitch);
-
- if (sad < sad4)
- {
- sad4 = sad;
- currMB->intra_chroma_pred_mode = AVC_IC_Plane;
- }
- }
-
- /* also reset the motion vectors */
- /* set MV and Ref_Idx codes of Intra blocks in P-slices */
- memset(currMB->mvL0, 0, sizeof(int32)*16);
- memset(currMB->ref_idx_L0, -1, sizeof(int16)*4);
-
- }
-
- // output from this function: currMB->mbMode should be set to AVC_I4, AVC_I16,
- // or another value in the AVCMBMode enum, along with mbType, mb_intra and intra_chroma_pred_mode
-
- return ;
-}
-#endif
-
-
diff --git a/media/libstagefright/codecs/avc/enc/src/motion_comp.cpp b/media/libstagefright/codecs/avc/enc/src/motion_comp.cpp
deleted file mode 100644
index d19125f..0000000
--- a/media/libstagefright/codecs/avc/enc/src/motion_comp.cpp
+++ /dev/null
@@ -1,2152 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-#include "avcenc_int.h"
-
-
-#define CLIP_RESULT(x) if((uint)(x) > 0xFF){ \
- (x) = 0xFF & (~((x)>>31));}
-
-/* (blkwidth << 2) + (dy << 1) + dx */
-static void (*const eChromaMC_SIMD[8])(uint8 *, int , int , int , uint8 *, int, int , int) =
-{
- &eChromaFullMC_SIMD,
- &eChromaHorizontalMC_SIMD,
- &eChromaVerticalMC_SIMD,
- &eChromaDiagonalMC_SIMD,
- &eChromaFullMC_SIMD,
- &eChromaHorizontalMC2_SIMD,
- &eChromaVerticalMC2_SIMD,
- &eChromaDiagonalMC2_SIMD
-};
-/* Perform motion prediction and compensation with residue if exist. */
-void AVCMBMotionComp(AVCEncObject *encvid, AVCCommonObj *video)
-{
- (void)(encvid);
-
- AVCMacroblock *currMB = video->currMB;
- AVCPictureData *currPic = video->currPic;
- int mbPartIdx, subMbPartIdx;
- int ref_idx;
- int offset_MbPart_indx = 0;
- int16 *mv;
- uint32 x_pos, y_pos;
- uint8 *curL, *curCb, *curCr;
- uint8 *ref_l, *ref_Cb, *ref_Cr;
- uint8 *predBlock, *predCb, *predCr;
- int block_x, block_y, offset_x, offset_y, offsetP, offset;
- int x_position = (video->mb_x << 4);
- int y_position = (video->mb_y << 4);
- int MbHeight, MbWidth, mbPartIdx_X, mbPartIdx_Y, offset_indx;
- int picWidth = currPic->width;
- int picPitch = currPic->pitch;
- int picHeight = currPic->height;
- uint32 tmp_word;
-
- tmp_word = y_position * picPitch;
- curL = currPic->Sl + tmp_word + x_position;
- offset = (tmp_word >> 2) + (x_position >> 1);
- curCb = currPic->Scb + offset;
- curCr = currPic->Scr + offset;
-
- predBlock = curL;
- predCb = curCb;
- predCr = curCr;
-
- GetMotionVectorPredictor(video, 1);
-
- for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
- {
- MbHeight = currMB->SubMbPartHeight[mbPartIdx];
- MbWidth = currMB->SubMbPartWidth[mbPartIdx];
- mbPartIdx_X = ((mbPartIdx + offset_MbPart_indx) & 1);
- mbPartIdx_Y = (mbPartIdx + offset_MbPart_indx) >> 1;
- ref_idx = currMB->ref_idx_L0[(mbPartIdx_Y << 1) + mbPartIdx_X];
- offset_indx = 0;
-
- ref_l = video->RefPicList0[ref_idx]->Sl;
- ref_Cb = video->RefPicList0[ref_idx]->Scb;
- ref_Cr = video->RefPicList0[ref_idx]->Scr;
-
- for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
- {
- block_x = (mbPartIdx_X << 1) + ((subMbPartIdx + offset_indx) & 1);
- block_y = (mbPartIdx_Y << 1) + (((subMbPartIdx + offset_indx) >> 1) & 1);
- mv = (int16*)(currMB->mvL0 + block_x + (block_y << 2));
- offset_x = x_position + (block_x << 2);
- offset_y = y_position + (block_y << 2);
- x_pos = (offset_x << 2) + *mv++; /*quarter pel */
- y_pos = (offset_y << 2) + *mv; /*quarter pel */
-
- //offset = offset_y * currPic->width;
- //offsetC = (offset >> 2) + (offset_x >> 1);
- offsetP = (block_y << 2) * picPitch + (block_x << 2);
- eLumaMotionComp(ref_l, picPitch, picHeight, x_pos, y_pos,
- /*comp_Sl + offset + offset_x,*/
- predBlock + offsetP, picPitch, MbWidth, MbHeight);
-
- offsetP = (block_y * picWidth) + (block_x << 1);
- eChromaMotionComp(ref_Cb, picWidth >> 1, picHeight >> 1, x_pos, y_pos,
- /*comp_Scb + offsetC,*/
- predCb + offsetP, picPitch >> 1, MbWidth >> 1, MbHeight >> 1);
- eChromaMotionComp(ref_Cr, picWidth >> 1, picHeight >> 1, x_pos, y_pos,
- /*comp_Scr + offsetC,*/
- predCr + offsetP, picPitch >> 1, MbWidth >> 1, MbHeight >> 1);
-
- offset_indx = currMB->SubMbPartWidth[mbPartIdx] >> 3;
- }
- offset_MbPart_indx = currMB->MbPartWidth >> 4;
- }
-
- return ;
-}
-
-
-/* preform the actual motion comp here */
-void eLumaMotionComp(uint8 *ref, int picpitch, int picheight,
- int x_pos, int y_pos,
- uint8 *pred, int pred_pitch,
- int blkwidth, int blkheight)
-{
- (void)(picheight);
-
- int dx, dy;
- int temp2[21][21]; /* for intermediate results */
- uint8 *ref2;
-
- dx = x_pos & 3;
- dy = y_pos & 3;
- x_pos = x_pos >> 2; /* round it to full-pel resolution */
- y_pos = y_pos >> 2;
-
- /* perform actual motion compensation */
- if (dx == 0 && dy == 0)
- { /* fullpel position *//* G */
-
- ref += y_pos * picpitch + x_pos;
-
- eFullPelMC(ref, picpitch, pred, pred_pitch, blkwidth, blkheight);
-
- } /* other positions */
- else if (dy == 0)
- { /* no vertical interpolation *//* a,b,c*/
-
- ref += y_pos * picpitch + x_pos;
-
- eHorzInterp1MC(ref, picpitch, pred, pred_pitch, blkwidth, blkheight, dx);
- }
- else if (dx == 0)
- { /*no horizontal interpolation *//* d,h,n */
-
- ref += y_pos * picpitch + x_pos;
-
- eVertInterp1MC(ref, picpitch, pred, pred_pitch, blkwidth, blkheight, dy);
- }
- else if (dy == 2)
- { /* horizontal cross *//* i, j, k */
-
- ref += y_pos * picpitch + x_pos - 2; /* move to the left 2 pixels */
-
- eVertInterp2MC(ref, picpitch, &temp2[0][0], 21, blkwidth + 5, blkheight);
-
- eHorzInterp2MC(&temp2[0][2], 21, pred, pred_pitch, blkwidth, blkheight, dx);
- }
- else if (dx == 2)
- { /* vertical cross */ /* f,q */
-
- ref += (y_pos - 2) * picpitch + x_pos; /* move to up 2 lines */
-
- eHorzInterp3MC(ref, picpitch, &temp2[0][0], 21, blkwidth, blkheight + 5);
- eVertInterp3MC(&temp2[2][0], 21, pred, pred_pitch, blkwidth, blkheight, dy);
- }
- else
- { /* diagonal *//* e,g,p,r */
-
- ref2 = ref + (y_pos + (dy / 2)) * picpitch + x_pos;
-
- ref += (y_pos * picpitch) + x_pos + (dx / 2);
-
- eDiagonalInterpMC(ref2, ref, picpitch, pred, pred_pitch, blkwidth, blkheight);
- }
-
- return ;
-}
-
-void eCreateAlign(uint8 *ref, int picpitch, int y_pos,
- uint8 *out, int blkwidth, int blkheight)
-{
- int i, j;
- int offset, out_offset;
- uint32 prev_pix, result, pix1, pix2, pix4;
-
- ref += y_pos * picpitch;// + x_pos;
- out_offset = 24 - blkwidth;
-
- //switch(x_pos&0x3){
- switch (((intptr_t)ref)&0x3)
- {
- case 1:
- offset = picpitch - blkwidth - 3;
- for (j = 0; j < blkheight; j++)
- {
- pix1 = *ref++;
- pix2 = *((uint16*)ref);
- ref += 2;
- result = (pix2 << 8) | pix1;
-
- for (i = 3; i < blkwidth; i += 4)
- {
- pix4 = *((uint32*)ref);
- ref += 4;
- prev_pix = (pix4 << 24) & 0xFF000000; /* mask out byte belong to previous word */
- result |= prev_pix;
- *((uint32*)out) = result; /* write 4 bytes */
- out += 4;
- result = pix4 >> 8; /* for the next loop */
- }
- ref += offset;
- out += out_offset;
- }
- break;
- case 2:
- offset = picpitch - blkwidth - 2;
- for (j = 0; j < blkheight; j++)
- {
- result = *((uint16*)ref);
- ref += 2;
- for (i = 2; i < blkwidth; i += 4)
- {
- pix4 = *((uint32*)ref);
- ref += 4;
- prev_pix = (pix4 << 16) & 0xFFFF0000; /* mask out byte belong to previous word */
- result |= prev_pix;
- *((uint32*)out) = result; /* write 4 bytes */
- out += 4;
- result = pix4 >> 16; /* for the next loop */
- }
- ref += offset;
- out += out_offset;
- }
- break;
- case 3:
- offset = picpitch - blkwidth - 1;
- for (j = 0; j < blkheight; j++)
- {
- result = *ref++;
- for (i = 1; i < blkwidth; i += 4)
- {
- pix4 = *((uint32*)ref);
- ref += 4;
- prev_pix = (pix4 << 8) & 0xFFFFFF00; /* mask out byte belong to previous word */
- result |= prev_pix;
- *((uint32*)out) = result; /* write 4 bytes */
- out += 4;
- result = pix4 >> 24; /* for the next loop */
- }
- ref += offset;
- out += out_offset;
- }
- break;
- }
-}
-
-void eHorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dx)
-{
- uint8 *p_ref, *tmp;
- uint32 *p_cur;
- uint32 pkres;
- int result, curr_offset, ref_offset;
- int j;
- int32 r0, r1, r2, r3, r4, r5;
- int32 r13, r6;
-
- p_cur = (uint32*)out; /* assume it's word aligned */
- curr_offset = (outpitch - blkwidth) >> 2;
- p_ref = in;
- ref_offset = inpitch - blkwidth;
-
- if (dx&1)
- {
- dx = ((dx >> 1) ? -3 : -4); /* use in 3/4 pel */
- p_ref -= 2;
- r13 = 0;
- for (j = blkheight; j > 0; j--)
- {
- tmp = p_ref + blkwidth;
- r0 = p_ref[0];
- r1 = p_ref[2];
- r0 |= (r1 << 16); /* 0,c,0,a */
- r1 = p_ref[1];
- r2 = p_ref[3];
- r1 |= (r2 << 16); /* 0,d,0,b */
- while (p_ref < tmp)
- {
- r2 = *(p_ref += 4); /* move pointer to e */
- r3 = p_ref[2];
- r2 |= (r3 << 16); /* 0,g,0,e */
- r3 = p_ref[1];
- r4 = p_ref[3];
- r3 |= (r4 << 16); /* 0,h,0,f */
-
- r4 = r0 + r3; /* c+h, a+f */
- r5 = r0 + r1; /* c+d, a+b */
- r6 = r2 + r3; /* g+h, e+f */
- r5 >>= 16;
- r5 |= (r6 << 16); /* e+f, c+d */
- r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */
- r4 += 0x100010; /* +16, +16 */
- r5 = r1 + r2; /* d+g, b+e */
- r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */
- r4 >>= 5;
- r13 |= r4; /* check clipping */
-
- r5 = p_ref[dx+2];
- r6 = p_ref[dx+4];
- r5 |= (r6 << 16);
- r4 += r5;
- r4 += 0x10001;
- r4 = (r4 >> 1) & 0xFF00FF;
-
- r5 = p_ref[4]; /* i */
- r6 = (r5 << 16);
- r5 = r6 | (r2 >> 16);/* 0,i,0,g */
- r5 += r1; /* d+i, b+g */ /* r5 not free */
- r1 >>= 16;
- r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */
- r1 += r2; /* f+g, d+e */
- r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */
- r0 >>= 16;
- r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */
- r0 += r3; /* e+h, c+f */
- r5 += 0x100010; /* 16,16 */
- r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */
- r5 >>= 5;
- r13 |= r5; /* check clipping */
-
- r0 = p_ref[dx+3];
- r1 = p_ref[dx+5];
- r0 |= (r1 << 16);
- r5 += r0;
- r5 += 0x10001;
- r5 = (r5 >> 1) & 0xFF00FF;
-
- r4 |= (r5 << 8); /* pack them together */
- *p_cur++ = r4;
- r1 = r3;
- r0 = r2;
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */
-
- if (r13&0xFF000700) /* need clipping */
- {
- /* move back to the beginning of the line */
- p_ref -= (ref_offset + blkwidth); /* input */
- p_cur -= (outpitch >> 2);
-
- tmp = p_ref + blkwidth;
- for (; p_ref < tmp;)
- {
-
- r0 = *p_ref++;
- r1 = *p_ref++;
- r2 = *p_ref++;
- r3 = *p_ref++;
- r4 = *p_ref++;
- /* first pixel */
- r5 = *p_ref++;
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dx] + 1);
- pkres = (result >> 1) ;
- /* second pixel */
- r0 = *p_ref++;
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dx] + 1);
- result = (result >> 1);
- pkres |= (result << 8);
- /* third pixel */
- r1 = *p_ref++;
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dx] + 1);
- result = (result >> 1);
- pkres |= (result << 16);
- /* fourth pixel */
- r2 = *p_ref++;
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dx] + 1);
- result = (result >> 1);
- pkres |= (result << 24);
- *p_cur++ = pkres; /* write 4 pixels */
- p_ref -= 5; /* offset back to the middle of filter */
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset; /* move to the next line */
- }
- }
- }
- else
- {
- p_ref -= 2;
- r13 = 0;
- for (j = blkheight; j > 0; j--)
- {
- tmp = p_ref + blkwidth;
- r0 = p_ref[0];
- r1 = p_ref[2];
- r0 |= (r1 << 16); /* 0,c,0,a */
- r1 = p_ref[1];
- r2 = p_ref[3];
- r1 |= (r2 << 16); /* 0,d,0,b */
- while (p_ref < tmp)
- {
- r2 = *(p_ref += 4); /* move pointer to e */
- r3 = p_ref[2];
- r2 |= (r3 << 16); /* 0,g,0,e */
- r3 = p_ref[1];
- r4 = p_ref[3];
- r3 |= (r4 << 16); /* 0,h,0,f */
-
- r4 = r0 + r3; /* c+h, a+f */
- r5 = r0 + r1; /* c+d, a+b */
- r6 = r2 + r3; /* g+h, e+f */
- r5 >>= 16;
- r5 |= (r6 << 16); /* e+f, c+d */
- r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */
- r4 += 0x100010; /* +16, +16 */
- r5 = r1 + r2; /* d+g, b+e */
- r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */
- r4 >>= 5;
- r13 |= r4; /* check clipping */
- r4 &= 0xFF00FF; /* mask */
-
- r5 = p_ref[4]; /* i */
- r6 = (r5 << 16);
- r5 = r6 | (r2 >> 16);/* 0,i,0,g */
- r5 += r1; /* d+i, b+g */ /* r5 not free */
- r1 >>= 16;
- r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */
- r1 += r2; /* f+g, d+e */
- r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */
- r0 >>= 16;
- r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */
- r0 += r3; /* e+h, c+f */
- r5 += 0x100010; /* 16,16 */
- r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */
- r5 >>= 5;
- r13 |= r5; /* check clipping */
- r5 &= 0xFF00FF; /* mask */
-
- r4 |= (r5 << 8); /* pack them together */
- *p_cur++ = r4;
- r1 = r3;
- r0 = r2;
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */
-
- if (r13&0xFF000700) /* need clipping */
- {
- /* move back to the beginning of the line */
- p_ref -= (ref_offset + blkwidth); /* input */
- p_cur -= (outpitch >> 2);
-
- tmp = p_ref + blkwidth;
- for (; p_ref < tmp;)
- {
-
- r0 = *p_ref++;
- r1 = *p_ref++;
- r2 = *p_ref++;
- r3 = *p_ref++;
- r4 = *p_ref++;
- /* first pixel */
- r5 = *p_ref++;
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres = result;
- /* second pixel */
- r0 = *p_ref++;
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres |= (result << 8);
- /* third pixel */
- r1 = *p_ref++;
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres |= (result << 16);
- /* fourth pixel */
- r2 = *p_ref++;
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres |= (result << 24);
- *p_cur++ = pkres; /* write 4 pixels */
- p_ref -= 5;
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset;
- }
- }
- }
-
- return ;
-}
-
-void eHorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dx)
-{
- int *p_ref, *tmp;
- uint32 *p_cur;
- uint32 pkres;
- int result, result2, curr_offset, ref_offset;
- int j, r0, r1, r2, r3, r4, r5;
-
- p_cur = (uint32*)out; /* assume it's word aligned */
- curr_offset = (outpitch - blkwidth) >> 2;
- p_ref = in;
- ref_offset = inpitch - blkwidth;
-
- if (dx&1)
- {
- dx = ((dx >> 1) ? -3 : -4); /* use in 3/4 pel */
-
- for (j = blkheight; j > 0 ; j--)
- {
- tmp = p_ref + blkwidth;
- for (; p_ref < tmp;)
- {
-
- r0 = p_ref[-2];
- r1 = p_ref[-1];
- r2 = *p_ref++;
- r3 = *p_ref++;
- r4 = *p_ref++;
- /* first pixel */
- r5 = *p_ref++;
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dx] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- pkres = (result >> 1);
- /* second pixel */
- r0 = *p_ref++;
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dx] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- pkres |= (result << 8);
- /* third pixel */
- r1 = *p_ref++;
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dx] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- pkres |= (result << 16);
- /* fourth pixel */
- r2 = *p_ref++;
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dx] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- pkres |= (result << 24);
- *p_cur++ = pkres; /* write 4 pixels */
- p_ref -= 3; /* offset back to the middle of filter */
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset; /* move to the next line */
- }
- }
- else
- {
- for (j = blkheight; j > 0 ; j--)
- {
- tmp = p_ref + blkwidth;
- for (; p_ref < tmp;)
- {
-
- r0 = p_ref[-2];
- r1 = p_ref[-1];
- r2 = *p_ref++;
- r3 = *p_ref++;
- r4 = *p_ref++;
- /* first pixel */
- r5 = *p_ref++;
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- pkres = result;
- /* second pixel */
- r0 = *p_ref++;
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- pkres |= (result << 8);
- /* third pixel */
- r1 = *p_ref++;
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- pkres |= (result << 16);
- /* fourth pixel */
- r2 = *p_ref++;
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- pkres |= (result << 24);
- *p_cur++ = pkres; /* write 4 pixels */
- p_ref -= 3; /* offset back to the middle of filter */
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset; /* move to the next line */
- }
- }
-
- return ;
-}
-
-void eHorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch,
- int blkwidth, int blkheight)
-{
- uint8 *p_ref, *tmp;
- int *p_cur;
- int result, curr_offset, ref_offset;
- int j, r0, r1, r2, r3, r4, r5;
-
- p_cur = out;
- curr_offset = (outpitch - blkwidth);
- p_ref = in;
- ref_offset = inpitch - blkwidth;
-
- for (j = blkheight; j > 0 ; j--)
- {
- tmp = p_ref + blkwidth;
- for (; p_ref < tmp;)
- {
-
- r0 = p_ref[-2];
- r1 = p_ref[-1];
- r2 = *p_ref++;
- r3 = *p_ref++;
- r4 = *p_ref++;
- /* first pixel */
- r5 = *p_ref++;
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- *p_cur++ = result;
- /* second pixel */
- r0 = *p_ref++;
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- *p_cur++ = result;
- /* third pixel */
- r1 = *p_ref++;
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- *p_cur++ = result;
- /* fourth pixel */
- r2 = *p_ref++;
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- *p_cur++ = result;
- p_ref -= 3; /* move back to the middle of the filter */
- }
- p_cur += curr_offset; /* move to the next line */
- p_ref += ref_offset;
- }
-
- return ;
-}
-void eVertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dy)
-{
- uint8 *p_cur, *p_ref, *tmp;
- int result, curr_offset, ref_offset;
- int j, i;
- int32 r0, r1, r2, r3, r4, r5, r6, r7, r8, r13;
- uint8 tmp_in[24][24];
-
- /* not word-aligned */
- if (((intptr_t)in)&0x3)
- {
- eCreateAlign(in, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5);
- in = &tmp_in[2][0];
- inpitch = 24;
- }
- p_cur = out;
- curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */
- ref_offset = blkheight * inpitch; /* for limit */
-
- curr_offset += 3;
-
- if (dy&1)
- {
- dy = (dy >> 1) ? 0 : -inpitch;
-
- for (j = 0; j < blkwidth; j += 4, in += 4)
- {
- r13 = 0;
- p_ref = in;
- p_cur -= outpitch; /* compensate for the first offset */
- tmp = p_ref + ref_offset; /* limit */
- while (p_ref < tmp) /* the loop un-rolled */
- {
- r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */
- p_ref += inpitch;
- r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */
- r0 &= 0xFF00FF;
-
- r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
-
- r0 += r1;
- r6 += r7;
-
- r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */
- r8 = (r2 >> 8) & 0xFF00FF;
- r2 &= 0xFF00FF;
-
- r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r1 += r2;
-
- r7 += r8;
-
- r0 += 20 * r1;
- r6 += 20 * r7;
- r0 += 0x100010;
- r6 += 0x100010;
-
- r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */
- r8 = (r2 >> 8) & 0xFF00FF;
- r2 &= 0xFF00FF;
-
- r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r1 += r2;
-
- r7 += r8;
-
- r0 -= 5 * r1;
- r6 -= 5 * r7;
-
- r0 >>= 5;
- r6 >>= 5;
- /* clip */
- r13 |= r6;
- r13 |= r0;
- //CLIPPACK(r6,result)
-
- r1 = *((uint32*)(p_ref + dy));
- r2 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r0 += r1;
- r6 += r2;
- r0 += 0x10001;
- r6 += 0x10001;
- r0 = (r0 >> 1) & 0xFF00FF;
- r6 = (r6 >> 1) & 0xFF00FF;
-
- r0 |= (r6 << 8); /* pack it back */
- *((uint32*)(p_cur += outpitch)) = r0;
- }
- p_cur += curr_offset; /* offset to the next pixel */
- if (r13 & 0xFF000700) /* this column need clipping */
- {
- p_cur -= 4;
- for (i = 0; i < 4; i++)
- {
- p_ref = in + i;
- p_cur -= outpitch; /* compensate for the first offset */
-
- tmp = p_ref + ref_offset; /* limit */
- while (p_ref < tmp)
- { /* loop un-rolled */
- r0 = *(p_ref - (inpitch << 1));
- r1 = *(p_ref - inpitch);
- r2 = *p_ref;
- r3 = *(p_ref += inpitch); /* modify pointer before loading */
- r4 = *(p_ref += inpitch);
- /* first pixel */
- r5 = *(p_ref += inpitch);
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dy-(inpitch<<1)] + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* second pixel */
- r0 = *(p_ref += inpitch);
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dy-(inpitch<<1)] + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* third pixel */
- r1 = *(p_ref += inpitch);
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dy-(inpitch<<1)] + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* fourth pixel */
- r2 = *(p_ref += inpitch);
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- /* 3/4 pel, no need to clip */
- result = (result + p_ref[dy-(inpitch<<1)] + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
- }
- p_cur += (curr_offset - 3);
- }
- }
- }
- }
- else
- {
- for (j = 0; j < blkwidth; j += 4, in += 4)
- {
- r13 = 0;
- p_ref = in;
- p_cur -= outpitch; /* compensate for the first offset */
- tmp = p_ref + ref_offset; /* limit */
- while (p_ref < tmp) /* the loop un-rolled */
- {
- r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */
- p_ref += inpitch;
- r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */
- r0 &= 0xFF00FF;
-
- r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
-
- r0 += r1;
- r6 += r7;
-
- r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */
- r8 = (r2 >> 8) & 0xFF00FF;
- r2 &= 0xFF00FF;
-
- r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r1 += r2;
-
- r7 += r8;
-
- r0 += 20 * r1;
- r6 += 20 * r7;
- r0 += 0x100010;
- r6 += 0x100010;
-
- r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */
- r8 = (r2 >> 8) & 0xFF00FF;
- r2 &= 0xFF00FF;
-
- r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r1 += r2;
-
- r7 += r8;
-
- r0 -= 5 * r1;
- r6 -= 5 * r7;
-
- r0 >>= 5;
- r6 >>= 5;
- /* clip */
- r13 |= r6;
- r13 |= r0;
- //CLIPPACK(r6,result)
- r0 &= 0xFF00FF;
- r6 &= 0xFF00FF;
- r0 |= (r6 << 8); /* pack it back */
- *((uint32*)(p_cur += outpitch)) = r0;
- }
- p_cur += curr_offset; /* offset to the next pixel */
- if (r13 & 0xFF000700) /* this column need clipping */
- {
- p_cur -= 4;
- for (i = 0; i < 4; i++)
- {
- p_ref = in + i;
- p_cur -= outpitch; /* compensate for the first offset */
- tmp = p_ref + ref_offset; /* limit */
- while (p_ref < tmp)
- { /* loop un-rolled */
- r0 = *(p_ref - (inpitch << 1));
- r1 = *(p_ref - inpitch);
- r2 = *p_ref;
- r3 = *(p_ref += inpitch); /* modify pointer before loading */
- r4 = *(p_ref += inpitch);
- /* first pixel */
- r5 = *(p_ref += inpitch);
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- /* second pixel */
- r0 = *(p_ref += inpitch);
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- /* third pixel */
- r1 = *(p_ref += inpitch);
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- /* fourth pixel */
- r2 = *(p_ref += inpitch);
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
- }
- p_cur += (curr_offset - 3);
- }
- }
- }
- }
-
- return ;
-}
-
-void eVertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch,
- int blkwidth, int blkheight)
-{
- int *p_cur;
- uint8 *p_ref, *tmp;
- int result, curr_offset, ref_offset;
- int j, r0, r1, r2, r3, r4, r5;
-
- p_cur = out;
- curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */
- ref_offset = blkheight * inpitch; /* for limit */
-
- for (j = 0; j < blkwidth; j++)
- {
- p_cur -= outpitch; /* compensate for the first offset */
- p_ref = in++;
-
- tmp = p_ref + ref_offset; /* limit */
- while (p_ref < tmp)
- { /* loop un-rolled */
- r0 = *(p_ref - (inpitch << 1));
- r1 = *(p_ref - inpitch);
- r2 = *p_ref;
- r3 = *(p_ref += inpitch); /* modify pointer before loading */
- r4 = *(p_ref += inpitch);
- /* first pixel */
- r5 = *(p_ref += inpitch);
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- *(p_cur += outpitch) = result;
- /* second pixel */
- r0 = *(p_ref += inpitch);
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- *(p_cur += outpitch) = result;
- /* third pixel */
- r1 = *(p_ref += inpitch);
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- *(p_cur += outpitch) = result;
- /* fourth pixel */
- r2 = *(p_ref += inpitch);
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- *(p_cur += outpitch) = result;
- p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
- }
- p_cur += curr_offset;
- }
-
- return ;
-}
-
-void eVertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight, int dy)
-{
- uint8 *p_cur;
- int *p_ref, *tmp;
- int result, result2, curr_offset, ref_offset;
- int j, r0, r1, r2, r3, r4, r5;
-
- p_cur = out;
- curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */
- ref_offset = blkheight * inpitch; /* for limit */
-
- if (dy&1)
- {
- dy = (dy >> 1) ? -(inpitch << 1) : -(inpitch << 1) - inpitch;
-
- for (j = 0; j < blkwidth; j++)
- {
- p_cur -= outpitch; /* compensate for the first offset */
- p_ref = in++;
-
- tmp = p_ref + ref_offset; /* limit */
- while (p_ref < tmp)
- { /* loop un-rolled */
- r0 = *(p_ref - (inpitch << 1));
- r1 = *(p_ref - inpitch);
- r2 = *p_ref;
- r3 = *(p_ref += inpitch); /* modify pointer before loading */
- r4 = *(p_ref += inpitch);
- /* first pixel */
- r5 = *(p_ref += inpitch);
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dy] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* second pixel */
- r0 = *(p_ref += inpitch);
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dy] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* third pixel */
- r1 = *(p_ref += inpitch);
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dy] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* fourth pixel */
- r2 = *(p_ref += inpitch);
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- result2 = ((p_ref[dy] + 16) >> 5);
- CLIP_RESULT(result2)
- /* 3/4 pel, no need to clip */
- result = (result + result2 + 1);
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
- }
- p_cur += curr_offset;
- }
- }
- else
- {
- for (j = 0; j < blkwidth; j++)
- {
- p_cur -= outpitch; /* compensate for the first offset */
- p_ref = in++;
-
- tmp = p_ref + ref_offset; /* limit */
- while (p_ref < tmp)
- { /* loop un-rolled */
- r0 = *(p_ref - (inpitch << 1));
- r1 = *(p_ref - inpitch);
- r2 = *p_ref;
- r3 = *(p_ref += inpitch); /* modify pointer before loading */
- r4 = *(p_ref += inpitch);
- /* first pixel */
- r5 = *(p_ref += inpitch);
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- /* second pixel */
- r0 = *(p_ref += inpitch);
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- /* third pixel */
- r1 = *(p_ref += inpitch);
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- /* fourth pixel */
- r2 = *(p_ref += inpitch);
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 512) >> 10;
- CLIP_RESULT(result)
- *(p_cur += outpitch) = result;
- p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
- }
- p_cur += curr_offset;
- }
- }
-
- return ;
-}
-
-void eDiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch,
- uint8 *out, int outpitch,
- int blkwidth, int blkheight)
-{
- int j, i;
- int result;
- uint8 *p_cur, *p_ref, *p_tmp8, *tmp;
- int curr_offset, ref_offset;
- uint8 tmp_res[24][24], tmp_in[24][24];
- uint32 *p_tmp;
- uint32 pkres, tmp_result;
- int32 r0, r1, r2, r3, r4, r5;
- int32 r6, r7, r8, r9, r10, r13;
-
- ref_offset = inpitch - blkwidth;
- p_ref = in1 - 2;
- /* perform horizontal interpolation */
- /* not word-aligned */
- /* It is faster to read 1 byte at time to avoid calling CreateAlign */
- /* if(((uint32)p_ref)&0x3)
- {
- CreateAlign(p_ref,inpitch,0,&tmp_in[0][0],blkwidth+8,blkheight);
- p_ref = &tmp_in[0][0];
- ref_offset = 24-blkwidth;
- }*/
-
- p_tmp = (uint32*) & (tmp_res[0][0]);
- for (j = blkheight; j > 0; j--)
- {
- r13 = 0;
- tmp = p_ref + blkwidth;
-
- //r0 = *((uint32*)p_ref); /* d,c,b,a */
- //r1 = (r0>>8)&0xFF00FF; /* 0,d,0,b */
- //r0 &= 0xFF00FF; /* 0,c,0,a */
- /* It is faster to read 1 byte at a time */
- r0 = p_ref[0];
- r1 = p_ref[2];
- r0 |= (r1 << 16); /* 0,c,0,a */
- r1 = p_ref[1];
- r2 = p_ref[3];
- r1 |= (r2 << 16); /* 0,d,0,b */
-
- while (p_ref < tmp)
- {
- //r2 = *((uint32*)(p_ref+=4));/* h,g,f,e */
- //r3 = (r2>>8)&0xFF00FF; /* 0,h,0,f */
- //r2 &= 0xFF00FF; /* 0,g,0,e */
- /* It is faster to read 1 byte at a time */
- r2 = *(p_ref += 4);
- r3 = p_ref[2];
- r2 |= (r3 << 16); /* 0,g,0,e */
- r3 = p_ref[1];
- r4 = p_ref[3];
- r3 |= (r4 << 16); /* 0,h,0,f */
-
- r4 = r0 + r3; /* c+h, a+f */
- r5 = r0 + r1; /* c+d, a+b */
- r6 = r2 + r3; /* g+h, e+f */
- r5 >>= 16;
- r5 |= (r6 << 16); /* e+f, c+d */
- r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */
- r4 += 0x100010; /* +16, +16 */
- r5 = r1 + r2; /* d+g, b+e */
- r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */
- r4 >>= 5;
- r13 |= r4; /* check clipping */
- r4 &= 0xFF00FF; /* mask */
-
- r5 = p_ref[4]; /* i */
- r6 = (r5 << 16);
- r5 = r6 | (r2 >> 16);/* 0,i,0,g */
- r5 += r1; /* d+i, b+g */ /* r5 not free */
- r1 >>= 16;
- r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */
- r1 += r2; /* f+g, d+e */
- r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */
- r0 >>= 16;
- r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */
- r0 += r3; /* e+h, c+f */
- r5 += 0x100010; /* 16,16 */
- r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */
- r5 >>= 5;
- r13 |= r5; /* check clipping */
- r5 &= 0xFF00FF; /* mask */
-
- r4 |= (r5 << 8); /* pack them together */
- *p_tmp++ = r4;
- r1 = r3;
- r0 = r2;
- }
- p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */
- p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */
-
- if (r13&0xFF000700) /* need clipping */
- {
- /* move back to the beginning of the line */
- p_ref -= (ref_offset + blkwidth); /* input */
- p_tmp -= 6; /* intermediate output */
- tmp = p_ref + blkwidth;
- while (p_ref < tmp)
- {
- r0 = *p_ref++;
- r1 = *p_ref++;
- r2 = *p_ref++;
- r3 = *p_ref++;
- r4 = *p_ref++;
- /* first pixel */
- r5 = *p_ref++;
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres = result;
- /* second pixel */
- r0 = *p_ref++;
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres |= (result << 8);
- /* third pixel */
- r1 = *p_ref++;
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres |= (result << 16);
- /* fourth pixel */
- r2 = *p_ref++;
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- pkres |= (result << 24);
-
- *p_tmp++ = pkres; /* write 4 pixel */
- p_ref -= 5;
- }
- p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */
- p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */
- }
- }
-
- /* perform vertical interpolation */
- /* not word-aligned */
- if (((intptr_t)in2)&0x3)
- {
- eCreateAlign(in2, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5);
- in2 = &tmp_in[2][0];
- inpitch = 24;
- }
-
- p_cur = out;
- curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically up and one pixel right */
- pkres = blkheight * inpitch; /* reuse it for limit */
-
- curr_offset += 3;
-
- for (j = 0; j < blkwidth; j += 4, in2 += 4)
- {
- r13 = 0;
- p_ref = in2;
- p_tmp8 = &(tmp_res[0][j]); /* intermediate result */
- p_tmp8 -= 24; /* compensate for the first offset */
- p_cur -= outpitch; /* compensate for the first offset */
- tmp = p_ref + pkres; /* limit */
- while (p_ref < tmp) /* the loop un-rolled */
- {
- /* Read 1 byte at a time is too slow, too many read and pack ops, need to call CreateAlign */
- /*p_ref8 = p_ref-(inpitch<<1); r0 = p_ref8[0]; r1 = p_ref8[2];
- r0 |= (r1<<16); r6 = p_ref8[1]; r1 = p_ref8[3];
- r6 |= (r1<<16); p_ref+=inpitch; */
- r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */
- p_ref += inpitch;
- r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */
- r0 &= 0xFF00FF;
-
- /*p_ref8 = p_ref+(inpitch<<1);
- r1 = p_ref8[0]; r7 = p_ref8[2]; r1 |= (r7<<16);
- r7 = p_ref8[1]; r2 = p_ref8[3]; r7 |= (r2<<16);*/
- r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
-
- r0 += r1;
- r6 += r7;
-
- /*r2 = p_ref[0]; r8 = p_ref[2]; r2 |= (r8<<16);
- r8 = p_ref[1]; r1 = p_ref[3]; r8 |= (r1<<16);*/
- r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */
- r8 = (r2 >> 8) & 0xFF00FF;
- r2 &= 0xFF00FF;
-
- /*p_ref8 = p_ref-inpitch; r1 = p_ref8[0]; r7 = p_ref8[2];
- r1 |= (r7<<16); r1 += r2; r7 = p_ref8[1];
- r2 = p_ref8[3]; r7 |= (r2<<16);*/
- r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r1 += r2;
-
- r7 += r8;
-
- r0 += 20 * r1;
- r6 += 20 * r7;
- r0 += 0x100010;
- r6 += 0x100010;
-
- /*p_ref8 = p_ref-(inpitch<<1); r2 = p_ref8[0]; r8 = p_ref8[2];
- r2 |= (r8<<16); r8 = p_ref8[1]; r1 = p_ref8[3]; r8 |= (r1<<16);*/
- r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */
- r8 = (r2 >> 8) & 0xFF00FF;
- r2 &= 0xFF00FF;
-
- /*p_ref8 = p_ref+inpitch; r1 = p_ref8[0]; r7 = p_ref8[2];
- r1 |= (r7<<16); r1 += r2; r7 = p_ref8[1];
- r2 = p_ref8[3]; r7 |= (r2<<16);*/
- r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */
- r7 = (r1 >> 8) & 0xFF00FF;
- r1 &= 0xFF00FF;
- r1 += r2;
-
- r7 += r8;
-
- r0 -= 5 * r1;
- r6 -= 5 * r7;
-
- r0 >>= 5;
- r6 >>= 5;
- /* clip */
- r13 |= r6;
- r13 |= r0;
- //CLIPPACK(r6,result)
- /* add with horizontal results */
- r10 = *((uint32*)(p_tmp8 += 24));
- r9 = (r10 >> 8) & 0xFF00FF;
- r10 &= 0xFF00FF;
-
- r0 += r10;
- r0 += 0x10001;
- r0 = (r0 >> 1) & 0xFF00FF; /* mask to 8 bytes */
-
- r6 += r9;
- r6 += 0x10001;
- r6 = (r6 >> 1) & 0xFF00FF; /* mask to 8 bytes */
-
- r0 |= (r6 << 8); /* pack it back */
- *((uint32*)(p_cur += outpitch)) = r0;
- }
- p_cur += curr_offset; /* offset to the next pixel */
- if (r13 & 0xFF000700) /* this column need clipping */
- {
- p_cur -= 4;
- for (i = 0; i < 4; i++)
- {
- p_ref = in2 + i;
- p_tmp8 = &(tmp_res[0][j+i]); /* intermediate result */
- p_tmp8 -= 24; /* compensate for the first offset */
- p_cur -= outpitch; /* compensate for the first offset */
- tmp = p_ref + pkres; /* limit */
- while (p_ref < tmp) /* the loop un-rolled */
- {
- r0 = *(p_ref - (inpitch << 1));
- r1 = *(p_ref - inpitch);
- r2 = *p_ref;
- r3 = *(p_ref += inpitch); /* modify pointer before loading */
- r4 = *(p_ref += inpitch);
- /* first pixel */
- r5 = *(p_ref += inpitch);
- result = (r0 + r5);
- r0 = (r1 + r4);
- result -= (r0 * 5);//result -= r0; result -= (r0<<2);
- r0 = (r2 + r3);
- result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- tmp_result = *(p_tmp8 += 24); /* modify pointer before loading */
- result = (result + tmp_result + 1); /* no clip */
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* second pixel */
- r0 = *(p_ref += inpitch);
- result = (r1 + r0);
- r1 = (r2 + r5);
- result -= (r1 * 5);//result -= r1; result -= (r1<<2);
- r1 = (r3 + r4);
- result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- tmp_result = *(p_tmp8 += 24); /* intermediate result */
- result = (result + tmp_result + 1); /* no clip */
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* third pixel */
- r1 = *(p_ref += inpitch);
- result = (r2 + r1);
- r2 = (r3 + r0);
- result -= (r2 * 5);//result -= r2; result -= (r2<<2);
- r2 = (r4 + r5);
- result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- tmp_result = *(p_tmp8 += 24); /* intermediate result */
- result = (result + tmp_result + 1); /* no clip */
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- /* fourth pixel */
- r2 = *(p_ref += inpitch);
- result = (r3 + r2);
- r3 = (r4 + r1);
- result -= (r3 * 5);//result -= r3; result -= (r3<<2);
- r3 = (r5 + r0);
- result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
- result = (result + 16) >> 5;
- CLIP_RESULT(result)
- tmp_result = *(p_tmp8 += 24); /* intermediate result */
- result = (result + tmp_result + 1); /* no clip */
- result = (result >> 1);
- *(p_cur += outpitch) = result;
- p_ref -= (inpitch << 1); /* move back to center of the filter of the next one */
- }
- p_cur += (curr_offset - 3);
- }
- }
- }
-
- return ;
-}
-
-/* position G */
-void eFullPelMC(uint8 *in, int inpitch, uint8 *out, int outpitch,
- int blkwidth, int blkheight)
-{
- int i, j;
- int offset_in = inpitch - blkwidth;
- int offset_out = outpitch - blkwidth;
- uint32 temp;
- uint8 byte;
-
- if (((intptr_t)in)&3)
- {
- for (j = blkheight; j > 0; j--)
- {
- for (i = blkwidth; i > 0; i -= 4)
- {
- temp = *in++;
- byte = *in++;
- temp |= (byte << 8);
- byte = *in++;
- temp |= (byte << 16);
- byte = *in++;
- temp |= (byte << 24);
-
- *((uint32*)out) = temp; /* write 4 bytes */
- out += 4;
- }
- out += offset_out;
- in += offset_in;
- }
- }
- else
- {
- for (j = blkheight; j > 0; j--)
- {
- for (i = blkwidth; i > 0; i -= 4)
- {
- temp = *((uint32*)in);
- *((uint32*)out) = temp;
- in += 4;
- out += 4;
- }
- out += offset_out;
- in += offset_in;
- }
- }
- return ;
-}
-
-void ePadChroma(uint8 *ref, int picwidth, int picheight, int picpitch, int x_pos, int y_pos)
-{
- int pad_height;
- int pad_width;
- uint8 *start;
- uint32 word1, word2, word3;
- int offset, j;
-
-
- pad_height = 8 + ((y_pos & 7) ? 1 : 0);
- pad_width = 8 + ((x_pos & 7) ? 1 : 0);
-
- y_pos >>= 3;
- x_pos >>= 3;
- // pad vertical first
- if (y_pos < 0) // need to pad up
- {
- if (x_pos < -8) start = ref - 8;
- else if (x_pos + pad_width > picwidth + 7) start = ref + picwidth + 7 - pad_width;
- else start = ref + x_pos;
-
- /* word-align start */
- offset = (intptr_t)start & 0x3;
- if (offset) start -= offset;
-
- word1 = *((uint32*)start);
- word2 = *((uint32*)(start + 4));
- word3 = *((uint32*)(start + 8));
-
- /* pad up N rows */
- j = -y_pos;
- if (j > 8) j = 8;
- while (j--)
- {
- *((uint32*)(start -= picpitch)) = word1;
- *((uint32*)(start + 4)) = word2;
- *((uint32*)(start + 8)) = word3;
- }
-
- }
- else if (y_pos + pad_height >= picheight) /* pad down */
- {
- if (x_pos < -8) start = ref + picpitch * (picheight - 1) - 8;
- else if (x_pos + pad_width > picwidth + 7) start = ref + picpitch * (picheight - 1) +
- picwidth + 7 - pad_width;
- else start = ref + picpitch * (picheight - 1) + x_pos;
-
- /* word-align start */
- offset = (intptr_t)start & 0x3;
- if (offset) start -= offset;
-
- word1 = *((uint32*)start);
- word2 = *((uint32*)(start + 4));
- word3 = *((uint32*)(start + 8));
-
- /* pad down N rows */
- j = y_pos + pad_height - picheight;
- if (j > 8) j = 8;
- while (j--)
- {
- *((uint32*)(start += picpitch)) = word1;
- *((uint32*)(start + 4)) = word2;
- *((uint32*)(start + 8)) = word3;
- }
- }
-
- /* now pad horizontal */
- if (x_pos < 0) // pad left
- {
- if (y_pos < -8) start = ref - (picpitch << 3);
- else if (y_pos + pad_height > picheight + 7) start = ref + (picheight + 7 - pad_height) * picpitch;
- else start = ref + y_pos * picpitch;
-
- // now pad left 8 pixels for pad_height rows */
- j = pad_height;
- start -= picpitch;
- while (j--)
- {
- word1 = *(start += picpitch);
- word1 |= (word1 << 8);
- word1 |= (word1 << 16);
- *((uint32*)(start - 8)) = word1;
- *((uint32*)(start - 4)) = word1;
- }
- }
- else if (x_pos + pad_width >= picwidth) /* pad right */
- {
- if (y_pos < -8) start = ref - (picpitch << 3) + picwidth - 1;
- else if (y_pos + pad_height > picheight + 7) start = ref + (picheight + 7 - pad_height) * picpitch + picwidth - 1;
- else start = ref + y_pos * picpitch + picwidth - 1;
-
- // now pad right 8 pixels for pad_height rows */
- j = pad_height;
- start -= picpitch;
- while (j--)
- {
- word1 = *(start += picpitch);
- word1 |= (word1 << 8);
- word1 |= (word1 << 16);
- *((uint32*)(start + 1)) = word1;
- *((uint32*)(start + 5)) = word1;
- }
- }
-
- return ;
-}
-
-
-void eChromaMotionComp(uint8 *ref, int picwidth, int picheight,
- int x_pos, int y_pos,
- uint8 *pred, int picpitch,
- int blkwidth, int blkheight)
-{
- int dx, dy;
- int offset_dx, offset_dy;
- int index;
-
- ePadChroma(ref, picwidth, picheight, picpitch, x_pos, y_pos);
-
- dx = x_pos & 7;
- dy = y_pos & 7;
- offset_dx = (dx + 7) >> 3;
- offset_dy = (dy + 7) >> 3;
- x_pos = x_pos >> 3; /* round it to full-pel resolution */
- y_pos = y_pos >> 3;
-
- ref += y_pos * picpitch + x_pos;
-
- index = offset_dx + (offset_dy << 1) + ((blkwidth << 1) & 0x7);
-
- (*(eChromaMC_SIMD[index]))(ref, picpitch , dx, dy, pred, picpitch, blkwidth, blkheight);
- return ;
-}
-
-
-/* SIMD routines, unroll the loops in vertical direction, decreasing loops (things to be done) */
-void eChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- int32 r0, r1, r2, r3, result0, result1;
- uint8 temp[288];
- uint8 *ref, *out;
- int i, j;
- int dx_8 = 8 - dx;
- int dy_8 = 8 - dy;
-
- /* horizontal first */
- out = temp;
- for (i = 0; i < blkheight + 1; i++)
- {
- ref = pRef;
- r0 = ref[0];
- for (j = 0; j < blkwidth; j += 4)
- {
- r0 |= (ref[2] << 16);
- result0 = dx_8 * r0;
-
- r1 = ref[1] | (ref[3] << 16);
- result0 += dx * r1;
- *(int32 *)out = result0;
-
- result0 = dx_8 * r1;
-
- r2 = ref[4];
- r0 = r0 >> 16;
- r1 = r0 | (r2 << 16);
- result0 += dx * r1;
- *(int32 *)(out + 16) = result0;
-
- ref += 4;
- out += 4;
- r0 = r2;
- }
- pRef += srcPitch;
- out += (32 - blkwidth);
- }
-
-// pRef -= srcPitch*(blkheight+1);
- ref = temp;
-
- for (j = 0; j < blkwidth; j += 4)
- {
- r0 = *(int32 *)ref;
- r1 = *(int32 *)(ref + 16);
- ref += 32;
- out = pOut;
- for (i = 0; i < (blkheight >> 1); i++)
- {
- result0 = dy_8 * r0 + 0x00200020;
- r2 = *(int32 *)ref;
- result0 += dy * r2;
- result0 >>= 6;
- result0 &= 0x00FF00FF;
- r0 = r2;
-
- result1 = dy_8 * r1 + 0x00200020;
- r3 = *(int32 *)(ref + 16);
- result1 += dy * r3;
- result1 >>= 6;
- result1 &= 0x00FF00FF;
- r1 = r3;
- *(int32 *)out = result0 | (result1 << 8);
- out += predPitch;
- ref += 32;
-
- result0 = dy_8 * r0 + 0x00200020;
- r2 = *(int32 *)ref;
- result0 += dy * r2;
- result0 >>= 6;
- result0 &= 0x00FF00FF;
- r0 = r2;
-
- result1 = dy_8 * r1 + 0x00200020;
- r3 = *(int32 *)(ref + 16);
- result1 += dy * r3;
- result1 >>= 6;
- result1 &= 0x00FF00FF;
- r1 = r3;
- *(int32 *)out = result0 | (result1 << 8);
- out += predPitch;
- ref += 32;
- }
- pOut += 4;
- ref = temp + 4; /* since it can only iterate twice max */
- }
- return;
-}
-
-void eChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- (void)(dy);
-
- int32 r0, r1, r2, result0, result1;
- uint8 *ref, *out;
- int i, j;
- int dx_8 = 8 - dx;
-
- /* horizontal first */
- for (i = 0; i < blkheight; i++)
- {
- ref = pRef;
- out = pOut;
-
- r0 = ref[0];
- for (j = 0; j < blkwidth; j += 4)
- {
- r0 |= (ref[2] << 16);
- result0 = dx_8 * r0 + 0x00040004;
-
- r1 = ref[1] | (ref[3] << 16);
- result0 += dx * r1;
- result0 >>= 3;
- result0 &= 0x00FF00FF;
-
- result1 = dx_8 * r1 + 0x00040004;
-
- r2 = ref[4];
- r0 = r0 >> 16;
- r1 = r0 | (r2 << 16);
- result1 += dx * r1;
- result1 >>= 3;
- result1 &= 0x00FF00FF;
-
- *(int32 *)out = result0 | (result1 << 8);
-
- ref += 4;
- out += 4;
- r0 = r2;
- }
-
- pRef += srcPitch;
- pOut += predPitch;
- }
- return;
-}
-
-void eChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- (void)(dx);
-
- int32 r0, r1, r2, r3, result0, result1;
- int i, j;
- uint8 *ref, *out;
- int dy_8 = 8 - dy;
- /* vertical first */
- for (i = 0; i < blkwidth; i += 4)
- {
- ref = pRef;
- out = pOut;
-
- r0 = ref[0] | (ref[2] << 16);
- r1 = ref[1] | (ref[3] << 16);
- ref += srcPitch;
- for (j = 0; j < blkheight; j++)
- {
- result0 = dy_8 * r0 + 0x00040004;
- r2 = ref[0] | (ref[2] << 16);
- result0 += dy * r2;
- result0 >>= 3;
- result0 &= 0x00FF00FF;
- r0 = r2;
-
- result1 = dy_8 * r1 + 0x00040004;
- r3 = ref[1] | (ref[3] << 16);
- result1 += dy * r3;
- result1 >>= 3;
- result1 &= 0x00FF00FF;
- r1 = r3;
- *(int32 *)out = result0 | (result1 << 8);
- ref += srcPitch;
- out += predPitch;
- }
- pOut += 4;
- pRef += 4;
- }
- return;
-}
-
-void eChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- (void)(blkwidth);
-
- int32 r0, r1, temp0, temp1, result;
- int32 temp[9];
- int32 *out;
- int i, r_temp;
- int dy_8 = 8 - dy;
-
- /* horizontal first */
- out = temp;
- for (i = 0; i < blkheight + 1; i++)
- {
- r_temp = pRef[1];
- temp0 = (pRef[0] << 3) + dx * (r_temp - pRef[0]);
- temp1 = (r_temp << 3) + dx * (pRef[2] - r_temp);
- r0 = temp0 | (temp1 << 16);
- *out++ = r0;
- pRef += srcPitch;
- }
-
- pRef -= srcPitch * (blkheight + 1);
-
- out = temp;
-
- r0 = *out++;
-
- for (i = 0; i < blkheight; i++)
- {
- result = dy_8 * r0 + 0x00200020;
- r1 = *out++;
- result += dy * r1;
- result >>= 6;
- result &= 0x00FF00FF;
- *(int16 *)pOut = (result >> 8) | (result & 0xFF);
- r0 = r1;
- pOut += predPitch;
- }
- return;
-}
-
-void eChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- (void)(dy);
- (void)(blkwidth);
-
- int i, temp, temp0, temp1;
-
- /* horizontal first */
- for (i = 0; i < blkheight; i++)
- {
- temp = pRef[1];
- temp0 = ((pRef[0] << 3) + dx * (temp - pRef[0]) + 4) >> 3;
- temp1 = ((temp << 3) + dx * (pRef[2] - temp) + 4) >> 3;
-
- *(int16 *)pOut = temp0 | (temp1 << 8);
- pRef += srcPitch;
- pOut += predPitch;
-
- }
- return;
-}
-void eChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- (void)(dx);
- (void)(blkwidth);
-
- int32 r0, r1, result;
- int i;
- int dy_8 = 8 - dy;
- r0 = pRef[0] | (pRef[1] << 16);
- pRef += srcPitch;
- for (i = 0; i < blkheight; i++)
- {
- result = dy_8 * r0 + 0x00040004;
- r1 = pRef[0] | (pRef[1] << 16);
- result += dy * r1;
- result >>= 3;
- result &= 0x00FF00FF;
- *(int16 *)pOut = (result >> 8) | (result & 0xFF);
- r0 = r1;
- pRef += srcPitch;
- pOut += predPitch;
- }
- return;
-}
-
-void eChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
- uint8 *pOut, int predPitch, int blkwidth, int blkheight)
-{
- (void)(dx);
- (void)(dy);
-
- int i, j;
- int offset_in = srcPitch - blkwidth;
- int offset_out = predPitch - blkwidth;
- uint16 temp;
- uint8 byte;
-
- if (((intptr_t)pRef)&1)
- {
- for (j = blkheight; j > 0; j--)
- {
- for (i = blkwidth; i > 0; i -= 2)
- {
- temp = *pRef++;
- byte = *pRef++;
- temp |= (byte << 8);
- *((uint16*)pOut) = temp; /* write 2 bytes */
- pOut += 2;
- }
- pOut += offset_out;
- pRef += offset_in;
- }
- }
- else
- {
- for (j = blkheight; j > 0; j--)
- {
- for (i = blkwidth; i > 0; i -= 2)
- {
- temp = *((uint16*)pRef);
- *((uint16*)pOut) = temp;
- pRef += 2;
- pOut += 2;
- }
- pOut += offset_out;
- pRef += offset_in;
- }
- }
- return ;
-}
diff --git a/media/libstagefright/codecs/avc/enc/src/motion_est.cpp b/media/libstagefright/codecs/avc/enc/src/motion_est.cpp
deleted file mode 100644
index 00c56c8..0000000
--- a/media/libstagefright/codecs/avc/enc/src/motion_est.cpp
+++ /dev/null
@@ -1,1774 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-
-#define MIN_GOP 1 /* minimum size of GOP, 1/23/01, need to be tested */
-
-#define DEFAULT_REF_IDX 0 /* always from the first frame in the reflist */
-
-#define ALL_CAND_EQUAL 10 /* any number greater than 5 will work */
-
-
-/* from TMN 3.2 */
-#define PREF_NULL_VEC 129 /* zero vector bias */
-#define PREF_16_VEC 129 /* 1MV bias versus 4MVs*/
-#define PREF_INTRA 3024//512 /* bias for INTRA coding */
-
-const static int tab_exclude[9][9] = // [last_loc][curr_loc]
-{
- {0, 0, 0, 0, 0, 0, 0, 0, 0},
- {0, 0, 0, 0, 1, 1, 1, 0, 0},
- {0, 0, 0, 0, 1, 1, 1, 1, 1},
- {0, 0, 0, 0, 0, 0, 1, 1, 1},
- {0, 1, 1, 0, 0, 0, 1, 1, 1},
- {0, 1, 1, 0, 0, 0, 0, 0, 1},
- {0, 1, 1, 1, 1, 0, 0, 0, 1},
- {0, 0, 1, 1, 1, 0, 0, 0, 0},
- {0, 0, 1, 1, 1, 1, 1, 0, 0}
-}; //to decide whether to continue or compute
-
-const static int refine_next[8][2] = /* [curr_k][increment] */
-{
- {0, 0}, {2, 0}, {1, 1}, {0, 2}, { -1, 1}, { -2, 0}, { -1, -1}, {0, -2}
-};
-
-#ifdef _SAD_STAT
-uint32 num_MB = 0;
-uint32 num_cand = 0;
-#endif
-
-/************************************************************************/
-#define TH_INTER_2 100 /* temporary for now */
-
-//#define FIXED_INTERPRED_MODE AVC_P16
-#define FIXED_REF_IDX 0
-#define FIXED_MVX 0
-#define FIXED_MVY 0
-
-// only use when AVC_P8 or AVC_P8ref0
-#define FIXED_SUBMB_MODE AVC_4x4
-/*************************************************************************/
-
-/* Initialize arrays necessary for motion search */
-AVCEnc_Status InitMotionSearchModule(AVCHandle *avcHandle)
-{
- AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- int search_range = rateCtrl->mvRange;
- int number_of_subpel_positions = 4 * (2 * search_range + 3);
- int max_mv_bits, max_mvd;
- int temp_bits = 0;
- uint8 *mvbits;
- int bits, imax, imin, i;
- uint8* subpel_pred = (uint8*) encvid->subpel_pred; // all 16 sub-pel positions
-
-
- while (number_of_subpel_positions > 0)
- {
- temp_bits++;
- number_of_subpel_positions >>= 1;
- }
-
- max_mv_bits = 3 + 2 * temp_bits;
- max_mvd = (1 << (max_mv_bits >> 1)) - 1;
-
- encvid->mvbits_array = (uint8*) avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,
- sizeof(uint8) * (2 * max_mvd + 1), DEFAULT_ATTR);
-
- if (encvid->mvbits_array == NULL)
- {
- return AVCENC_MEMORY_FAIL;
- }
-
- mvbits = encvid->mvbits = encvid->mvbits_array + max_mvd;
-
- mvbits[0] = 1;
- for (bits = 3; bits <= max_mv_bits; bits += 2)
- {
- imax = 1 << (bits >> 1);
- imin = imax >> 1;
-
- for (i = imin; i < imax; i++) mvbits[-i] = mvbits[i] = bits;
- }
-
- /* initialize half-pel search */
- encvid->hpel_cand[0] = subpel_pred + REF_CENTER;
- encvid->hpel_cand[1] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1 ;
- encvid->hpel_cand[2] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1;
- encvid->hpel_cand[3] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;
- encvid->hpel_cand[4] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;
- encvid->hpel_cand[5] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 25;
- encvid->hpel_cand[6] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
- encvid->hpel_cand[7] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
- encvid->hpel_cand[8] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;
-
- /* For quarter-pel interpolation around best half-pel result */
-
- encvid->bilin_base[0][0] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;
- encvid->bilin_base[0][1] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1;
- encvid->bilin_base[0][2] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
- encvid->bilin_base[0][3] = subpel_pred + REF_CENTER;
-
-
- encvid->bilin_base[1][0] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE;
- encvid->bilin_base[1][1] = subpel_pred + REF_CENTER - 24;
- encvid->bilin_base[1][2] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;
- encvid->bilin_base[1][3] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1;
-
- encvid->bilin_base[2][0] = subpel_pred + REF_CENTER - 24;
- encvid->bilin_base[2][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1;
- encvid->bilin_base[2][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1;
- encvid->bilin_base[2][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1;
-
- encvid->bilin_base[3][0] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1;
- encvid->bilin_base[3][1] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1;
- encvid->bilin_base[3][2] = subpel_pred + REF_CENTER;
- encvid->bilin_base[3][3] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;
-
- encvid->bilin_base[4][0] = subpel_pred + REF_CENTER;
- encvid->bilin_base[4][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;
- encvid->bilin_base[4][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 25;
- encvid->bilin_base[4][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;
-
- encvid->bilin_base[5][0] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
- encvid->bilin_base[5][1] = subpel_pred + REF_CENTER;
- encvid->bilin_base[5][2] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
- encvid->bilin_base[5][3] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 25;
-
- encvid->bilin_base[6][0] = subpel_pred + REF_CENTER - 1;
- encvid->bilin_base[6][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
- encvid->bilin_base[6][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 24;
- encvid->bilin_base[6][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
-
- encvid->bilin_base[7][0] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE;
- encvid->bilin_base[7][1] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;
- encvid->bilin_base[7][2] = subpel_pred + REF_CENTER - 1;
- encvid->bilin_base[7][3] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
-
- encvid->bilin_base[8][0] = subpel_pred + REF_CENTER - 25;
- encvid->bilin_base[8][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE;
- encvid->bilin_base[8][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE;
- encvid->bilin_base[8][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;
-
-
- return AVCENC_SUCCESS;
-}
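
The mvbits[] table built above caches the length of the signed Exp-Golomb code se(v) used to signal motion-vector differences, indexed by the (possibly negative) difference itself. A minimal standalone sketch of that length computation, assuming the table is meant to match H.264 se(v) coding (the helper name se_bits is hypothetical):

    /* Length in bits of the signed Exp-Golomb code se(v); mvbits[v] above
     * caches this value so the motion cost can be a simple table lookup. */
    static int se_bits(int v)
    {
        unsigned int codeNum = (v > 0) ? (unsigned int)(2 * v - 1)
                                       : (unsigned int)(-2 * v);   /* v == 0 -> 0 */
        int M = 0;                        /* M = floor(log2(codeNum + 1)) */
        while ((codeNum + 1) >> (M + 1))
            M++;
        return 2 * M + 1;                 /* M leading zeros, a 1, M info bits */
    }

For example se_bits(0) = 1, se_bits(+/-1) = 3, and se_bits(+/-2) = se_bits(+/-3) = 5, matching the values written into mvbits[] by the loop above.
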
-
-/* Clean-up memory */
-void CleanMotionSearchModule(AVCHandle *avcHandle)
-{
- AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
-
- if (encvid->mvbits_array)
- {
- avcHandle->CBAVC_Free(avcHandle->userData, encvid->mvbits_array);
- encvid->mvbits = NULL;
- }
-
- return ;
-}
-
-
-bool IntraDecisionABE(int *min_cost, uint8 *cur, int pitch, bool ave)
-{
- int j;
- uint8 *out;
- int temp, SBE;
- OsclFloat ABE;
- bool intra = true;
-
- SBE = 0;
- /* top neighbor */
- out = cur - pitch;
- for (j = 0; j < 16; j++)
- {
- temp = out[j] - cur[j];
- SBE += ((temp >= 0) ? temp : -temp);
- }
-
- /* left neighbor */
- out = cur - 1;
- out -= pitch;
- cur -= pitch;
- for (j = 0; j < 16; j++)
- {
- temp = *(out += pitch) - *(cur += pitch);
- SBE += ((temp >= 0) ? temp : -temp);
- }
-
- /* compare mincost/384 and SBE/64 */
- ABE = SBE / 32.0; //ABE = SBE/64.0; //
- if (ABE >= *min_cost / 256.0) //if( ABE*0.8 >= min_cost/384.0) //
- {
- intra = false; // no possibility of intra, just use inter
- }
- else
- {
- if (ave == true)
- {
- *min_cost = (*min_cost + (int)(SBE * 8)) >> 1; // possibility of intra, averaging the cost
- }
- else
- {
- *min_cost = (int)(SBE * 8);
- }
- }
-
- return intra;
-}
-
-/******* main function for macroblock prediction for the entire frame ***/
-/* if turns out to be IDR frame, set video->nal_unit_type to AVC_NALTYPE_IDR */
-void AVCMotionEstimation(AVCEncObject *encvid)
-{
- AVCCommonObj *video = encvid->common;
- int slice_type = video->slice_type;
- AVCFrameIO *currInput = encvid->currInput;
- AVCPictureData *refPic = video->RefPicList0[0];
- int i, j, k;
- int mbwidth = video->PicWidthInMbs;
- int mbheight = video->PicHeightInMbs;
- int totalMB = video->PicSizeInMbs;
- int pitch = currInput->pitch;
- AVCMacroblock *currMB, *mblock = video->mblock;
- AVCMV *mot_mb_16x16, *mot16x16 = encvid->mot16x16;
- // AVCMV *mot_mb_16x8, *mot_mb_8x16, *mot_mb_8x8, etc;
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- uint8 *intraSearch = encvid->intraSearch;
- uint FS_en = encvid->fullsearch_enable;
-
- int NumIntraSearch, start_i, numLoop, incr_i;
- int mbnum, offset;
- uint8 *cur, *best_cand[5];
- int totalSAD = 0; /* average SAD for rate control */
- int type_pred;
- int abe_cost;
-
-#ifdef HTFM
- /***** HYPOTHESIS TESTING ********/ /* 2/28/01 */
- int collect = 0;
- HTFM_Stat htfm_stat;
- double newvar[16];
- double exp_lamda[15];
- /*********************************/
-#endif
- int hp_guess = 0;
- uint32 mv_uint32;
-
- offset = 0;
-
- if (slice_type == AVC_I_SLICE)
- {
- /* cannot do I16 prediction here because it needs full decoding. */
- for (i = 0; i < totalMB; i++)
- {
- encvid->min_cost[i] = 0x7FFFFFFF; /* max value for int */
- }
-
- memset(intraSearch, 1, sizeof(uint8)*totalMB);
-
- encvid->firstIntraRefreshMBIndx = 0; /* reset this */
-
- return ;
- }
- else // P_SLICE
- {
- for (i = 0; i < totalMB; i++)
- {
- mblock[i].mb_intra = 0;
- }
- memset(intraSearch, 1, sizeof(uint8)*totalMB);
- }
-
- if (refPic->padded == 0)
- {
- AVCPaddingEdge(refPic);
- refPic->padded = 1;
- }
- /* Random INTRA update */
- if (rateCtrl->intraMBRate)
- {
- AVCRasterIntraUpdate(encvid, mblock, totalMB, rateCtrl->intraMBRate);
- }
-
- encvid->sad_extra_info = NULL;
-#ifdef HTFM
- /***** HYPOTHESIS TESTING ********/
- InitHTFM(video, &htfm_stat, newvar, &collect);
- /*********************************/
-#endif
-
- if ((rateCtrl->scdEnable == 1)
- && ((rateCtrl->frame_rate < 5.0) || (video->sliceHdr->frame_num > MIN_GOP)))
- /* do not try to detect a new scene if low frame rate and too close to previous I-frame */
- {
- incr_i = 2;
- numLoop = 2;
- start_i = 1;
- type_pred = 0; /* for initial candidate selection */
- }
- else
- {
- incr_i = 1;
- numLoop = 1;
- start_i = 0;
- type_pred = 2;
- }
-
- /* First pass, loop thru half the macroblock */
- /* determine scene change */
- /* Second pass, for the rest of macroblocks */
- NumIntraSearch = 0; // to be intra searched in the encoding loop.
- while (numLoop--)
- {
- for (j = 0; j < mbheight; j++)
- {
- if (incr_i > 1)
- start_i = (start_i == 0 ? 1 : 0) ; /* toggle 0 and 1 */
-
- offset = pitch * (j << 4) + (start_i << 4);
-
- mbnum = j * mbwidth + start_i;
-
- for (i = start_i; i < mbwidth; i += incr_i)
- {
- video->mbNum = mbnum;
- video->currMB = currMB = mblock + mbnum;
- mot_mb_16x16 = mot16x16 + mbnum;
-
- cur = currInput->YCbCr[0] + offset;
-
- if (currMB->mb_intra == 0) /* for INTER mode */
- {
-#if defined(HTFM)
- HTFMPrepareCurMB_AVC(encvid, &htfm_stat, cur, pitch);
-#else
- AVCPrepareCurMB(encvid, cur, pitch);
-#endif
- /************************************************************/
- /******** full-pel 1MV search **********************/
-
- AVCMBMotionSearch(encvid, cur, best_cand, i << 4, j << 4, type_pred,
- FS_en, &hp_guess);
-
- abe_cost = encvid->min_cost[mbnum] = mot_mb_16x16->sad;
-
- /* set mbMode and MVs */
- currMB->mbMode = AVC_P16;
- currMB->MBPartPredMode[0][0] = AVC_Pred_L0;
- mv_uint32 = ((mot_mb_16x16->y) << 16) | ((mot_mb_16x16->x) & 0xffff);
- for (k = 0; k < 32; k += 2)
- {
- currMB->mvL0[k>>1] = mv_uint32;
- }
-
- /* make a decision whether it should be tested for intra or not */
- if (i != mbwidth - 1 && j != mbheight - 1 && i != 0 && j != 0)
- {
- if (false == IntraDecisionABE(&abe_cost, cur, pitch, true))
- {
- intraSearch[mbnum] = 0;
- }
- else
- {
- NumIntraSearch++;
- rateCtrl->MADofMB[mbnum] = abe_cost;
- }
- }
- else // boundary MBs, always do intra search
- {
- NumIntraSearch++;
- }
-
- totalSAD += (int) rateCtrl->MADofMB[mbnum];//mot_mb_16x16->sad;
- }
- else /* INTRA update, use for prediction */
- {
- mot_mb_16x16[0].x = mot_mb_16x16[0].y = 0;
-
- /* reset all other MVs to zero */
- /* mot_mb_16x8, mot_mb_8x16, mot_mb_8x8, etc. */
- abe_cost = encvid->min_cost[mbnum] = 0x7FFFFFFF; /* max value for int */
-
- if (i != mbwidth - 1 && j != mbheight - 1 && i != 0 && j != 0)
- {
- IntraDecisionABE(&abe_cost, cur, pitch, false);
-
- rateCtrl->MADofMB[mbnum] = abe_cost;
- totalSAD += abe_cost;
- }
-
- NumIntraSearch++ ;
- /* cannot do I16 prediction here because it needs full decoding. */
- // intraSearch[mbnum] = 1;
-
- }
-
- mbnum += incr_i;
- offset += (incr_i << 4);
-
- } /* for i */
- } /* for j */
-
- /* since we cannot do intra/inter decision here, the SCD has to be
- based on other criteria such as motion vectors coherency or the SAD */
- if (incr_i > 1 && numLoop) /* scene change on and first loop */
- {
- //if(NumIntraSearch > ((totalMB>>3)<<1) + (totalMB>>3)) /* 75% of 50%MBs */
- if (NumIntraSearch*99 > (48*totalMB)) /* 20% of 50%MBs */
- /* this threshold needs more investigation since NumIntraSearch
- only counts potential intra MBs, not the actual ones */
- {
- /* we can choose to just encode I_SLICE without IDR */
- //video->nal_unit_type = AVC_NALTYPE_IDR;
- video->nal_unit_type = AVC_NALTYPE_SLICE;
- video->sliceHdr->slice_type = AVC_I_ALL_SLICE;
- video->slice_type = AVC_I_SLICE;
- memset(intraSearch, 1, sizeof(uint8)*totalMB);
- i = totalMB;
- while (i--)
- {
- mblock[i].mb_intra = 1;
- encvid->min_cost[i] = 0x7FFFFFFF; /* max value for int */
- }
-
- rateCtrl->totalSAD = totalSAD * 2; /* SAD */
-
- return ;
- }
- }
- /******** no scene change, continue motion search **********************/
- start_i = 0;
- type_pred++; /* second pass */
- }
-
- rateCtrl->totalSAD = totalSAD; /* SAD */
-
-#ifdef HTFM
- /***** HYPOTHESIS TESTING ********/
- if (collect)
- {
- collect = 0;
- UpdateHTFM(encvid, newvar, exp_lamda, &htfm_stat);
- }
- /*********************************/
-#endif
-
- return ;
-}
-
-/*=====================================================================
- Function: PaddingEdge
- Date: 09/16/2000
- Purpose: Pad edge of a Vop
-=====================================================================*/
-
-void AVCPaddingEdge(AVCPictureData *refPic)
-{
- uint8 *src, *dst;
- int i;
- int pitch, width, height;
- uint32 temp1, temp2;
-
- width = refPic->width;
- height = refPic->height;
- pitch = refPic->pitch;
-
- /* pad top */
- src = refPic->Sl;
-
- temp1 = *src; /* top-left corner */
- temp2 = src[width-1]; /* top-right corner */
- temp1 |= (temp1 << 8);
- temp1 |= (temp1 << 16);
- temp2 |= (temp2 << 8);
- temp2 |= (temp2 << 16);
-
- dst = src - (pitch << 4);
-
- *((uint32*)(dst - 16)) = temp1;
- *((uint32*)(dst - 12)) = temp1;
- *((uint32*)(dst - 8)) = temp1;
- *((uint32*)(dst - 4)) = temp1;
-
- memcpy(dst, src, width);
-
- *((uint32*)(dst += width)) = temp2;
- *((uint32*)(dst + 4)) = temp2;
- *((uint32*)(dst + 8)) = temp2;
- *((uint32*)(dst + 12)) = temp2;
-
- dst = dst - width - 16;
-
- i = 15;
- while (i--)
- {
- memcpy(dst + pitch, dst, pitch);
- dst += pitch;
- }
-
- /* pad sides */
- dst += (pitch + 16);
- src = dst;
- i = height;
- while (i--)
- {
- temp1 = *src;
- temp2 = src[width-1];
- temp1 |= (temp1 << 8);
- temp1 |= (temp1 << 16);
- temp2 |= (temp2 << 8);
- temp2 |= (temp2 << 16);
-
- *((uint32*)(dst - 16)) = temp1;
- *((uint32*)(dst - 12)) = temp1;
- *((uint32*)(dst - 8)) = temp1;
- *((uint32*)(dst - 4)) = temp1;
-
- *((uint32*)(dst += width)) = temp2;
- *((uint32*)(dst + 4)) = temp2;
- *((uint32*)(dst + 8)) = temp2;
- *((uint32*)(dst + 12)) = temp2;
-
- src += pitch;
- dst = src;
- }
-
- /* pad bottom */
- dst -= 16;
- i = 16;
- while (i--)
- {
- memcpy(dst, dst - pitch, pitch);
- dst += pitch;
- }
-
-
- return ;
-}
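
The padding code above replicates a single border pixel across a 32-bit word (temp |= temp << 8; temp |= temp << 16) so that four padded pixels can be written per store. A small standalone illustration of that trick (the helper name is mine):

    /* Splat one 8-bit border pixel into all four bytes of a 32-bit word,
     * e.g. 0x7F -> 0x7F7F7F7F, so edge padding can be written word-wise. */
    static unsigned int splat4(unsigned char pixel)
    {
        unsigned int w = pixel;
        w |= (w << 8);
        w |= (w << 16);
        return w;
    }
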
-
-/*===========================================================================
- Function: AVCRasterIntraUpdate
- Date: 2/26/01
- Purpose: Assign INTRA updates in raster-scan order.
- N macroblocks are updated per call (previously programmable).
-===========================================================================*/
-void AVCRasterIntraUpdate(AVCEncObject *encvid, AVCMacroblock *mblock, int totalMB, int numRefresh)
-{
- int indx, i;
-
- indx = encvid->firstIntraRefreshMBIndx;
- for (i = 0; i < numRefresh && indx < totalMB; i++)
- {
- (mblock + indx)->mb_intra = 1;
- encvid->intraSearch[indx++] = 1;
- }
-
- /* if we reach the end of the frame, reset and loop around */
- if (indx >= totalMB - 1)
- {
- indx = 0;
- while (i < numRefresh && indx < totalMB)
- {
- (mblock + indx)->mb_intra = 1;
- encvid->intraSearch[indx++] = 1;
- i++;
- }
- }
-
- encvid->firstIntraRefreshMBIndx = indx; /* update with a new value */
-
- return ;
-}
-
-
-#ifdef HTFM
-void InitHTFM(VideoEncData *encvid, HTFM_Stat *htfm_stat, double *newvar, int *collect)
-{
- AVCCommonObj *video = encvid->common;
- int i;
- int lx = video->currPic->width; // padding
- int lx2 = lx << 1;
- int lx3 = lx2 + lx;
- int rx = video->currPic->pitch;
- int rx2 = rx << 1;
- int rx3 = rx2 + rx;
-
- int *offset, *offset2;
-
- /* 4/11/01, collect data every 30 frames, doesn't have to be base layer */
- if (((int)video->sliceHdr->frame_num) % 30 == 1)
- {
-
- *collect = 1;
-
- htfm_stat->countbreak = 0;
- htfm_stat->abs_dif_mad_avg = 0;
-
- for (i = 0; i < 16; i++)
- {
- newvar[i] = 0.0;
- }
-// encvid->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING_HTFM_Collect;
- encvid->functionPointer->SAD_Macroblock = &SAD_MB_HTFM_Collect;
- encvid->functionPointer->SAD_MB_HalfPel[0] = NULL;
- encvid->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HP_HTFM_Collectxh;
- encvid->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HP_HTFM_Collectyh;
- encvid->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HP_HTFM_Collectxhyh;
- encvid->sad_extra_info = (void*)(htfm_stat);
- offset = htfm_stat->offsetArray;
- offset2 = htfm_stat->offsetRef;
- }
- else
- {
-// encvid->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING_HTFM;
- encvid->functionPointer->SAD_Macroblock = &SAD_MB_HTFM;
- encvid->functionPointer->SAD_MB_HalfPel[0] = NULL;
- encvid->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HP_HTFMxh;
- encvid->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HP_HTFMyh;
- encvid->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HP_HTFMxhyh;
- encvid->sad_extra_info = (void*)(encvid->nrmlz_th);
- offset = encvid->nrmlz_th + 16;
- offset2 = encvid->nrmlz_th + 32;
- }
-
- offset[0] = 0;
- offset[1] = lx2 + 2;
- offset[2] = 2;
- offset[3] = lx2;
- offset[4] = lx + 1;
- offset[5] = lx3 + 3;
- offset[6] = lx + 3;
- offset[7] = lx3 + 1;
- offset[8] = lx;
- offset[9] = lx3 + 2;
- offset[10] = lx3 ;
- offset[11] = lx + 2 ;
- offset[12] = 1;
- offset[13] = lx2 + 3;
- offset[14] = lx2 + 1;
- offset[15] = 3;
-
- offset2[0] = 0;
- offset2[1] = rx2 + 2;
- offset2[2] = 2;
- offset2[3] = rx2;
- offset2[4] = rx + 1;
- offset2[5] = rx3 + 3;
- offset2[6] = rx + 3;
- offset2[7] = rx3 + 1;
- offset2[8] = rx;
- offset2[9] = rx3 + 2;
- offset2[10] = rx3 ;
- offset2[11] = rx + 2 ;
- offset2[12] = 1;
- offset2[13] = rx2 + 3;
- offset2[14] = rx2 + 1;
- offset2[15] = 3;
-
- return ;
-}
-
-void UpdateHTFM(AVCEncObject *encvid, double *newvar, double *exp_lamda, HTFM_Stat *htfm_stat)
-{
- if (htfm_stat->countbreak == 0)
- htfm_stat->countbreak = 1;
-
- newvar[0] = (double)(htfm_stat->abs_dif_mad_avg) / (htfm_stat->countbreak * 16.);
-
- if (newvar[0] < 0.001)
- {
- newvar[0] = 0.001; /* to prevent floating overflow */
- }
- exp_lamda[0] = 1 / (newvar[0] * 1.4142136);
- exp_lamda[1] = exp_lamda[0] * 1.5825;
- exp_lamda[2] = exp_lamda[0] * 2.1750;
- exp_lamda[3] = exp_lamda[0] * 3.5065;
- exp_lamda[4] = exp_lamda[0] * 3.1436;
- exp_lamda[5] = exp_lamda[0] * 3.5315;
- exp_lamda[6] = exp_lamda[0] * 3.7449;
- exp_lamda[7] = exp_lamda[0] * 4.5854;
- exp_lamda[8] = exp_lamda[0] * 4.6191;
- exp_lamda[9] = exp_lamda[0] * 5.4041;
- exp_lamda[10] = exp_lamda[0] * 6.5974;
- exp_lamda[11] = exp_lamda[0] * 10.5341;
- exp_lamda[12] = exp_lamda[0] * 10.0719;
- exp_lamda[13] = exp_lamda[0] * 12.0516;
- exp_lamda[14] = exp_lamda[0] * 15.4552;
-
- CalcThreshold(HTFM_Pf, exp_lamda, encvid->nrmlz_th);
- return ;
-}
-
-
-void CalcThreshold(double pf, double exp_lamda[], int nrmlz_th[])
-{
- int i;
- double temp[15];
- // printf("\nLamda: ");
-
- /* parametric PREMODELling */
- for (i = 0; i < 15; i++)
- {
- // printf("%g ",exp_lamda[i]);
- if (pf < 0.5)
- temp[i] = 1 / exp_lamda[i] * M4VENC_LOG(2 * pf);
- else
- temp[i] = -1 / exp_lamda[i] * M4VENC_LOG(2 * (1 - pf));
- }
-
- nrmlz_th[15] = 0;
- for (i = 0; i < 15; i++) /* scale up to the number of pixels */
- nrmlz_th[i] = (int)(temp[i] * ((i + 1) << 4) + 0.5);
-
- return ;
-}
-
-void HTFMPrepareCurMB_AVC(AVCEncObject *encvid, HTFM_Stat *htfm_stat, uint8 *cur, int pitch)
-{
- AVCCommonObj *video = encvid->common;
- uint32 *htfmMB = (uint32*)(encvid->currYMB);
- uint8 *ptr, byte;
- int *offset;
- int i;
- uint32 word;
-
- if (((int)video->sliceHdr->frame_num) % 30 == 1)
- {
- offset = htfm_stat->offsetArray;
- }
- else
- {
- offset = encvid->nrmlz_th + 16;
- }
-
- for (i = 0; i < 16; i++)
- {
- ptr = cur + offset[i];
- word = ptr[0];
- byte = ptr[4];
- word |= (byte << 8);
- byte = ptr[8];
- word |= (byte << 16);
- byte = ptr[12];
- word |= (byte << 24);
- *htfmMB++ = word;
-
- word = *(ptr += (pitch << 2));
- byte = ptr[4];
- word |= (byte << 8);
- byte = ptr[8];
- word |= (byte << 16);
- byte = ptr[12];
- word |= (byte << 24);
- *htfmMB++ = word;
-
- word = *(ptr += (pitch << 2));
- byte = ptr[4];
- word |= (byte << 8);
- byte = ptr[8];
- word |= (byte << 16);
- byte = ptr[12];
- word |= (byte << 24);
- *htfmMB++ = word;
-
- word = *(ptr += (pitch << 2));
- byte = ptr[4];
- word |= (byte << 8);
- byte = ptr[8];
- word |= (byte << 16);
- byte = ptr[12];
- word |= (byte << 24);
- *htfmMB++ = word;
- }
-
- return ;
-}
-
-
-#endif // HTFM
-
-void AVCPrepareCurMB(AVCEncObject *encvid, uint8 *cur, int pitch)
-{
- void* tmp = (void*)(encvid->currYMB);
- uint32 *currYMB = (uint32*) tmp;
- int i;
-
- cur -= pitch;
-
- for (i = 0; i < 16; i++)
- {
- *currYMB++ = *((uint32*)(cur += pitch));
- *currYMB++ = *((uint32*)(cur + 4));
- *currYMB++ = *((uint32*)(cur + 8));
- *currYMB++ = *((uint32*)(cur + 12));
- }
-
- return ;
-}
-
-#ifdef FIXED_INTERPRED_MODE
-
-/* due to the complexity of the predicted motion vector, we may not decide to skip
-a macroblock here just yet. */
-/* We will find the best motion vector and the best intra prediction mode for each block. */
-/* output are
- currMB->NumMbPart, currMB->MbPartWidth, currMB->MbPartHeight,
- currMB->NumSubMbPart[], currMB->SubMbPartWidth[], currMB->SubMbPartHeight,
- currMB->MBPartPredMode[][] (L0 or L1 or BiPred)
- currMB->RefIdx[], currMB->ref_idx_L0[],
- currMB->mvL0[], currMB->mvL1[]
- */
-
-AVCEnc_Status AVCMBMotionSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum,
- int num_pass)
-{
- AVCCommonObj *video = encvid->common;
- int mbPartIdx, subMbPartIdx;
- int16 *mv;
- int i;
- int SubMbPartHeight, SubMbPartWidth, NumSubMbPart;
-
- /* assign value to currMB->MBPartPredMode[][x],subMbMode[],NumSubMbPart[],SubMbPartWidth[],SubMbPartHeight[] */
-
- currMB->mbMode = FIXED_INTERPRED_MODE;
- currMB->mb_intra = 0;
-
- if (currMB->mbMode == AVC_P16)
- {
- currMB->NumMbPart = 1;
- currMB->MbPartWidth = 16;
- currMB->MbPartHeight = 16;
- currMB->SubMbPartHeight[0] = 16;
- currMB->SubMbPartWidth[0] = 16;
- currMB->NumSubMbPart[0] = 1;
- }
- else if (currMB->mbMode == AVC_P16x8)
- {
- currMB->NumMbPart = 2;
- currMB->MbPartWidth = 16;
- currMB->MbPartHeight = 8;
- for (i = 0; i < 2; i++)
- {
- currMB->SubMbPartWidth[i] = 16;
- currMB->SubMbPartHeight[i] = 8;
- currMB->NumSubMbPart[i] = 1;
- }
- }
- else if (currMB->mbMode == AVC_P8x16)
- {
- currMB->NumMbPart = 2;
- currMB->MbPartWidth = 8;
- currMB->MbPartHeight = 16;
- for (i = 0; i < 2; i++)
- {
- currMB->SubMbPartWidth[i] = 8;
- currMB->SubMbPartHeight[i] = 16;
- currMB->NumSubMbPart[i] = 1;
- }
- }
- else if (currMB->mbMode == AVC_P8 || currMB->mbMode == AVC_P8ref0)
- {
- currMB->NumMbPart = 4;
- currMB->MbPartWidth = 8;
- currMB->MbPartHeight = 8;
- if (FIXED_SUBMB_MODE == AVC_8x8)
- {
- SubMbPartHeight = 8;
- SubMbPartWidth = 8;
- NumSubMbPart = 1;
- }
- else if (FIXED_SUBMB_MODE == AVC_8x4)
- {
- SubMbPartHeight = 4;
- SubMbPartWidth = 8;
- NumSubMbPart = 2;
- }
- else if (FIXED_SUBMB_MODE == AVC_4x8)
- {
- SubMbPartHeight = 8;
- SubMbPartWidth = 4;
- NumSubMbPart = 2;
- }
- else if (FIXED_SUBMB_MODE == AVC_4x4)
- {
- SubMbPartHeight = 4;
- SubMbPartWidth = 4;
- NumSubMbPart = 4;
- }
-
- for (i = 0; i < 4; i++)
- {
- currMB->subMbMode[i] = FIXED_SUBMB_MODE;
- currMB->SubMbPartHeight[i] = SubMbPartHeight;
- currMB->SubMbPartWidth[i] = SubMbPartWidth;
- currMB->NumSubMbPart[i] = NumSubMbPart;
- }
- }
- else /* it's probably intra mode */
- {
- return AVCENC_SUCCESS;
- }
-
- for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
- {
- currMB->MBPartPredMode[mbPartIdx][0] = AVC_Pred_L0;
- currMB->ref_idx_L0[mbPartIdx] = FIXED_REF_IDX;
- currMB->RefIdx[mbPartIdx] = video->RefPicList0[FIXED_REF_IDX]->RefIdx;
-
- for (subMbPartIdx = 0; subMbPartIdx < 4; subMbPartIdx++)
- {
- mv = (int16*)(currMB->mvL0 + (mbPartIdx << 2) + subMbPartIdx);
-
- *mv++ = FIXED_MVX;
- *mv = FIXED_MVY;
- }
- }
-
- encvid->min_cost = 0;
-
- return AVCENC_SUCCESS;
-}
-
-#else /* perform the search */
-
-/* This option #1 search is very similar to PV's MPEG4 motion search algorithm.
-  The search is done in a hierarchical manner, from the 16x16 MB down to smaller and smaller
-  partitions. At each level, a decision can be made to stop the search if the expected
-  prediction gain is not worth the computation. The decision can also be made at the finest
-  level for more fullsearch-like behavior, at the price of heavier computation. */
-void AVCMBMotionSearch(AVCEncObject *encvid, uint8 *cur, uint8 *best_cand[],
- int i0, int j0, int type_pred, int FS_en, int *hp_guess)
-{
- AVCCommonObj *video = encvid->common;
- AVCPictureData *currPic = video->currPic;
- AVCSeqParamSet *currSPS = video->currSeqParams;
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- AVCMacroblock *currMB = video->currMB;
- uint8 *ref, *cand, *ncand;
- void *extra_info = encvid->sad_extra_info;
- int mbnum = video->mbNum;
- int width = currPic->width; /* 6/12/01, must be multiple of 16 */
- int height = currPic->height;
- AVCMV *mot16x16 = encvid->mot16x16;
- int (*SAD_Macroblock)(uint8*, uint8*, int, void*) = encvid->functionPointer->SAD_Macroblock;
-
- int range = rateCtrl->mvRange;
-
- int lx = currPic->pitch; /* padding */
- int i, j, imin, jmin, ilow, ihigh, jlow, jhigh;
- int d, dmin, dn[9];
- int k;
- int mvx[5], mvy[5];
- int num_can, center_again;
- int last_loc, new_loc = 0;
- int step, max_step = range >> 1;
- int next;
-
- int cmvx, cmvy; /* estimated predicted MV */
- int lev_idx;
- int lambda_motion = encvid->lambda_motion;
- uint8 *mvbits = encvid->mvbits;
- int mvshift = 2;
- int mvcost;
-
- int min_sad = 65535;
-
- ref = video->RefPicList0[DEFAULT_REF_IDX]->Sl; /* origin of actual frame */
-
- /* have to initialize these params, necessary for interprediction part */
- currMB->NumMbPart = 1;
- currMB->SubMbPartHeight[0] = 16;
- currMB->SubMbPartWidth[0] = 16;
- currMB->NumSubMbPart[0] = 1;
- currMB->ref_idx_L0[0] = currMB->ref_idx_L0[1] =
- currMB->ref_idx_L0[2] = currMB->ref_idx_L0[3] = DEFAULT_REF_IDX;
- currMB->ref_idx_L1[0] = currMB->ref_idx_L1[1] =
- currMB->ref_idx_L1[2] = currMB->ref_idx_L1[3] = DEFAULT_REF_IDX;
- currMB->RefIdx[0] = currMB->RefIdx[1] =
- currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[DEFAULT_REF_IDX]->RefIdx;
-
- cur = encvid->currYMB; /* use smaller memory space for current MB */
-
- /* find limit of the search (adjusting search range)*/
- lev_idx = mapLev2Idx[currSPS->level_idc];
-
- /* we can make this part dynamic based on previous statistics */
- ilow = i0 - range;
- if (i0 - ilow > 2047) /* clip to conform with the standard */
- {
- ilow = i0 - 2047;
- }
- if (ilow < -13) // changed from -15 to -13 because the 6-tap filter needs 2 extra lines.
- {
- ilow = -13;
- }
-
- ihigh = i0 + range - 1;
- if (ihigh - i0 > 2047) /* clip to conform with the standard */
- {
- ihigh = i0 + 2047;
- }
- if (ihigh > width - 3)
- {
- ihigh = width - 3; // change from width-1 to width-3 for the same reason as above
- }
-
- jlow = j0 - range;
- if (j0 - jlow > MaxVmvR[lev_idx] - 1) /* clip to conform with the standard */
- {
- jlow = j0 - MaxVmvR[lev_idx] + 1;
- }
- if (jlow < -13) // same reason as above
- {
- jlow = -13;
- }
-
- jhigh = j0 + range - 1;
- if (jhigh - j0 > MaxVmvR[lev_idx] - 1) /* clip to conform with the standard */
- {
- jhigh = j0 + MaxVmvR[lev_idx] - 1;
- }
- if (jhigh > height - 3) // same reason as above
- {
- jhigh = height - 3;
- }
-
- /* find initial motion vector & predicted MV*/
- AVCCandidateSelection(mvx, mvy, &num_can, i0 >> 4, j0 >> 4, encvid, type_pred, &cmvx, &cmvy);
-
- imin = i0;
- jmin = j0; /* needed for fullsearch */
- ncand = ref + i0 + j0 * lx;
-
- /* for first row of MB, fullsearch can be used */
- if (FS_en)
- {
- *hp_guess = 0; /* no guess for fast half-pel */
-
- dmin = AVCFullSearch(encvid, ref, cur, &imin, &jmin, ilow, ihigh, jlow, jhigh, cmvx, cmvy);
-
- ncand = ref + imin + jmin * lx;
- }
- else
- { /* full-search the top row only up to the (0,3) MB */
- /* up to 30% complexity saving with comparable quality */
- if (video->PrevRefFrameNum == 0 && j0 == 0 && i0 <= 64 && type_pred != 1)
- {
- *hp_guess = 0; /* no guess for fast half-pel */
- dmin = AVCFullSearch(encvid, ref, cur, &imin, &jmin, ilow, ihigh, jlow, jhigh, cmvx, cmvy);
- ncand = ref + imin + jmin * lx;
- }
- else
- {
- /************** initialize candidate **************************/
-
- dmin = 65535;
-
- /* check if all are equal */
- if (num_can == ALL_CAND_EQUAL)
- {
- i = i0 + mvx[0];
- j = j0 + mvy[0];
-
- if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
- {
- cand = ref + i + j * lx;
-
- d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);
- mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);
- d += mvcost;
-
- if (d < dmin)
- {
- dmin = d;
- imin = i;
- jmin = j;
- ncand = cand;
- min_sad = d - mvcost; // for rate control
- }
- }
- }
- else
- {
- /************** evaluate unique candidates **********************/
- for (k = 0; k < num_can; k++)
- {
- i = i0 + mvx[k];
- j = j0 + mvy[k];
-
- if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
- {
- cand = ref + i + j * lx;
- d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);
- mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);
- d += mvcost;
-
- if (d < dmin)
- {
- dmin = d;
- imin = i;
- jmin = j;
- ncand = cand;
- min_sad = d - mvcost; // for rate control
- }
- }
- }
- }
-
- /******************* local refinement ***************************/
- center_again = 0;
- last_loc = new_loc = 0;
- // ncand = ref + jmin*lx + imin; /* center of the search */
- step = 0;
- dn[0] = dmin;
- while (!center_again && step <= max_step)
- {
-
- AVCMoveNeighborSAD(dn, last_loc);
-
- center_again = 1;
- i = imin;
- j = jmin - 1;
- cand = ref + i + j * lx;
-
- /* starting from [0,-1] */
- /* spiral check one step at a time*/
- for (k = 2; k <= 8; k += 2)
- {
- if (!tab_exclude[last_loc][k]) /* exclude last step computation */
- { /* not already computed */
- if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
- {
- d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);
- mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);
- d += mvcost;
-
- dn[k] = d; /* keep it for half pel use */
-
- if (d < dmin)
- {
- ncand = cand;
- dmin = d;
- imin = i;
- jmin = j;
- center_again = 0;
- new_loc = k;
- min_sad = d - mvcost; // for rate control
- }
- }
- }
- if (k == 8) /* end side search*/
- {
- if (!center_again)
- {
- k = -1; /* start diagonal search */
- cand -= lx;
- j--;
- }
- }
- else
- {
- next = refine_next[k][0];
- i += next;
- cand += next;
- next = refine_next[k][1];
- j += next;
- cand += lx * next;
- }
- }
- last_loc = new_loc;
- step ++;
- }
- if (!center_again)
- AVCMoveNeighborSAD(dn, last_loc);
-
- *hp_guess = AVCFindMin(dn);
-
- encvid->rateCtrl->MADofMB[mbnum] = min_sad / 256.0;
- }
- }
-
- mot16x16[mbnum].sad = dmin;
- mot16x16[mbnum].x = (imin - i0) << 2;
- mot16x16[mbnum].y = (jmin - j0) << 2;
- best_cand[0] = ncand;
-
- if (rateCtrl->subPelEnable) // always enable half-pel search
- {
- /* find half-pel resolution motion vector */
- min_sad = AVCFindHalfPelMB(encvid, cur, mot16x16 + mbnum, best_cand[0], i0, j0, *hp_guess, cmvx, cmvy);
-
- encvid->rateCtrl->MADofMB[mbnum] = min_sad / 256.0;
-
-
- if (encvid->best_qpel_pos == -1)
- {
- ncand = encvid->hpel_cand[encvid->best_hpel_pos];
- }
- else
- {
- ncand = encvid->qpel_cand[encvid->best_qpel_pos];
- }
- }
- else
- {
- encvid->rateCtrl->MADofMB[mbnum] = min_sad / 256.0;
- }
-
- /** do motion comp here for now */
- ref = currPic->Sl + i0 + j0 * lx;
- /* copy from the best result to current Picture */
- for (j = 0; j < 16; j++)
- {
- for (i = 0; i < 16; i++)
- {
- *ref++ = *ncand++;
- }
- ref += (lx - 16);
- ncand += 8;
- }
-
- return ;
-}
-
-#endif
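
Throughout the search above, every candidate is scored as its SAD plus MV_COST(lambda_motion, mvshift, mvd_x, mvd_y, cmvx, cmvy). MV_COST itself is defined elsewhere in the encoder; the following is only a plausible sketch of such a Lagrangian motion cost, with the mv_bits pointer standing in for the centered mvbits[] table built in InitMotionSearchModule() (the exact macro may differ):

    /* Rate part of the Lagrangian cost: lambda times the bits needed to code
     * the MV difference against the predictor (cmvx, cmvy), in quarter-pel
     * units; 'shift' converts the integer-pel candidate offset to quarter-pel. */
    static int mv_cost_sketch(int lambda, int shift,
                              int dx, int dy, int pred_x, int pred_y,
                              const unsigned char *mv_bits /* centered table */)
    {
        int bits = mv_bits[(dx << shift) - pred_x] +
                   mv_bits[(dy << shift) - pred_y];
        return lambda * bits;
    }

The full candidate score is then d = SAD(cand, cur) + mv_cost, and min_sad = d - mvcost is kept separately because rate control wants the distortion term alone.
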
-
-/*===============================================================================
- Function: AVCFullSearch
- Date: 09/16/2000
- Purpose: Perform full-search motion estimation over the range of search
- region in a spiral-outward manner.
- Input/Output: VideoEncData, current Vol, previous Vop, pointer to the left corner of
- current VOP, current coord (also output), boundaries.
-===============================================================================*/
-int AVCFullSearch(AVCEncObject *encvid, uint8 *prev, uint8 *cur,
- int *imin, int *jmin, int ilow, int ihigh, int jlow, int jhigh,
- int cmvx, int cmvy)
-{
- int range = encvid->rateCtrl->mvRange;
- AVCPictureData *currPic = encvid->common->currPic;
- uint8 *cand;
- int i, j, k, l;
- int d, dmin;
- int i0 = *imin; /* current position */
- int j0 = *jmin;
- int (*SAD_Macroblock)(uint8*, uint8*, int, void*) = encvid->functionPointer->SAD_Macroblock;
- void *extra_info = encvid->sad_extra_info;
- int lx = currPic->pitch; /* with padding */
-
- int offset = i0 + j0 * lx;
-
- int lambda_motion = encvid->lambda_motion;
- uint8 *mvbits = encvid->mvbits;
- int mvshift = 2;
- int mvcost;
- int min_sad;
-
- cand = prev + offset;
-
- dmin = (*SAD_Macroblock)(cand, cur, (65535 << 16) | lx, (void*)extra_info);
- mvcost = MV_COST(lambda_motion, mvshift, 0, 0, cmvx, cmvy);
- min_sad = dmin;
- dmin += mvcost;
-
- /* perform spiral search */
- for (k = 1; k <= range; k++)
- {
-
- i = i0 - k;
- j = j0 - k;
-
- cand = prev + i + j * lx;
-
- for (l = 0; l < 8*k; l++)
- {
- /* no need for boundary checking again */
- if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
- {
- d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, (void*)extra_info);
- mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);
- d += mvcost;
-
- if (d < dmin)
- {
- dmin = d;
- *imin = i;
- *jmin = j;
- min_sad = d - mvcost;
- }
- }
-
- if (l < (k << 1))
- {
- i++;
- cand++;
- }
- else if (l < (k << 2))
- {
- j++;
- cand += lx;
- }
- else if (l < ((k << 2) + (k << 1)))
- {
- i--;
- cand--;
- }
- else
- {
- j--;
- cand -= lx;
- }
- }
- }
-
- encvid->rateCtrl->MADofMB[encvid->common->mbNum] = (min_sad / 256.0); // for rate control
-
- return dmin;
-}
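
The spiral in AVCFullSearch above visits the center first and then, for each ring k = 1..range, walks the 8k positions on the perimeter of the (2k+1)x(2k+1) square starting from its top-left corner. A compact standalone sketch of that traversal order (the callback-based shape is mine, purely illustrative):

    /* Visit (di, dj) offsets in the same spiral-outward order as the
     * full-search loop: center, then ring 1, ring 2, ... up to 'range'. */
    static void spiral_visit(int range, void (*visit)(int di, int dj))
    {
        visit(0, 0);                           /* center first */
        for (int k = 1; k <= range; k++)
        {
            int di = -k, dj = -k;              /* top-left corner of ring k */
            for (int l = 0; l < 8 * k; l++)
            {
                visit(di, dj);
                if (l < 2 * k)      di++;      /* top edge, left to right    */
                else if (l < 4 * k) dj++;      /* right edge, top to bottom  */
                else if (l < 6 * k) di--;      /* bottom edge, right to left */
                else                dj--;      /* left edge, bottom to top   */
            }
        }
    }
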
-
-/*===============================================================================
- Function: AVCCandidateSelection
- Date: 09/16/2000
- Purpose: Fill up the list of candidate using spatio-temporal correlation
- among neighboring blocks.
- Input/Output: type_pred = 0: first pass, 1: second pass, or no SCD
- Modified: , 09/23/01, get rid of redundant candidates before passing back.
- , 09/11/07, added return for modified predicted MV, this will be
- needed for both fast search and fullsearch.
-===============================================================================*/
-
-void AVCCandidateSelection(int *mvx, int *mvy, int *num_can, int imb, int jmb,
- AVCEncObject *encvid, int type_pred, int *cmvx, int *cmvy)
-{
- AVCCommonObj *video = encvid->common;
- AVCMV *mot16x16 = encvid->mot16x16;
- AVCMV *pmot;
- int mbnum = video->mbNum;
- int mbwidth = video->PicWidthInMbs;
- int mbheight = video->PicHeightInMbs;
- int i, j, same, num1;
-
- /* this part is for predicted MV */
- int pmvA_x = 0, pmvA_y = 0, pmvB_x = 0, pmvB_y = 0, pmvC_x = 0, pmvC_y = 0;
- int availA = 0, availB = 0, availC = 0;
-
- *num_can = 0;
-
- if (video->PrevRefFrameNum != 0) // previous frame is not an IDR frame
- {
- /* Spatio-Temporal Candidate (five candidates) */
- if (type_pred == 0) /* first pass */
- {
- pmot = &mot16x16[mbnum]; /* same coordinate previous frame */
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- if (imb >= (mbwidth >> 1) && imb > 0) /*left neighbor previous frame */
- {
- pmot = &mot16x16[mbnum-1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- else if (imb + 1 < mbwidth) /*right neighbor previous frame */
- {
- pmot = &mot16x16[mbnum+1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
-
- if (jmb < mbheight - 1) /*bottom neighbor previous frame */
- {
- pmot = &mot16x16[mbnum+mbwidth];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- else if (jmb > 0) /*upper neighbor previous frame */
- {
- pmot = &mot16x16[mbnum-mbwidth];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
-
- if (imb > 0 && jmb > 0) /* upper-left neighbor current frame*/
- {
- pmot = &mot16x16[mbnum-mbwidth-1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- if (jmb > 0 && imb < mbheight - 1) /* upper right neighbor current frame*/
- {
- pmot = &mot16x16[mbnum-mbwidth+1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- }
- else /* second pass */
- /* original ST1 algorithm */
- {
- pmot = &mot16x16[mbnum]; /* same coordinate previous frame */
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
-
- if (imb > 0) /*left neighbor current frame */
- {
- pmot = &mot16x16[mbnum-1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- if (jmb > 0) /*upper neighbor current frame */
- {
- pmot = &mot16x16[mbnum-mbwidth];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- if (imb < mbwidth - 1) /*right neighbor previous frame */
- {
- pmot = &mot16x16[mbnum+1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- if (jmb < mbheight - 1) /*bottom neighbor previous frame */
- {
- pmot = &mot16x16[mbnum+mbwidth];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- }
-
- /* get predicted MV */
- if (imb > 0) /* get MV from left (A) neighbor either on current or previous frame */
- {
- availA = 1;
- pmot = &mot16x16[mbnum-1];
- pmvA_x = pmot->x;
- pmvA_y = pmot->y;
- }
-
- if (jmb > 0) /* get MV from top (B) neighbor either on current or previous frame */
- {
- availB = 1;
- pmot = &mot16x16[mbnum-mbwidth];
- pmvB_x = pmot->x;
- pmvB_y = pmot->y;
-
- availC = 1;
-
- if (imb < mbwidth - 1) /* get MV from top-right (C) neighbor of current frame */
- {
- pmot = &mot16x16[mbnum-mbwidth+1];
- }
- else /* get MV from top-left (D) neighbor of current frame */
- {
- pmot = &mot16x16[mbnum-mbwidth-1];
- }
- pmvC_x = pmot->x;
- pmvC_y = pmot->y;
- }
-
- }
- else /* only Spatial Candidate (four candidates)*/
- {
- if (type_pred == 0) /*first pass*/
- {
- if (imb > 1) /* neighbor two blocks away to the left */
- {
- pmot = &mot16x16[mbnum-2];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- if (imb > 0 && jmb > 0) /* upper-left neighbor */
- {
- pmot = &mot16x16[mbnum-mbwidth-1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- if (jmb > 0 && imb < mbheight - 1) /* upper right neighbor */
- {
- pmot = &mot16x16[mbnum-mbwidth+1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
-
- /* get predicted MV */
- if (imb > 1) /* get MV from 2nd left (A) neighbor either of current frame */
- {
- availA = 1;
- pmot = &mot16x16[mbnum-2];
- pmvA_x = pmot->x;
- pmvA_y = pmot->y;
- }
-
- if (jmb > 0 && imb > 0) /* get MV from top-left (B) neighbor of current frame */
- {
- availB = 1;
- pmot = &mot16x16[mbnum-mbwidth-1];
- pmvB_x = pmot->x;
- pmvB_y = pmot->y;
- }
-
- if (jmb > 0 && imb < mbwidth - 1)
- {
- availC = 1;
- pmot = &mot16x16[mbnum-mbwidth+1];
- pmvC_x = pmot->x;
- pmvC_y = pmot->y;
- }
- }
-//#ifdef SCENE_CHANGE_DETECTION
- /* second pass (ST2 algorithm)*/
- else
- {
- if (type_pred == 1) /* 4/7/01 */
- {
- if (imb > 0) /*left neighbor current frame */
- {
- pmot = &mot16x16[mbnum-1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- if (jmb > 0) /*upper neighbor current frame */
- {
- pmot = &mot16x16[mbnum-mbwidth];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- if (imb < mbwidth - 1) /*right neighbor current frame */
- {
- pmot = &mot16x16[mbnum+1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- if (jmb < mbheight - 1) /*bottom neighbor current frame */
- {
- pmot = &mot16x16[mbnum+mbwidth];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- }
- //#else
- else /* original ST1 algorithm */
- {
- if (imb > 0) /*left neighbor current frame */
- {
- pmot = &mot16x16[mbnum-1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
-
- if (jmb > 0) /*upper-left neighbor current frame */
- {
- pmot = &mot16x16[mbnum-mbwidth-1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
-
- }
- if (jmb > 0) /*upper neighbor current frame */
- {
- pmot = &mot16x16[mbnum-mbwidth];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
-
- if (imb < mbheight - 1) /*upper-right neighbor current frame */
- {
- pmot = &mot16x16[mbnum-mbwidth+1];
- mvx[(*num_can)] = (pmot->x) >> 2;
- mvy[(*num_can)++] = (pmot->y) >> 2;
- }
- }
- }
-
- /* get predicted MV */
- if (imb > 0) /* get MV from left (A) neighbor either on current or previous frame */
- {
- availA = 1;
- pmot = &mot16x16[mbnum-1];
- pmvA_x = pmot->x;
- pmvA_y = pmot->y;
- }
-
- if (jmb > 0) /* get MV from top (B) neighbor either on current or previous frame */
- {
- availB = 1;
- pmot = &mot16x16[mbnum-mbwidth];
- pmvB_x = pmot->x;
- pmvB_y = pmot->y;
-
- availC = 1;
-
- if (imb < mbwidth - 1) /* get MV from top-right (C) neighbor of current frame */
- {
- pmot = &mot16x16[mbnum-mbwidth+1];
- }
- else /* get MV from top-left (D) neighbor of current frame */
- {
- pmot = &mot16x16[mbnum-mbwidth-1];
- }
- pmvC_x = pmot->x;
- pmvC_y = pmot->y;
- }
- }
-//#endif
- }
-
- /* 3/23/01, remove redundant candidate (possible k-mean) */
- num1 = *num_can;
- *num_can = 1;
- for (i = 1; i < num1; i++)
- {
- same = 0;
- j = 0;
- while (!same && j < *num_can)
- {
-#if (CANDIDATE_DISTANCE==0)
- if (mvx[i] == mvx[j] && mvy[i] == mvy[j])
-#else
- // modified k-mean, 3/24/01, shouldn't be greater than 3
- if (AVC_ABS(mvx[i] - mvx[j]) + AVC_ABS(mvy[i] - mvy[j]) < CANDIDATE_DISTANCE)
-#endif
- same = 1;
- j++;
- }
- if (!same)
- {
- mvx[*num_can] = mvx[i];
- mvy[*num_can] = mvy[i];
- (*num_can)++;
- }
- }
-
- if (num1 == 5 && *num_can == 1)
- *num_can = ALL_CAND_EQUAL; /* all are equal */
-
- /* calculate predicted MV */
-
- if (availA && !(availB || availC))
- {
- *cmvx = pmvA_x;
- *cmvy = pmvA_y;
- }
- else
- {
- *cmvx = AVC_MEDIAN(pmvA_x, pmvB_x, pmvC_x);
- *cmvy = AVC_MEDIAN(pmvA_y, pmvB_y, pmvC_y);
- }
-
- return ;
-}
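
The predicted MV above is the component-wise AVC_MEDIAN of the left (A), top (B), and top-right/top-left (C) neighbors, falling back to A alone when neither B nor C is available. For reference, a minimal median-of-three matching the standard H.264 predictor (assuming AVC_MEDIAN in the deleted headers computes the same thing):

    /* Median of three integers: sum minus max minus min. */
    static int median3(int a, int b, int c)
    {
        int mx = (a > b) ? a : b;  if (c > mx) mx = c;
        int mn = (a < b) ? a : b;  if (c < mn) mn = c;
        return a + b + c - mx - mn;
    }

For example median3(4, -2, 1) = 1, so a single outlier neighbor MV does not drag the predictor with it.
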
-
-
-/*************************************************************
- Function: AVCMoveNeighborSAD
- Date: 3/27/01
- Purpose: Move neighboring SAD around when center has shifted
-*************************************************************/
-
-void AVCMoveNeighborSAD(int dn[], int new_loc)
-{
- int tmp[9];
- tmp[0] = dn[0];
- tmp[1] = dn[1];
- tmp[2] = dn[2];
- tmp[3] = dn[3];
- tmp[4] = dn[4];
- tmp[5] = dn[5];
- tmp[6] = dn[6];
- tmp[7] = dn[7];
- tmp[8] = dn[8];
- dn[0] = dn[1] = dn[2] = dn[3] = dn[4] = dn[5] = dn[6] = dn[7] = dn[8] = 65536;
-
- switch (new_loc)
- {
- case 0:
- break;
- case 1:
- dn[4] = tmp[2];
- dn[5] = tmp[0];
- dn[6] = tmp[8];
- break;
- case 2:
- dn[4] = tmp[3];
- dn[5] = tmp[4];
- dn[6] = tmp[0];
- dn[7] = tmp[8];
- dn[8] = tmp[1];
- break;
- case 3:
- dn[6] = tmp[4];
- dn[7] = tmp[0];
- dn[8] = tmp[2];
- break;
- case 4:
- dn[1] = tmp[2];
- dn[2] = tmp[3];
- dn[6] = tmp[5];
- dn[7] = tmp[6];
- dn[8] = tmp[0];
- break;
- case 5:
- dn[1] = tmp[0];
- dn[2] = tmp[4];
- dn[8] = tmp[6];
- break;
- case 6:
- dn[1] = tmp[8];
- dn[2] = tmp[0];
- dn[3] = tmp[4];
- dn[4] = tmp[5];
- dn[8] = tmp[7];
- break;
- case 7:
- dn[2] = tmp[8];
- dn[3] = tmp[0];
- dn[4] = tmp[6];
- break;
- case 8:
- dn[2] = tmp[1];
- dn[3] = tmp[2];
- dn[4] = tmp[0];
- dn[5] = tmp[6];
- dn[6] = tmp[7];
- break;
- }
- dn[0] = tmp[new_loc];
-
- return ;
-}
-
-/* 3/28/01, find minimal of dn[9] */
-
-int AVCFindMin(int dn[])
-{
- int min, i;
- int dmin;
-
- dmin = dn[1];
- min = 1;
- for (i = 2; i < 9; i++)
- {
- if (dn[i] < dmin)
- {
- dmin = dn[i];
- min = i;
- }
- }
-
- return min;
-}
-
-
-
diff --git a/media/libstagefright/codecs/avc/enc/src/rate_control.cpp b/media/libstagefright/codecs/avc/enc/src/rate_control.cpp
deleted file mode 100644
index 09dcc28..0000000
--- a/media/libstagefright/codecs/avc/enc/src/rate_control.cpp
+++ /dev/null
@@ -1,979 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-#include <math.h>
-
-/* rate control variables */
-#define RC_MAX_QUANT 51
-#define RC_MIN_QUANT 0 //cap to 10 to prevent rate fluctuation
-
-#define MAD_MIN 1 /* handle the case of division by zero in RC */
-
-
-/* local functions */
-double QP2Qstep(int QP);
-int Qstep2QP(double Qstep);
-
-double ComputeFrameMAD(AVCCommonObj *video, AVCRateControl *rateCtrl);
-
-void targetBitCalculation(AVCEncObject *encvid, AVCCommonObj *video, AVCRateControl *rateCtrl, MultiPass *pMP);
-
-void calculateQuantizer_Multipass(AVCEncObject *encvid, AVCCommonObj *video,
- AVCRateControl *rateCtrl, MultiPass *pMP);
-
-void updateRC_PostProc(AVCRateControl *rateCtrl, MultiPass *pMP);
-
-void AVCSaveRDSamples(MultiPass *pMP, int counter_samples);
-
-void updateRateControl(AVCRateControl *rateControl, int nal_type);
-
-int GetAvgFrameQP(AVCRateControl *rateCtrl)
-{
- return rateCtrl->Qc;
-}
-
-AVCEnc_Status RCDetermineFrameNum(AVCEncObject *encvid, AVCRateControl *rateCtrl, uint32 modTime, uint *frameNum)
-{
- AVCCommonObj *video = encvid->common;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- uint32 modTimeRef = encvid->modTimeRef;
- int32 currFrameNum ;
- int frameInc;
-
-
- /* check with the buffer fullness to make sure that we have enough bits to encode this frame */
- /* we can use a threshold to guarantee minimum picture quality */
- /**********************************/
-
- /* for now, the default is to encode every frame, To Be Changed */
- if (rateCtrl->first_frame)
- {
- encvid->modTimeRef = modTime;
- encvid->wrapModTime = 0;
- encvid->prevFrameNum = 0;
- encvid->prevProcFrameNum = 0;
-
- *frameNum = 0;
-
- /* set frame type to IDR-frame */
- video->nal_unit_type = AVC_NALTYPE_IDR;
- sliceHdr->slice_type = AVC_I_ALL_SLICE;
- video->slice_type = AVC_I_SLICE;
-
- return AVCENC_SUCCESS;
- }
- else
- {
- if (modTime < modTimeRef) /* modTime wrapped around */
- {
- encvid->wrapModTime += ((uint32)0xFFFFFFFF - modTimeRef) + 1;
- encvid->modTimeRef = modTimeRef = 0;
- }
- modTime += encvid->wrapModTime; /* wrapModTime is non zero after wrap-around */
-
- currFrameNum = (int32)(((modTime - modTimeRef) * rateCtrl->frame_rate + 200) / 1000); /* add small roundings */
-
- if (currFrameNum <= (int32)encvid->prevProcFrameNum)
- {
- return AVCENC_FAIL; /* this is a late frame; do not encode it */
- }
-
- frameInc = currFrameNum - encvid->prevProcFrameNum;
-
- if (frameInc < rateCtrl->skip_next_frame + 1)
- {
- return AVCENC_FAIL; /* frame skip required to maintain the target bit rate. */
- }
-
- RCUpdateBuffer(video, rateCtrl, frameInc - rateCtrl->skip_next_frame); /* in case more frames dropped */
-
- *frameNum = currFrameNum;
-
- /* This part would be similar to DetermineVopType of m4venc */
- if ((*frameNum >= (uint)rateCtrl->idrPeriod && rateCtrl->idrPeriod > 0) || (*frameNum > video->MaxFrameNum)) /* first frame or IDR*/
- {
- /* set frame type to IDR-frame */
- if (rateCtrl->idrPeriod)
- {
- encvid->modTimeRef += (uint32)(rateCtrl->idrPeriod * 1000 / rateCtrl->frame_rate);
- *frameNum -= rateCtrl->idrPeriod;
- }
- else
- {
- encvid->modTimeRef += (uint32)(video->MaxFrameNum * 1000 / rateCtrl->frame_rate);
- *frameNum -= video->MaxFrameNum;
- }
-
- video->nal_unit_type = AVC_NALTYPE_IDR;
- sliceHdr->slice_type = AVC_I_ALL_SLICE;
- video->slice_type = AVC_I_SLICE;
- encvid->prevProcFrameNum = *frameNum;
- }
- else
- {
- video->nal_unit_type = AVC_NALTYPE_SLICE;
- sliceHdr->slice_type = AVC_P_ALL_SLICE;
- video->slice_type = AVC_P_SLICE;
- encvid->prevProcFrameNum = currFrameNum;
- }
-
- }
-
- return AVCENC_SUCCESS;
-}
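
RCDetermineFrameNum above maps a (wrap-corrected) capture time to a frame index with a small rounding offset and rejects anything at or below the previously processed index. A worked sketch of that mapping, assuming modTime is in milliseconds (as the divide by 1000 suggests):

    /* Map a timestamp to a frame index.  Example: at 15 fps a frame captured
     * 1000 ms after the reference maps to (1000 * 15 + 200) / 1000 = 15. */
    static int frame_index(unsigned int modTime, unsigned int modTimeRef,
                           double frame_rate)
    {
        return (int)(((modTime - modTimeRef) * frame_rate + 200) / 1000);
    }

If the resulting index gap is smaller than skip_next_frame + 1 the frame is dropped to hold the target bit rate; otherwise RCUpdateBuffer accounts for the skipped frames before encoding continues.
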
-
-void RCUpdateBuffer(AVCCommonObj *video, AVCRateControl *rateCtrl, int frameInc)
-{
- int tmp;
- MultiPass *pMP = rateCtrl->pMP;
-
- OSCL_UNUSED_ARG(video);
-
- if (rateCtrl->rcEnable == TRUE)
- {
- if (frameInc > 1)
- {
- tmp = rateCtrl->bitsPerFrame * (frameInc - 1);
- rateCtrl->VBV_fullness -= tmp;
- pMP->counter_BTsrc += 10 * (frameInc - 1);
-
- /* Check buffer underflow */
- if (rateCtrl->VBV_fullness < rateCtrl->low_bound)
- {
- rateCtrl->VBV_fullness = rateCtrl->low_bound; // -rateCtrl->Bs/2;
- rateCtrl->TMN_W = rateCtrl->VBV_fullness - rateCtrl->low_bound;
- pMP->counter_BTsrc = pMP->counter_BTdst + (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));
- }
- }
- }
-}
-
-
-AVCEnc_Status InitRateControlModule(AVCHandle *avcHandle)
-{
- AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
- AVCCommonObj *video = encvid->common;
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- double L1, L2, L3, bpp;
- int qp;
- int i;
-
- rateCtrl->basicUnit = video->PicSizeInMbs;
-
- rateCtrl->MADofMB = (double*) avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,
- video->PicSizeInMbs * sizeof(double), DEFAULT_ATTR);
-
- if (!rateCtrl->MADofMB)
- {
- goto CLEANUP_RC;
- }
-
- if (rateCtrl->rcEnable == TRUE)
- {
- rateCtrl->pMP = (MultiPass*) avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, sizeof(MultiPass), DEFAULT_ATTR);
- if (!rateCtrl->pMP)
- {
- goto CLEANUP_RC;
- }
- rateCtrl->pMP->encoded_frames = -1; /* forget about the very first I frame */
-
- /* RDInfo **pRDSamples */
- rateCtrl->pMP->pRDSamples = (RDInfo **)avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, (30 * sizeof(RDInfo *)), DEFAULT_ATTR);
- if (!rateCtrl->pMP->pRDSamples)
- {
- goto CLEANUP_RC;
- }
-
- for (i = 0; i < 30; i++)
- {
- rateCtrl->pMP->pRDSamples[i] = (RDInfo *)avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, (32 * sizeof(RDInfo)), DEFAULT_ATTR);
- if (!rateCtrl->pMP->pRDSamples[i])
- {
- goto CLEANUP_RC;
- }
- }
- rateCtrl->pMP->frameRange = (int)(rateCtrl->frame_rate * 1.0); /* 1.0s time frame*/
- rateCtrl->pMP->frameRange = AVC_MAX(rateCtrl->pMP->frameRange, 5);
- rateCtrl->pMP->frameRange = AVC_MIN(rateCtrl->pMP->frameRange, 30);
-
- rateCtrl->pMP->framePos = -1;
-
-
- rateCtrl->bitsPerFrame = (int32)(rateCtrl->bitRate / rateCtrl->frame_rate);
-
- /* BX rate control */
- rateCtrl->skip_next_frame = 0; /* must be initialized */
-
- rateCtrl->Bs = rateCtrl->cpbSize;
- rateCtrl->TMN_W = 0;
- rateCtrl->VBV_fullness = (int)(rateCtrl->Bs * 0.5); /* rateCtrl->Bs */
- rateCtrl->encoded_frames = 0;
-
- rateCtrl->TMN_TH = rateCtrl->bitsPerFrame;
-
- rateCtrl->max_BitVariance_num = (int)((OsclFloat)(rateCtrl->Bs - rateCtrl->VBV_fullness) / (rateCtrl->bitsPerFrame / 10.0)) - 5;
- if (rateCtrl->max_BitVariance_num < 0) rateCtrl->max_BitVariance_num += 5;
-
- // Set the initial buffer fullness
- /* According to the spec, the initial buffer fullness needs to be set to 1/3 */
- rateCtrl->VBV_fullness = (int)(rateCtrl->Bs / 3.0 - rateCtrl->Bs / 2.0); /* the buffer range is [-Bs/2, Bs/2] */
- rateCtrl->pMP->counter_BTsrc = (int)((rateCtrl->Bs / 2.0 - rateCtrl->Bs / 3.0) / (rateCtrl->bitsPerFrame / 10.0));
- rateCtrl->TMN_W = (int)(rateCtrl->VBV_fullness + rateCtrl->pMP->counter_BTsrc * (rateCtrl->bitsPerFrame / 10.0));
-
- rateCtrl->low_bound = -rateCtrl->Bs / 2;
- rateCtrl->VBV_fullness_offset = 0;
-
- /* Setting the bitrate and framerate */
- rateCtrl->pMP->bitrate = rateCtrl->bitRate;
- rateCtrl->pMP->framerate = rateCtrl->frame_rate;
- rateCtrl->pMP->target_bits_per_frame = rateCtrl->pMP->bitrate / rateCtrl->pMP->framerate;
-
- /*compute the initial QP*/
- bpp = 1.0 * rateCtrl->bitRate / (rateCtrl->frame_rate * (video->PicSizeInMbs << 8));
- if (video->PicWidthInSamplesL == 176)
- {
- L1 = 0.1;
- L2 = 0.3;
- L3 = 0.6;
- }
- else if (video->PicWidthInSamplesL == 352)
- {
- L1 = 0.2;
- L2 = 0.6;
- L3 = 1.2;
- }
- else
- {
- L1 = 0.6;
- L2 = 1.4;
- L3 = 2.4;
- }
-
- if (rateCtrl->initQP == 0)
- {
- if (bpp <= L1)
- qp = 35;
- else if (bpp <= L2)
- qp = 25;
- else if (bpp <= L3)
- qp = 20;
- else
- qp = 15;
- rateCtrl->initQP = qp;
- }
-
- rateCtrl->Qc = rateCtrl->initQP;
- }
-
- return AVCENC_SUCCESS;
-
-CLEANUP_RC:
-
- CleanupRateControlModule(avcHandle);
- return AVCENC_MEMORY_FAIL;
-
-}
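
The initial QP above is picked from bits-per-pixel thresholds that depend on the picture width. A condensed standalone restatement with one worked number (the QCIF/64 kbps figures are an assumed example, and the helper name is mine):

    /* Initial QP from bits per pixel.  Example: QCIF (176x144, 99 MBs) at
     * 64 kbps and 15 fps gives bpp = 64000 / (15 * 99 * 256) ~= 0.168,
     * which lands in (0.1, 0.3], so the initial QP is 25. */
    static int initial_qp_sketch(double bitRate, double frame_rate,
                                 int picSizeInMbs, int picWidth)
    {
        double bpp = bitRate / (frame_rate * (picSizeInMbs << 8));
        double L1 = 0.6, L2 = 1.4, L3 = 2.4;           /* default thresholds */
        if (picWidth == 176)      { L1 = 0.1; L2 = 0.3; L3 = 0.6; }
        else if (picWidth == 352) { L1 = 0.2; L2 = 0.6; L3 = 1.2; }
        return (bpp <= L1) ? 35 : (bpp <= L2) ? 25 : (bpp <= L3) ? 20 : 15;
    }
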
-
-
-void CleanupRateControlModule(AVCHandle *avcHandle)
-{
- AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- int i;
-
- if (rateCtrl->MADofMB)
- {
- avcHandle->CBAVC_Free(avcHandle->userData, rateCtrl->MADofMB);
- }
-
- if (rateCtrl->pMP)
- {
- if (rateCtrl->pMP->pRDSamples)
- {
- for (i = 0; i < 30; i++)
- {
- if (rateCtrl->pMP->pRDSamples[i])
- {
- avcHandle->CBAVC_Free(avcHandle->userData, rateCtrl->pMP->pRDSamples[i]);
- }
- }
- avcHandle->CBAVC_Free(avcHandle->userData, rateCtrl->pMP->pRDSamples);
- }
- avcHandle->CBAVC_Free(avcHandle->userData, rateCtrl->pMP);
- }
-
- return ;
-}
-
-void RCInitGOP(AVCEncObject *encvid)
-{
- /* in BX RC, there's no GOP-level RC */
-
- OSCL_UNUSED_ARG(encvid);
-
- return ;
-}
-
-
-void RCInitFrameQP(AVCEncObject *encvid)
-{
- AVCCommonObj *video = encvid->common;
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- AVCPicParamSet *picParam = video->currPicParams;
- MultiPass *pMP = rateCtrl->pMP;
-
- if (rateCtrl->rcEnable == TRUE)
- {
- /* frame layer rate control */
- if (rateCtrl->encoded_frames == 0)
- {
- video->QPy = rateCtrl->Qc = rateCtrl->initQP;
- }
- else
- {
- calculateQuantizer_Multipass(encvid, video, rateCtrl, pMP);
- video->QPy = rateCtrl->Qc;
- }
-
- rateCtrl->NumberofHeaderBits = 0;
- rateCtrl->NumberofTextureBits = 0;
- rateCtrl->numFrameBits = 0; // reset
-
- /* update pMP->framePos */
- if (++pMP->framePos == pMP->frameRange) pMP->framePos = 0;
-
- if (rateCtrl->T == 0)
- {
- pMP->counter_BTdst = (int)(rateCtrl->frame_rate * 7.5 + 0.5); /* 0.75s time frame */
- pMP->counter_BTdst = AVC_MIN(pMP->counter_BTdst, (int)(rateCtrl->max_BitVariance_num / 2 * 0.40)); /* 0.75s time frame may go beyond VBV buffer if we set the buffer size smaller than 0.75s */
- pMP->counter_BTdst = AVC_MAX(pMP->counter_BTdst, (int)((rateCtrl->Bs / 2 - rateCtrl->VBV_fullness) * 0.30 / (rateCtrl->TMN_TH / 10.0) + 0.5)); /* At least 30% of VBV buffer size/2 */
- pMP->counter_BTdst = AVC_MIN(pMP->counter_BTdst, 20); /* Limit the target to be smaller than 3C */
-
- pMP->target_bits = rateCtrl->T = rateCtrl->TMN_TH = (int)(rateCtrl->TMN_TH * (1.0 + pMP->counter_BTdst * 0.1));
- pMP->diff_counter = pMP->counter_BTdst;
- }
-
- /* collect the necessary data: target bits, actual bits, mad and QP */
- pMP->target_bits = rateCtrl->T;
- pMP->QP = video->QPy;
-
- pMP->mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs; //ComputeFrameMAD(video, rateCtrl);
- if (pMP->mad < MAD_MIN) pMP->mad = MAD_MIN; /* MAD_MIN is defined as 1 above */
-
- pMP->bitrate = rateCtrl->bitRate; /* calculated in RCVopQPSetting */
- pMP->framerate = rateCtrl->frame_rate;
-
- /* first pass encoding */
- pMP->nRe_Quantized = 0;
-
- } // rcEnable
- else
- {
- video->QPy = rateCtrl->initQP;
- }
-
-// printf(" %d ",video->QPy);
-
- if (video->CurrPicNum == 0 && encvid->outOfBandParamSet == FALSE)
- {
- picParam->pic_init_qs_minus26 = 0;
- picParam->pic_init_qp_minus26 = video->QPy - 26;
- }
-
- // need this for motion estimation
- encvid->lambda_mode = QP2QUANT[AVC_MAX(0, video->QPy-SHIFT_QP)];
- encvid->lambda_motion = LAMBDA_FACTOR(encvid->lambda_mode);
- return ;
-}
-
-/* Mad based variable bit allocation + QP calculation with a new quadratic method */
-void calculateQuantizer_Multipass(AVCEncObject *encvid, AVCCommonObj *video,
- AVCRateControl *rateCtrl, MultiPass *pMP)
-{
- int prev_actual_bits = 0, curr_target, /*pos=0,*/i, j;
- OsclFloat Qstep, prev_QP = 0.625;
-
- OsclFloat curr_mad, prev_mad, curr_RD, prev_RD, average_mad, aver_QP;
-
- /* Mad based variable bit allocation */
- targetBitCalculation(encvid, video, rateCtrl, pMP);
-
- if (rateCtrl->T <= 0 || rateCtrl->totalSAD == 0)
- {
- if (rateCtrl->T < 0) rateCtrl->Qc = RC_MAX_QUANT;
- return;
- }
-
- /* ---------------------------------------------------------------------------------------------------*/
- /* current frame QP estimation */
- curr_target = rateCtrl->T;
- curr_mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs;
- if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 above */
- curr_RD = (OsclFloat)curr_target / curr_mad;
-
- if (rateCtrl->skip_next_frame == -1) // previous was skipped
- {
- i = pMP->framePos;
- prev_mad = pMP->pRDSamples[i][0].mad;
- prev_QP = pMP->pRDSamples[i][0].QP;
- prev_actual_bits = pMP->pRDSamples[i][0].actual_bits;
- }
- else
- {
- /* Another version of search the optimal point */
- prev_mad = 0.0;
- i = 0;
- while (i < pMP->frameRange && prev_mad < 0.001) /* find first one with nonzero prev_mad */
- {
- prev_mad = pMP->pRDSamples[i][0].mad;
- i++;
- }
-
- if (i < pMP->frameRange)
- {
- prev_actual_bits = pMP->pRDSamples[i-1][0].actual_bits;
-
- for (j = 0; i < pMP->frameRange; i++)
- {
- if (pMP->pRDSamples[i][0].mad != 0 &&
- AVC_ABS(prev_mad - curr_mad) > AVC_ABS(pMP->pRDSamples[i][0].mad - curr_mad))
- {
- prev_mad = pMP->pRDSamples[i][0].mad;
- prev_actual_bits = pMP->pRDSamples[i][0].actual_bits;
- j = i;
- }
- }
- prev_QP = QP2Qstep(pMP->pRDSamples[j][0].QP);
-
- for (i = 1; i < pMP->samplesPerFrame[j]; i++)
- {
- if (AVC_ABS(prev_actual_bits - curr_target) > AVC_ABS(pMP->pRDSamples[j][i].actual_bits - curr_target))
- {
- prev_actual_bits = pMP->pRDSamples[j][i].actual_bits;
- prev_QP = QP2Qstep(pMP->pRDSamples[j][i].QP);
- }
- }
- }
- }
-
- // quadratic approximation
- if (prev_mad > 0.001) // only when prev_mad is greater than 0, otherwise keep using the same QP
- {
- prev_RD = (OsclFloat)prev_actual_bits / prev_mad;
- //rateCtrl->Qc = (Int)(prev_QP * sqrt(prev_actual_bits/curr_target) + 0.4);
- if (prev_QP == 0.625) // added this to allow getting out of QP = 0 easily
- {
- Qstep = (int)(prev_RD / curr_RD + 0.5);
- }
- else
- {
- // rateCtrl->Qc =(Int)(prev_QP * M4VENC_SQRT(prev_RD/curr_RD) + 0.9);
-
- if (prev_RD / curr_RD > 0.5 && prev_RD / curr_RD < 2.0)
- Qstep = (int)(prev_QP * (sqrt(prev_RD / curr_RD) + prev_RD / curr_RD) / 2.0 + 0.9); /* Quadratic and linear approximation */
- else
- Qstep = (int)(prev_QP * (sqrt(prev_RD / curr_RD) + pow(prev_RD / curr_RD, 1.0 / 3.0)) / 2.0 + 0.9);
- }
- // The lower bound on Qc should be a function of curr_mad:
- // when the MAD is already low, the lower bound on Qc doesn't have to be small.
- // Note: this doesn't work well for a low-complexity clip encoded at a high bit rate;
- // it fails to hit the target bit rate because of this QP lower bound.
- /// if((curr_mad < 8) && (rateCtrl->Qc < 12)) rateCtrl->Qc = 12;
- // else if((curr_mad < 128) && (rateCtrl->Qc < 3)) rateCtrl->Qc = 3;
-
- rateCtrl->Qc = Qstep2QP(Qstep);
-
- if (rateCtrl->Qc < RC_MIN_QUANT) rateCtrl->Qc = RC_MIN_QUANT;
- if (rateCtrl->Qc > RC_MAX_QUANT) rateCtrl->Qc = RC_MAX_QUANT;
- }
-
- /* active bit resource protection */
- aver_QP = (pMP->encoded_frames == 0 ? 0 : pMP->sum_QP / (OsclFloat)pMP->encoded_frames);
- average_mad = (pMP->encoded_frames == 0 ? 0 : pMP->sum_mad / (OsclFloat)pMP->encoded_frames); /* this function is called from the second encoded frame */
- if (pMP->diff_counter == 0 &&
- ((OsclFloat)rateCtrl->Qc <= aver_QP*1.1 || curr_mad <= average_mad*1.1) &&
- pMP->counter_BTsrc <= (pMP->counter_BTdst + (int)(pMP->framerate*1.0 + 0.5)))
- {
- rateCtrl->TMN_TH -= (int)(pMP->target_bits_per_frame / 10.0);
- rateCtrl->T = rateCtrl->TMN_TH - rateCtrl->TMN_W;
- pMP->counter_BTsrc++;
- pMP->diff_counter--;
- }
-
-}
-
-void targetBitCalculation(AVCEncObject *encvid, AVCCommonObj *video, AVCRateControl *rateCtrl, MultiPass *pMP)
-{
- OSCL_UNUSED_ARG(encvid);
- OsclFloat curr_mad;//, average_mad;
- int diff_counter_BTsrc, diff_counter_BTdst, prev_counter_diff, curr_counter_diff, bound;
- /* BT = Bit Transfer, for pMP->counter_BTsrc, pMP->counter_BTdst */
-
- /* some frame-dropping work remains here because pMP cannot be passed into updateRateControl() */
- updateRC_PostProc(rateCtrl, pMP);
-
- /* update pMP->counter_BTsrc and pMP->counter_BTdst to avoid integer overflow */
- if (pMP->counter_BTsrc > 1000 && pMP->counter_BTdst > 1000)
- {
- pMP->counter_BTsrc -= 1000;
- pMP->counter_BTdst -= 1000;
- }
-
- /* ---------------------------------------------------------------------------------------------------*/
- /* target calculation */
- curr_mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs;
- if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */
- diff_counter_BTsrc = diff_counter_BTdst = 0;
- pMP->diff_counter = 0;
-
-
- /*1.calculate average mad */
- pMP->sum_mad += curr_mad;
- //average_mad = (pMP->encoded_frames < 1 ? curr_mad : pMP->sum_mad/(OsclFloat)(pMP->encoded_frames+1)); /* this function is called from the second encoded frame */
- //pMP->aver_mad = average_mad;
- if (pMP->encoded_frames >= 0) /* pMP->encoded_frames is set to -1 initially, so forget about the very first I frame */
- pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames + curr_mad) / (pMP->encoded_frames + 1);
-
- if (pMP->overlapped_win_size > 0 && pMP->encoded_frames_prev >= 0)
- pMP->aver_mad_prev = (pMP->aver_mad_prev * pMP->encoded_frames_prev + curr_mad) / (pMP->encoded_frames_prev + 1);
-
- /*2.average_mad, mad ==> diff_counter_BTsrc, diff_counter_BTdst */
- if (pMP->overlapped_win_size == 0)
- {
- /* original version */
- if (curr_mad > pMP->aver_mad*1.1)
- {
- if (curr_mad / (pMP->aver_mad + 0.0001) > 2)
- diff_counter_BTdst = (int)(sqrt(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.4) - 10;
- //diff_counter_BTdst = (int)((sqrt(curr_mad/pMP->aver_mad)*2+curr_mad/pMP->aver_mad)/(3*0.1) + 0.4) - 10;
- else
- diff_counter_BTdst = (int)(curr_mad / (pMP->aver_mad + 0.0001) * 10 + 0.4) - 10;
- }
- else /* curr_mad <= average_mad*1.1 */
- //diff_counter_BTsrc = 10 - (int)((sqrt(curr_mad/pMP->aver_mad) + pow(curr_mad/pMP->aver_mad, 1.0/3.0))/(2.0*0.1) + 0.4);
- diff_counter_BTsrc = 10 - (int)(sqrt(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.5);
-
- /* actively fill in the possible gap */
- if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 &&
- curr_mad <= pMP->aver_mad*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst)
- diff_counter_BTsrc = 1;
-
- }
- else if (pMP->overlapped_win_size > 0)
- {
- /* transition time: use previous average mad "pMP->aver_mad_prev" instead of the current average mad "pMP->aver_mad" */
- if (curr_mad > pMP->aver_mad_prev*1.1)
- {
- if (curr_mad / pMP->aver_mad_prev > 2)
- diff_counter_BTdst = (int)(sqrt(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.4) - 10;
- //diff_counter_BTdst = (int)((M4VENC_SQRT(curr_mad/pMP->aver_mad_prev)*2+curr_mad/pMP->aver_mad_prev)/(3*0.1) + 0.4) - 10;
- else
- diff_counter_BTdst = (int)(curr_mad / (pMP->aver_mad_prev + 0.0001) * 10 + 0.4) - 10;
- }
- else /* curr_mad <= average_mad*1.1 */
- //diff_counter_BTsrc = 10 - (Int)((sqrt(curr_mad/pMP->aver_mad_prev) + pow(curr_mad/pMP->aver_mad_prev, 1.0/3.0))/(2.0*0.1) + 0.4);
- diff_counter_BTsrc = 10 - (int)(sqrt(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.5);
-
- /* actively fill in the possible gap */
- if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 &&
- curr_mad <= pMP->aver_mad_prev*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst)
- diff_counter_BTsrc = 1;
-
- if (--pMP->overlapped_win_size <= 0) pMP->overlapped_win_size = 0;
- }
-
-
- /* if difference is too much, do clipping */
- /* First, set the upper bound for current bit allocation variance: 80% of available buffer */
- bound = (int)((rateCtrl->Bs / 2 - rateCtrl->VBV_fullness) * 0.6 / (pMP->target_bits_per_frame / 10)); /* rateCtrl->Bs */
- diff_counter_BTsrc = AVC_MIN(diff_counter_BTsrc, bound);
- diff_counter_BTdst = AVC_MIN(diff_counter_BTdst, bound);
-
- /* Second, set another upper bound for current bit allocation: 4-5*bitrate/framerate */
- bound = 50;
-// if(video->encParams->RC_Type == CBR_LOWDELAY)
-// not necessary bound = 10; -- For Low delay */
-
- diff_counter_BTsrc = AVC_MIN(diff_counter_BTsrc, bound);
- diff_counter_BTdst = AVC_MIN(diff_counter_BTdst, bound);
-
-
- /* Third, check the buffer */
- prev_counter_diff = pMP->counter_BTdst - pMP->counter_BTsrc;
- curr_counter_diff = prev_counter_diff + (diff_counter_BTdst - diff_counter_BTsrc);
-
- if (AVC_ABS(prev_counter_diff) >= rateCtrl->max_BitVariance_num || AVC_ABS(curr_counter_diff) >= rateCtrl->max_BitVariance_num)
- { //diff_counter_BTsrc = diff_counter_BTdst = 0;
-
- if (curr_counter_diff > rateCtrl->max_BitVariance_num && diff_counter_BTdst)
- {
- diff_counter_BTdst = (rateCtrl->max_BitVariance_num - prev_counter_diff) + diff_counter_BTsrc;
- if (diff_counter_BTdst < 0) diff_counter_BTdst = 0;
- }
-
- else if (curr_counter_diff < -rateCtrl->max_BitVariance_num && diff_counter_BTsrc)
- {
- diff_counter_BTsrc = diff_counter_BTdst - (-rateCtrl->max_BitVariance_num - prev_counter_diff);
- if (diff_counter_BTsrc < 0) diff_counter_BTsrc = 0;
- }
- }
-
-
- /*3.diff_counter_BTsrc, diff_counter_BTdst ==> TMN_TH */
- rateCtrl->TMN_TH = (int)(pMP->target_bits_per_frame);
- pMP->diff_counter = 0;
-
- if (diff_counter_BTsrc)
- {
- rateCtrl->TMN_TH -= (int)(pMP->target_bits_per_frame * diff_counter_BTsrc * 0.1);
- pMP->diff_counter = -diff_counter_BTsrc;
- }
- else if (diff_counter_BTdst)
- {
- rateCtrl->TMN_TH += (int)(pMP->target_bits_per_frame * diff_counter_BTdst * 0.1);
- pMP->diff_counter = diff_counter_BTdst;
- }
-
-
- /*4.update pMP->counter_BTsrc, pMP->counter_BTdst */
- pMP->counter_BTsrc += diff_counter_BTsrc;
- pMP->counter_BTdst += diff_counter_BTdst;
-
-
- /*5.target bit calculation */
- rateCtrl->T = rateCtrl->TMN_TH - rateCtrl->TMN_W;
-
- return ;
-}
-
-void updateRC_PostProc(AVCRateControl *rateCtrl, MultiPass *pMP)
-{
- if (rateCtrl->skip_next_frame > 0) /* skip next frame */
- {
- pMP->counter_BTsrc += 10 * rateCtrl->skip_next_frame;
-
- }
- else if (rateCtrl->skip_next_frame == -1) /* skip current frame */
- {
- pMP->counter_BTdst -= pMP->diff_counter;
- pMP->counter_BTsrc += 10;
-
- pMP->sum_mad -= pMP->mad;
- pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames - pMP->mad) / (pMP->encoded_frames - 1 + 0.0001);
- pMP->sum_QP -= pMP->QP;
- pMP->encoded_frames --;
- }
- /* part of the VBV_fullness update remains here */
- //if(rateCtrl->VBV_fullness < -rateCtrl->Bs/2) /* rateCtrl->Bs */
- if (rateCtrl->VBV_fullness < rateCtrl->low_bound)
- {
- rateCtrl->VBV_fullness = rateCtrl->low_bound; // -rateCtrl->Bs/2;
- rateCtrl->TMN_W = rateCtrl->VBV_fullness - rateCtrl->low_bound;
- pMP->counter_BTsrc = pMP->counter_BTdst + (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));
- }
-}
-
-
-void RCInitChromaQP(AVCEncObject *encvid)
-{
- AVCCommonObj *video = encvid->common;
- AVCMacroblock *currMB = video->currMB;
- int q_bits;
-
- /* we have to do the same thing for AVC_CLIP3(0,51,video->QSy) */
-
- video->QPy_div_6 = (currMB->QPy * 43) >> 8;
- video->QPy_mod_6 = currMB->QPy - 6 * video->QPy_div_6;
- currMB->QPc = video->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, currMB->QPy + video->currPicParams->chroma_qp_index_offset)];
- video->QPc_div_6 = (video->QPc * 43) >> 8;
- video->QPc_mod_6 = video->QPc - 6 * video->QPc_div_6;
-
- /* pre-calculate this to save computation */
- q_bits = 4 + video->QPy_div_6;
- if (video->slice_type == AVC_I_SLICE)
- {
- encvid->qp_const = 682 << q_bits; // intra
- }
- else
- {
- encvid->qp_const = 342 << q_bits; // inter
- }
-
- q_bits = 4 + video->QPc_div_6;
- if (video->slice_type == AVC_I_SLICE)
- {
- encvid->qp_const_c = 682 << q_bits; // intra
- }
- else
- {
- encvid->qp_const_c = 342 << q_bits; // inter
- }
-
- encvid->lambda_mode = QP2QUANT[AVC_MAX(0, currMB->QPy-SHIFT_QP)];
- encvid->lambda_motion = LAMBDA_FACTOR(encvid->lambda_mode);
-
- return ;
-}
-
-
-void RCInitMBQP(AVCEncObject *encvid)
-{
- AVCCommonObj *video = encvid->common;
- AVCMacroblock *currMB = video->currMB;
-
- currMB->QPy = video->QPy; /* set to previous value or picture level */
-
- RCInitChromaQP(encvid);
-
-}
-
-void RCPostMB(AVCCommonObj *video, AVCRateControl *rateCtrl, int num_header_bits, int num_texture_bits)
-{
- OSCL_UNUSED_ARG(video);
- rateCtrl->numMBHeaderBits = num_header_bits;
- rateCtrl->numMBTextureBits = num_texture_bits;
- rateCtrl->NumberofHeaderBits += rateCtrl->numMBHeaderBits;
- rateCtrl->NumberofTextureBits += rateCtrl->numMBTextureBits;
-}
-
-void RCRestoreQP(AVCMacroblock *currMB, AVCCommonObj *video, AVCEncObject *encvid)
-{
- currMB->QPy = video->QPy; /* use previous QP */
- RCInitChromaQP(encvid);
-
- return ;
-}
-
-
-void RCCalculateMAD(AVCEncObject *encvid, AVCMacroblock *currMB, uint8 *orgL, int orgPitch)
-{
- AVCCommonObj *video = encvid->common;
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- uint32 dmin_lx;
-
- if (rateCtrl->rcEnable == TRUE)
- {
- if (currMB->mb_intra)
- {
- if (currMB->mbMode == AVC_I16)
- {
- dmin_lx = (0xFFFF << 16) | orgPitch;
- rateCtrl->MADofMB[video->mbNum] = AVCSAD_Macroblock_C(orgL,
- encvid->pred_i16[currMB->i16Mode], dmin_lx, NULL);
- }
- else /* i4 */
- {
- rateCtrl->MADofMB[video->mbNum] = encvid->i4_sad / 256.;
- }
- }
- /* for INTER, we have already saved it with the MV search */
- }
-
- return ;
-}
-
-
-
-AVCEnc_Status RCUpdateFrame(AVCEncObject *encvid)
-{
- AVCCommonObj *video = encvid->common;
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- AVCEnc_Status status = AVCENC_SUCCESS;
- MultiPass *pMP = rateCtrl->pMP;
- int diff_BTCounter;
- int nal_type = video->nal_unit_type;
-
- /* update the complexity weight of I, P, B frame */
-
- if (rateCtrl->rcEnable == TRUE)
- {
- pMP->actual_bits = rateCtrl->numFrameBits;
- pMP->mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs; //ComputeFrameMAD(video, rateCtrl);
-
- AVCSaveRDSamples(pMP, 0);
-
- pMP->encoded_frames++;
-
- /* for pMP->samplesPerFrame */
- pMP->samplesPerFrame[pMP->framePos] = 0;
-
- pMP->sum_QP += pMP->QP;
-
- /* update pMP->counter_BTsrc, pMP->counter_BTdst */
- /* re-allocate the target bit again and then stop encoding */
- diff_BTCounter = (int)((OsclFloat)(rateCtrl->TMN_TH - rateCtrl->TMN_W - pMP->actual_bits) /
- (pMP->bitrate / (pMP->framerate + 0.0001) + 0.0001) / 0.1);
- if (diff_BTCounter >= 0)
- pMP->counter_BTsrc += diff_BTCounter; /* pMP->actual_bits is smaller */
- else
- pMP->counter_BTdst -= diff_BTCounter; /* pMP->actual_bits is bigger */
-
- rateCtrl->TMN_TH -= (int)((OsclFloat)pMP->bitrate / (pMP->framerate + 0.0001) * (diff_BTCounter * 0.1));
- rateCtrl->T = pMP->target_bits = rateCtrl->TMN_TH - rateCtrl->TMN_W;
- pMP->diff_counter -= diff_BTCounter;
-
- rateCtrl->Rc = rateCtrl->numFrameBits; /* Total Bits for current frame */
- rateCtrl->Hc = rateCtrl->NumberofHeaderBits; /* Total Bits in Header and Motion Vector */
-
- /* BX_RC */
- updateRateControl(rateCtrl, nal_type);
- if (rateCtrl->skip_next_frame == -1) // skip current frame
- {
- status = AVCENC_SKIPPED_PICTURE;
- }
- }
-
- rateCtrl->first_frame = 0; // reset here after we encode the first frame.
-
- return status;
-}
-
-void AVCSaveRDSamples(MultiPass *pMP, int counter_samples)
-{
- /* for pMP->pRDSamples */
- pMP->pRDSamples[pMP->framePos][counter_samples].QP = pMP->QP;
- pMP->pRDSamples[pMP->framePos][counter_samples].actual_bits = pMP->actual_bits;
- pMP->pRDSamples[pMP->framePos][counter_samples].mad = pMP->mad;
- pMP->pRDSamples[pMP->framePos][counter_samples].R_D = (OsclFloat)pMP->actual_bits / (pMP->mad + 0.0001);
-
- return ;
-}
-
-void updateRateControl(AVCRateControl *rateCtrl, int nal_type)
-{
- int frame_bits;
- MultiPass *pMP = rateCtrl->pMP;
-
- /* BX rate control */
- frame_bits = (int)(rateCtrl->bitRate / rateCtrl->frame_rate);
- rateCtrl->TMN_W += (rateCtrl->Rc - rateCtrl->TMN_TH);
- rateCtrl->VBV_fullness += (rateCtrl->Rc - frame_bits); //rateCtrl->Rp);
- //if(rateCtrl->VBV_fullness < 0) rateCtrl->VBV_fullness = -1;
-
- rateCtrl->encoded_frames++;
-
- /* frame dropping */
- rateCtrl->skip_next_frame = 0;
-
- if ((rateCtrl->VBV_fullness > rateCtrl->Bs / 2) && nal_type != AVC_NALTYPE_IDR) /* skip the current frame */ /* rateCtrl->Bs */
- {
- rateCtrl->TMN_W -= (rateCtrl->Rc - rateCtrl->TMN_TH);
- rateCtrl->VBV_fullness -= rateCtrl->Rc;
- rateCtrl->skip_next_frame = -1;
- }
- else if ((OsclFloat)(rateCtrl->VBV_fullness - rateCtrl->VBV_fullness_offset) > (rateCtrl->Bs / 2 - rateCtrl->VBV_fullness_offset)*0.95) /* skip next frame */
- {
- rateCtrl->VBV_fullness -= frame_bits; //rateCtrl->Rp;
- rateCtrl->skip_next_frame = 1;
- pMP->counter_BTsrc -= (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));
- /* BX_1, skip more than 1 frames */
- //while(rateCtrl->VBV_fullness > rateCtrl->Bs*0.475)
- while ((rateCtrl->VBV_fullness - rateCtrl->VBV_fullness_offset) > (rateCtrl->Bs / 2 - rateCtrl->VBV_fullness_offset)*0.95)
- {
- rateCtrl->VBV_fullness -= frame_bits; //rateCtrl->Rp;
- rateCtrl->skip_next_frame++;
- pMP->counter_BTsrc -= (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));
- }
-
- /* END BX_1 */
- }
-}
-
-
-double ComputeFrameMAD(AVCCommonObj *video, AVCRateControl *rateCtrl)
-{
- double TotalMAD;
- int i;
- TotalMAD = 0.0;
- for (i = 0; i < (int)video->PicSizeInMbs; i++)
- TotalMAD += rateCtrl->MADofMB[i];
- TotalMAD /= video->PicSizeInMbs;
- return TotalMAD;
-}
-
-
-
-
-
-/* convert from QP to Qstep */
-double QP2Qstep(int QP)
-{
- int i;
- double Qstep;
- static const double QP2QSTEP[6] = { 0.625, 0.6875, 0.8125, 0.875, 1.0, 1.125 };
-
- Qstep = QP2QSTEP[QP % 6];
- for (i = 0; i < (QP / 6); i++)
- Qstep *= 2;
-
- return Qstep;
-}
-
-/* convert from step size to QP */
-int Qstep2QP(double Qstep)
-{
- int q_per = 0, q_rem = 0;
-
- // assert( Qstep >= QP2Qstep(0) && Qstep <= QP2Qstep(51) );
- if (Qstep < QP2Qstep(0))
- return 0;
- else if (Qstep > QP2Qstep(51))
- return 51;
-
- while (Qstep > QP2Qstep(5))
- {
- Qstep /= 2;
- q_per += 1;
- }
-
- if (Qstep <= (0.625 + 0.6875) / 2)
- {
- Qstep = 0.625;
- q_rem = 0;
- }
- else if (Qstep <= (0.6875 + 0.8125) / 2)
- {
- Qstep = 0.6875;
- q_rem = 1;
- }
- else if (Qstep <= (0.8125 + 0.875) / 2)
- {
- Qstep = 0.8125;
- q_rem = 2;
- }
- else if (Qstep <= (0.875 + 1.0) / 2)
- {
- Qstep = 0.875;
- q_rem = 3;
- }
- else if (Qstep <= (1.0 + 1.125) / 2)
- {
- Qstep = 1.0;
- q_rem = 4;
- }
- else
- {
- Qstep = 1.125;
- q_rem = 5;
- }
-
- return (q_per * 6 + q_rem);
-}
-
-
-
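
For reference, the QP-to-quantizer-step mapping removed above follows the H.264 rule that the step size doubles every 6 QP values, starting from the six base steps for QP 0..5. Below is a minimal standalone sketch of that rule and a round-trip check; it is illustrative only, not part of this patch, and the names are made up:

    #include <assert.h>

    /* Base step sizes for QP 0..5; each +6 in QP doubles the step. */
    static const double kBaseQstep[6] = { 0.625, 0.6875, 0.8125, 0.875, 1.0, 1.125 };

    static double qp_to_qstep(int qp)       /* same rule as QP2Qstep() above */
    {
        double step = kBaseQstep[qp % 6];
        for (int i = 0; i < qp / 6; i++)
            step *= 2.0;                    /* doubles every 6 QP values */
        return step;
    }

    int main(void)
    {
        assert(qp_to_qstep(0)  == 0.625);
        assert(qp_to_qstep(26) == 13.0);    /* 0.8125 * 2^4, exact in binary */
        return 0;
    }
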
diff --git a/media/libstagefright/codecs/avc/enc/src/residual.cpp b/media/libstagefright/codecs/avc/enc/src/residual.cpp
deleted file mode 100644
index 42eb910..0000000
--- a/media/libstagefright/codecs/avc/enc/src/residual.cpp
+++ /dev/null
@@ -1,389 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-
-AVCEnc_Status EncodeIntraPCM(AVCEncObject *encvid)
-{
- AVCEnc_Status status = AVCENC_SUCCESS;
- AVCCommonObj *video = encvid->common;
- AVCFrameIO *currInput = encvid->currInput;
- AVCEncBitstream *stream = encvid->bitstream;
- int x_position = (video->mb_x << 4);
- int y_position = (video->mb_y << 4);
- int orgPitch = currInput->pitch;
- int offset1 = y_position * orgPitch + x_position;
- int i, j;
- int offset;
- uint8 *pDst, *pSrc;
- uint code;
-
- ue_v(stream, 25);
-
- i = stream->bit_left & 0x7;
- if (i) /* not byte-aligned */
- {
- BitstreamWriteBits(stream, 0, i);
- }
-
- pSrc = currInput->YCbCr[0] + offset1;
- pDst = video->currPic->Sl + offset1;
- offset = video->PicWidthInSamplesL - 16;
-
- /* at this point bitstream is byte-aligned */
- j = 16;
- while (j > 0)
- {
-#if (WORD_SIZE==32)
- for (i = 0; i < 4; i++)
- {
- code = *((uint*)pSrc);
- pSrc += 4;
- *((uint*)pDst) = code;
- pDst += 4;
- status = BitstreamWriteBits(stream, 32, code);
- }
-#else
- for (i = 0; i < 8; i++)
- {
- code = *((uint*)pSrc);
- pSrc += 2;
- *((uint*)pDst) = code;
- pDst += 2;
- status = BitstreamWriteBits(stream, 16, code);
- }
-#endif
- pDst += offset;
- pSrc += offset;
- j--;
- }
- if (status != AVCENC_SUCCESS) /* check only once per line */
- return status;
-
- pDst = video->currPic->Scb + ((offset1 + x_position) >> 2);
- pSrc = currInput->YCbCr[1] + ((offset1 + x_position) >> 2);
- offset >>= 1;
-
- j = 8;
- while (j > 0)
- {
-#if (WORD_SIZE==32)
- for (i = 0; i < 2; i++)
- {
- code = *((uint*)pSrc);
- pSrc += 4;
- *((uint*)pDst) = code;
- pDst += 4;
- status = BitstreamWriteBits(stream, 32, code);
- }
-#else
- for (i = 0; i < 4; i++)
- {
- code = *((uint*)pSrc);
- pSrc += 2;
- *((uint*)pDst) = code;
- pDst += 2;
- status = BitstreamWriteBits(stream, 16, code);
- }
-#endif
- pDst += offset;
- pSrc += offset;
- j--;
- }
-
- if (status != AVCENC_SUCCESS) /* check only once per line */
- return status;
-
- pDst = video->currPic->Scr + ((offset1 + x_position) >> 2);
- pSrc = currInput->YCbCr[2] + ((offset1 + x_position) >> 2);
-
- j = 8;
- while (j > 0)
- {
-#if (WORD_SIZE==32)
- for (i = 0; i < 2; i++)
- {
- code = *((uint*)pSrc);
- pSrc += 4;
- *((uint*)pDst) = code;
- pDst += 4;
- status = BitstreamWriteBits(stream, 32, code);
- }
-#else
- for (i = 0; i < 4; i++)
- {
- code = *((uint*)pSrc);
- pSrc += 2;
- *((uint*)pDst) = code;
- pDst += 2;
- status = BitstreamWriteBits(stream, 16, code);
- }
-#endif
- pDst += offset;
- pSrc += offset;
- j--;
- }
-
- return status;
-}
-
-
-AVCEnc_Status enc_residual_block(AVCEncObject *encvid, AVCResidualType type, int cindx, AVCMacroblock *currMB)
-{
- AVCEnc_Status status = AVCENC_SUCCESS;
- AVCCommonObj *video = encvid->common;
- int i, maxNumCoeff, nC;
- int cdc = 0, cac = 0;
- int TrailingOnes;
- AVCEncBitstream *stream = encvid->bitstream;
- uint trailing_ones_sign_flag;
- int zerosLeft;
- int *level, *run;
- int TotalCoeff;
- const static int incVlc[] = {0, 3, 6, 12, 24, 48, 32768}; // maximum vlc = 6
- int escape, numPrefix, sufmask, suffix, shift, sign, value, absvalue, vlcnum, level_two_or_higher;
- int bindx = blkIdx2blkXY[cindx>>2][cindx&3] ; // raster scan index
-
- switch (type)
- {
- case AVC_Luma:
- maxNumCoeff = 16;
- level = encvid->level[cindx];
- run = encvid->run[cindx];
- TotalCoeff = currMB->nz_coeff[bindx];
- break;
- case AVC_Intra16DC:
- maxNumCoeff = 16;
- level = encvid->leveldc;
- run = encvid->rundc;
- TotalCoeff = cindx; /* special case */
- bindx = 0;
- cindx = 0;
- break;
- case AVC_Intra16AC:
- maxNumCoeff = 15;
- level = encvid->level[cindx];
- run = encvid->run[cindx];
- TotalCoeff = currMB->nz_coeff[bindx];
- break;
- case AVC_ChromaDC: /* how to differentiate Cb from Cr */
- maxNumCoeff = 4;
- cdc = 1;
- if (cindx >= 8)
- {
- level = encvid->levelcdc + 4;
- run = encvid->runcdc + 4;
- TotalCoeff = cindx - 8; /* special case */
- }
- else
- {
- level = encvid->levelcdc;
- run = encvid->runcdc;
- TotalCoeff = cindx; /* special case */
- }
- break;
- case AVC_ChromaAC:
- maxNumCoeff = 15;
- cac = 1;
- level = encvid->level[cindx];
- run = encvid->run[cindx];
- cindx -= 16;
- bindx = 16 + blkIdx2blkXY[cindx>>2][cindx&3];
- cindx += 16;
- TotalCoeff = currMB->nz_coeff[bindx];
- break;
- default:
- return AVCENC_FAIL;
- }
-
-
- /* find TrailingOnes */
- TrailingOnes = 0;
- zerosLeft = 0;
- i = TotalCoeff - 1;
- nC = 1;
- while (i >= 0)
- {
- zerosLeft += run[i];
- if (nC && (level[i] == 1 || level[i] == -1))
- {
- TrailingOnes++;
- }
- else
- {
- nC = 0;
- }
- i--;
- }
- if (TrailingOnes > 3)
- {
- TrailingOnes = 3; /* clip it */
- }
-
- if (!cdc)
- {
- if (!cac) /* not chroma */
- {
- nC = predict_nnz(video, bindx & 3, bindx >> 2);
- }
- else /* chroma ac but not chroma dc */
- {
- nC = predict_nnz_chroma(video, bindx & 3, bindx >> 2);
- }
-
- status = ce_TotalCoeffTrailingOnes(stream, TrailingOnes, TotalCoeff, nC);
- }
- else
- {
- nC = -1; /* Chroma DC level */
- status = ce_TotalCoeffTrailingOnesChromaDC(stream, TrailingOnes, TotalCoeff);
- }
-
- /* This part is done quite differently in ReadCoef4x4_CAVLC() */
- if (TotalCoeff > 0)
- {
-
- i = TotalCoeff - 1;
-
- if (TrailingOnes) /* keep reading the sign of those trailing ones */
- {
- nC = TrailingOnes;
- trailing_ones_sign_flag = 0;
- while (nC)
- {
- trailing_ones_sign_flag <<= 1;
- trailing_ones_sign_flag |= ((uint32)level[i--] >> 31); /* 0 or positive, 1 for negative */
- nC--;
- }
-
- /* instead of writing one bit at a time, write the whole thing at once */
- status = BitstreamWriteBits(stream, TrailingOnes, trailing_ones_sign_flag);
- }
-
- level_two_or_higher = 1;
- if (TotalCoeff > 3 && TrailingOnes == 3)
- {
- level_two_or_higher = 0;
- }
-
- if (TotalCoeff > 10 && TrailingOnes < 3)
- {
- vlcnum = 1;
- }
- else
- {
- vlcnum = 0;
- }
-
- /* then do this TotalCoeff-TrailingOnes times */
- for (i = TotalCoeff - TrailingOnes - 1; i >= 0; i--)
- {
- value = level[i];
- absvalue = (value >= 0) ? value : -value;
-
- if (level_two_or_higher)
- {
- if (value > 0) value--;
- else value++;
- level_two_or_higher = 0;
- }
-
- if (value >= 0)
- {
- sign = 0;
- }
- else
- {
- sign = 1;
- value = -value;
- }
-
- if (vlcnum == 0) // VLC1
- {
- if (value < 8)
- {
- status = BitstreamWriteBits(stream, value * 2 + sign - 1, 1);
- }
- else if (value < 8 + 8)
- {
- status = BitstreamWriteBits(stream, 14 + 1 + 4, (1 << 4) | ((value - 8) << 1) | sign);
- }
- else
- {
- status = BitstreamWriteBits(stream, 14 + 2 + 12, (1 << 12) | ((value - 16) << 1) | sign) ;
- }
- }
- else // VLCN
- {
- shift = vlcnum - 1;
- escape = (15 << shift) + 1;
- numPrefix = (value - 1) >> shift;
- sufmask = ~((0xffffffff) << shift);
- suffix = (value - 1) & sufmask;
- if (value < escape)
- {
- status = BitstreamWriteBits(stream, numPrefix + vlcnum + 1, (1 << (shift + 1)) | (suffix << 1) | sign);
- }
- else
- {
- status = BitstreamWriteBits(stream, 28, (1 << 12) | ((value - escape) << 1) | sign);
- }
-
- }
-
- if (absvalue > incVlc[vlcnum])
- vlcnum++;
-
- if (i == TotalCoeff - TrailingOnes - 1 && absvalue > 3)
- vlcnum = 2;
- }
-
- if (status != AVCENC_SUCCESS) /* occasionally check the bitstream */
- {
- return status;
- }
- if (TotalCoeff < maxNumCoeff)
- {
- if (!cdc)
- {
- ce_TotalZeros(stream, zerosLeft, TotalCoeff);
- }
- else
- {
- ce_TotalZerosChromaDC(stream, zerosLeft, TotalCoeff);
- }
- }
- else
- {
- zerosLeft = 0;
- }
-
- i = TotalCoeff - 1;
- while (i > 0) /* don't do the last one */
- {
- if (zerosLeft > 0)
- {
- ce_RunBefore(stream, run[i], zerosLeft);
- }
-
- zerosLeft = zerosLeft - run[i];
- i--;
- }
- }
-
- return status;
-}
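
The first loop of the deleted enc_residual_block() walks the level/run arrays backwards to count the trailing +/-1 coefficients (capped at 3) before CAVLC coding. A small sketch of just that counting step, assuming the same level-array layout (illustrative, not part of the patch):

    /* Count trailing +/-1 coefficients from the end of 'level', capped at 3,
     * mirroring the TrailingOnes computation in the deleted encoder. */
    static int count_trailing_ones(const int *level, int total_coeff)
    {
        int t1 = 0;
        for (int i = total_coeff - 1; i >= 0; i--) {
            if (level[i] == 1 || level[i] == -1)
                t1++;
            else
                break;               /* a non-+/-1 coefficient ends the run */
        }
        return (t1 > 3) ? 3 : t1;    /* CAVLC signals at most 3 trailing ones */
    }
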
diff --git a/media/libstagefright/codecs/avc/enc/src/sad.cpp b/media/libstagefright/codecs/avc/enc/src/sad.cpp
deleted file mode 100644
index ae7acd2..0000000
--- a/media/libstagefright/codecs/avc/enc/src/sad.cpp
+++ /dev/null
@@ -1,290 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-#include "sad_inline.h"
-
-#define Cached_lx 176
-
-#ifdef _SAD_STAT
-uint32 num_sad_MB = 0;
-uint32 num_sad_Blk = 0;
-uint32 num_sad_MB_call = 0;
-uint32 num_sad_Blk_call = 0;
-
-#define NUM_SAD_MB_CALL() num_sad_MB_call++
-#define NUM_SAD_MB() num_sad_MB++
-#define NUM_SAD_BLK_CALL() num_sad_Blk_call++
-#define NUM_SAD_BLK() num_sad_Blk++
-
-#else
-
-#define NUM_SAD_MB_CALL()
-#define NUM_SAD_MB()
-#define NUM_SAD_BLK_CALL()
-#define NUM_SAD_BLK()
-
-#endif
-
-
-/* consists of
-int AVCSAD_Macroblock_C(uint8 *ref,uint8 *blk,int dmin,int lx,void *extra_info)
-int AVCSAD_MB_HTFM_Collect(uint8 *ref,uint8 *blk,int dmin,int lx,void *extra_info)
-int AVCSAD_MB_HTFM(uint8 *ref,uint8 *blk,int dmin,int lx,void *extra_info)
-*/
-
-
-/*==================================================================
- Function: SAD_Macroblock
- Date: 09/07/2000
- Purpose: Compute SAD 16x16 between blk and ref.
- To do: Uniform subsampling will be inserted later!
- Hypothesis Testing Fast Matching to be used later!
- Changes:
- 11/7/00: implemented MMX
- 1/24/01: implemented SSE
-==================================================================*/
-/********** C ************/
-int AVCSAD_Macroblock_C(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)
-{
- (void)(extra_info);
-
- int32 x10;
- int dmin = (uint32)dmin_lx >> 16;
- int lx = dmin_lx & 0xFFFF;
-
- NUM_SAD_MB_CALL();
-
- x10 = simd_sad_mb(ref, blk, dmin, lx);
-
- return x10;
-}
-
-#ifdef HTFM /* HTFM with uniform subsampling implementation 2/28/01 */
-/*===============================================================
- Function: AVCAVCSAD_MB_HTFM_Collect and AVCSAD_MB_HTFM
- Date: 3/2/1
- Purpose: Compute the SAD on a 16x16 block using
- uniform subsampling and hypothesis testing fast matching
- for early dropout. SAD_MB_HP_HTFM_Collect is to collect
- the statistics to compute the thresholds to be used in
- SAD_MB_HP_HTFM.
- Input/Output:
- Changes:
- ===============================================================*/
-
-int AVCAVCSAD_MB_HTFM_Collect(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)
-{
- int i;
- int sad = 0;
- uint8 *p1;
- int lx4 = (dmin_lx << 2) & 0x3FFFC;
- uint32 cur_word;
- int saddata[16], tmp, tmp2; /* used when collecting flag (global) is on */
- int difmad;
- int madstar;
- HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
- int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
- uint *countbreak = &(htfm_stat->countbreak);
- int *offsetRef = htfm_stat->offsetRef;
-
- madstar = (uint32)dmin_lx >> 20;
-
- NUM_SAD_MB_CALL();
-
- blk -= 4;
- for (i = 0; i < 16; i++)
- {
- p1 = ref + offsetRef[i];
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12];
- tmp2 = (cur_word >> 24) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[8];
- tmp2 = (cur_word >> 16) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[4];
- tmp2 = (cur_word >> 8) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[0];
- p1 += lx4;
- tmp2 = (cur_word & 0xFF);
- sad = SUB_SAD(sad, tmp, tmp2);
-
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12];
- tmp2 = (cur_word >> 24) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[8];
- tmp2 = (cur_word >> 16) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[4];
- tmp2 = (cur_word >> 8) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[0];
- p1 += lx4;
- tmp2 = (cur_word & 0xFF);
- sad = SUB_SAD(sad, tmp, tmp2);
-
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12];
- tmp2 = (cur_word >> 24) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[8];
- tmp2 = (cur_word >> 16) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[4];
- tmp2 = (cur_word >> 8) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[0];
- p1 += lx4;
- tmp2 = (cur_word & 0xFF);
- sad = SUB_SAD(sad, tmp, tmp2);
-
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12];
- tmp2 = (cur_word >> 24) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[8];
- tmp2 = (cur_word >> 16) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[4];
- tmp2 = (cur_word >> 8) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[0];
- p1 += lx4;
- tmp2 = (cur_word & 0xFF);
- sad = SUB_SAD(sad, tmp, tmp2);
-
- NUM_SAD_MB();
-
- saddata[i] = sad;
-
- if (i > 0)
- {
- if ((uint32)sad > ((uint32)dmin_lx >> 16))
- {
- difmad = saddata[0] - ((saddata[1] + 1) >> 1);
- (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
- (*countbreak)++;
- return sad;
- }
- }
- }
-
- difmad = saddata[0] - ((saddata[1] + 1) >> 1);
- (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
- (*countbreak)++;
- return sad;
-}
-
-int AVCSAD_MB_HTFM(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)
-{
- int sad = 0;
- uint8 *p1;
-
- int i;
- int tmp, tmp2;
- int lx4 = (dmin_lx << 2) & 0x3FFFC;
- int sadstar = 0, madstar;
- int *nrmlz_th = (int*) extra_info;
- int *offsetRef = (int*) extra_info + 32;
- uint32 cur_word;
-
- madstar = (uint32)dmin_lx >> 20;
-
- NUM_SAD_MB_CALL();
-
- blk -= 4;
- for (i = 0; i < 16; i++)
- {
- p1 = ref + offsetRef[i];
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12];
- tmp2 = (cur_word >> 24) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[8];
- tmp2 = (cur_word >> 16) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[4];
- tmp2 = (cur_word >> 8) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[0];
- p1 += lx4;
- tmp2 = (cur_word & 0xFF);
- sad = SUB_SAD(sad, tmp, tmp2);
-
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12];
- tmp2 = (cur_word >> 24) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[8];
- tmp2 = (cur_word >> 16) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[4];
- tmp2 = (cur_word >> 8) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[0];
- p1 += lx4;
- tmp2 = (cur_word & 0xFF);
- sad = SUB_SAD(sad, tmp, tmp2);
-
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12];
- tmp2 = (cur_word >> 24) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[8];
- tmp2 = (cur_word >> 16) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[4];
- tmp2 = (cur_word >> 8) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[0];
- p1 += lx4;
- tmp2 = (cur_word & 0xFF);
- sad = SUB_SAD(sad, tmp, tmp2);
-
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12];
- tmp2 = (cur_word >> 24) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[8];
- tmp2 = (cur_word >> 16) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[4];
- tmp2 = (cur_word >> 8) & 0xFF;
- sad = SUB_SAD(sad, tmp, tmp2);
- tmp = p1[0];
- p1 += lx4;
- tmp2 = (cur_word & 0xFF);
- sad = SUB_SAD(sad, tmp, tmp2);
-
- NUM_SAD_MB();
-
- sadstar += madstar;
- if (((uint32)sad <= ((uint32)dmin_lx >> 16)) && (sad <= (sadstar - *nrmlz_th++)))
- ;
- else
- return 65536;
- }
-
- return sad;
-}
-#endif /* HTFM */
-
-
-
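
All of the hand-optimized routines deleted above compute the same quantity: a 16x16 sum of absolute differences between a packed source block and a reference window, giving up early once the running sum exceeds the best distance found so far. A plain reference sketch of that computation, under those assumptions (illustrative only, not part of the patch):

    #include <stdint.h>
    #include <stdlib.h>

    /* Plain 16x16 SAD with per-row early termination. 'lx' is the reference
     * frame stride; 'blk' is a packed 16x16 block (stride 16), matching the
     * layout used by the deleted simd_sad_mb()/AVCSAD_Macroblock_C(). */
    static int sad_16x16_ref(const uint8_t *ref, const uint8_t *blk,
                             int dmin, int lx)
    {
        int sad = 0;
        for (int y = 0; y < 16; y++) {
            for (int x = 0; x < 16; x++)
                sad += abs((int)ref[x] - (int)blk[x]);
            if (sad > dmin)          /* already worse than the best candidate */
                return sad;
            ref += lx;
            blk += 16;
        }
        return sad;
    }
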
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_halfpel.cpp b/media/libstagefright/codecs/avc/enc/src/sad_halfpel.cpp
deleted file mode 100644
index faf2198..0000000
--- a/media/libstagefright/codecs/avc/enc/src/sad_halfpel.cpp
+++ /dev/null
@@ -1,629 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/* contains
-int AVCHalfPel1_SAD_MB(uint8 *ref,uint8 *blk,int dmin,int width,int ih,int jh)
-int AVCHalfPel2_SAD_MB(uint8 *ref,uint8 *blk,int dmin,int width)
-int AVCHalfPel1_SAD_Blk(uint8 *ref,uint8 *blk,int dmin,int width,int ih,int jh)
-int AVCHalfPel2_SAD_Blk(uint8 *ref,uint8 *blk,int dmin,int width)
-
-int AVCSAD_MB_HalfPel_C(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)
-int AVCSAD_MB_HP_HTFM_Collect(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)
-int AVCSAD_MB_HP_HTFM(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)
-int AVCSAD_Blk_HalfPel_C(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)
-*/
-
-#include "avcenc_lib.h"
-#include "sad_halfpel_inline.h"
-
-#ifdef _SAD_STAT
-uint32 num_sad_HP_MB = 0;
-uint32 num_sad_HP_Blk = 0;
-uint32 num_sad_HP_MB_call = 0;
-uint32 num_sad_HP_Blk_call = 0;
-#define NUM_SAD_HP_MB_CALL() num_sad_HP_MB_call++
-#define NUM_SAD_HP_MB() num_sad_HP_MB++
-#define NUM_SAD_HP_BLK_CALL() num_sad_HP_Blk_call++
-#define NUM_SAD_HP_BLK() num_sad_HP_Blk++
-#else
-#define NUM_SAD_HP_MB_CALL()
-#define NUM_SAD_HP_MB()
-#define NUM_SAD_HP_BLK_CALL()
-#define NUM_SAD_HP_BLK()
-#endif
-
-
-
-/*===============================================================
- Function: SAD_MB_HalfPel
- Date: 09/17/2000
- Purpose: Compute the SAD on the half-pel resolution
- Input/Output: hmem is assumed to be a pointer to the starting
- point of the search in the 33x33 matrix search region
- Changes:
- 11/7/00: implemented MMX
- ===============================================================*/
-/*==================================================================
- Function: AVCSAD_MB_HalfPel_C
- Date: 04/30/2001
- Purpose: Compute SAD 16x16 between blk and ref in halfpel
- resolution,
- Changes:
- ==================================================================*/
-/* One component is half-pel */
-int AVCSAD_MB_HalfPel_Cxhyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
-{
- (void)(extra_info);
-
- int i, j;
- int sad = 0;
- uint8 *kk, *p1, *p2, *p3, *p4;
-// int sumref=0;
- int temp;
- int rx = dmin_rx & 0xFFFF;
-
- NUM_SAD_HP_MB_CALL();
-
- p1 = ref;
- p2 = ref + 1;
- p3 = ref + rx;
- p4 = ref + rx + 1;
- kk = blk;
-
- for (i = 0; i < 16; i++)
- {
- for (j = 0; j < 16; j++)
- {
-
- temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++;
- sad += AVC_ABS(temp);
- }
-
- NUM_SAD_HP_MB();
-
- if (sad > (int)((uint32)dmin_rx >> 16))
- return sad;
-
- p1 += rx;
- p3 += rx;
- p2 += rx;
- p4 += rx;
- }
- return sad;
-}
-
-int AVCSAD_MB_HalfPel_Cyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
-{
- (void)(extra_info);
-
- int i, j;
- int sad = 0;
- uint8 *kk, *p1, *p2;
-// int sumref=0;
- int temp;
- int rx = dmin_rx & 0xFFFF;
-
- NUM_SAD_HP_MB_CALL();
-
- p1 = ref;
- p2 = ref + rx; /* either left/right or top/bottom pixel */
- kk = blk;
-
- for (i = 0; i < 16; i++)
- {
- for (j = 0; j < 16; j++)
- {
-
- temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++;
- sad += AVC_ABS(temp);
- }
-
- NUM_SAD_HP_MB();
-
- if (sad > (int)((uint32)dmin_rx >> 16))
- return sad;
- p1 += rx;
- p2 += rx;
- }
- return sad;
-}
-
-int AVCSAD_MB_HalfPel_Cxh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
-{
- (void)(extra_info);
-
- int i, j;
- int sad = 0;
- uint8 *kk, *p1;
- int temp;
- int rx = dmin_rx & 0xFFFF;
-
- NUM_SAD_HP_MB_CALL();
-
- p1 = ref;
- kk = blk;
-
- for (i = 0; i < 16; i++)
- {
- for (j = 0; j < 16; j++)
- {
-
- temp = ((p1[j] + p1[j+1] + 1) >> 1) - *kk++;
- sad += AVC_ABS(temp);
- }
-
- NUM_SAD_HP_MB();
-
- if (sad > (int)((uint32)dmin_rx >> 16))
- return sad;
- p1 += rx;
- }
- return sad;
-}
-
-#ifdef HTFM /* HTFM with uniform subsampling implementation, 2/28/01 */
-
-// Check here
-int AVCAVCSAD_MB_HP_HTFM_Collectxhyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
-{
- int i, j;
- int sad = 0;
- uint8 *p1, *p2;
- int rx = dmin_rx & 0xFFFF;
- int refwx4 = rx << 2;
- int saddata[16]; /* used when collecting flag (global) is on */
- int difmad, tmp, tmp2;
- int madstar;
- HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
- int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
- UInt *countbreak = &(htfm_stat->countbreak);
- int *offsetRef = htfm_stat->offsetRef;
- uint32 cur_word;
-
- madstar = (uint32)dmin_rx >> 20;
-
- NUM_SAD_HP_MB_CALL();
-
- blk -= 4;
-
- for (i = 0; i < 16; i++) /* 16 stages */
- {
- p1 = ref + offsetRef[i];
- p2 = p1 + rx;
-
- j = 4;/* 4 lines */
- do
- {
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12] + p2[12];
- tmp2 = p1[13] + p2[13];
- tmp += tmp2;
- tmp2 = (cur_word >> 24) & 0xFF;
- tmp += 2;
- sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[8] + p2[8];
- tmp2 = p1[9] + p2[9];
- tmp += tmp2;
- tmp2 = (cur_word >> 16) & 0xFF;
- tmp += 2;
- sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[4] + p2[4];
- tmp2 = p1[5] + p2[5];
- tmp += tmp2;
- tmp2 = (cur_word >> 8) & 0xFF;
- tmp += 2;
- sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
- tmp2 = p1[1] + p2[1];
- tmp = p1[0] + p2[0];
- p1 += refwx4;
- p2 += refwx4;
- tmp += tmp2;
- tmp2 = (cur_word & 0xFF);
- tmp += 2;
- sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
- }
- while (--j);
-
- NUM_SAD_HP_MB();
-
- saddata[i] = sad;
-
- if (i > 0)
- {
- if (sad > ((uint32)dmin_rx >> 16))
- {
- difmad = saddata[0] - ((saddata[1] + 1) >> 1);
- (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
- (*countbreak)++;
- return sad;
- }
- }
- }
- difmad = saddata[0] - ((saddata[1] + 1) >> 1);
- (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
- (*countbreak)++;
-
- return sad;
-}
-
-int AVCAVCSAD_MB_HP_HTFM_Collectyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
-{
- int i, j;
- int sad = 0;
- uint8 *p1, *p2;
- int rx = dmin_rx & 0xFFFF;
- int refwx4 = rx << 2;
- int saddata[16]; /* used when collecting flag (global) is on */
- int difmad, tmp, tmp2;
- int madstar;
- HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
- int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
- UInt *countbreak = &(htfm_stat->countbreak);
- int *offsetRef = htfm_stat->offsetRef;
- uint32 cur_word;
-
- madstar = (uint32)dmin_rx >> 20;
-
- NUM_SAD_HP_MB_CALL();
-
- blk -= 4;
-
- for (i = 0; i < 16; i++) /* 16 stages */
- {
- p1 = ref + offsetRef[i];
- p2 = p1 + rx;
- j = 4;
- do
- {
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12];
- tmp2 = p2[12];
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word >> 24) & 0xFF;
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[8];
- tmp2 = p2[8];
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word >> 16) & 0xFF;
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[4];
- tmp2 = p2[4];
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word >> 8) & 0xFF;
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[0];
- p1 += refwx4;
- tmp2 = p2[0];
- p2 += refwx4;
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word & 0xFF);
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- }
- while (--j);
-
- NUM_SAD_HP_MB();
-
- saddata[i] = sad;
-
- if (i > 0)
- {
- if (sad > ((uint32)dmin_rx >> 16))
- {
- difmad = saddata[0] - ((saddata[1] + 1) >> 1);
- (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
- (*countbreak)++;
- return sad;
- }
- }
- }
- difmad = saddata[0] - ((saddata[1] + 1) >> 1);
- (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
- (*countbreak)++;
-
- return sad;
-}
-
-int AVCAVCSAD_MB_HP_HTFM_Collectxh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
-{
- int i, j;
- int sad = 0;
- uint8 *p1;
- int rx = dmin_rx & 0xFFFF;
- int refwx4 = rx << 2;
- int saddata[16]; /* used when collecting flag (global) is on */
- int difmad, tmp, tmp2;
- int madstar;
- HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
- int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
- UInt *countbreak = &(htfm_stat->countbreak);
- int *offsetRef = htfm_stat->offsetRef;
- uint32 cur_word;
-
- madstar = (uint32)dmin_rx >> 20;
-
- NUM_SAD_HP_MB_CALL();
-
- blk -= 4;
-
- for (i = 0; i < 16; i++) /* 16 stages */
- {
- p1 = ref + offsetRef[i];
-
- j = 4; /* 4 lines */
- do
- {
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12];
- tmp2 = p1[13];
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word >> 24) & 0xFF;
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[8];
- tmp2 = p1[9];
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word >> 16) & 0xFF;
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[4];
- tmp2 = p1[5];
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word >> 8) & 0xFF;
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[0];
- tmp2 = p1[1];
- p1 += refwx4;
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word & 0xFF);
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- }
- while (--j);
-
- NUM_SAD_HP_MB();
-
- saddata[i] = sad;
-
- if (i > 0)
- {
- if (sad > ((uint32)dmin_rx >> 16))
- {
- difmad = saddata[0] - ((saddata[1] + 1) >> 1);
- (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
- (*countbreak)++;
- return sad;
- }
- }
- }
- difmad = saddata[0] - ((saddata[1] + 1) >> 1);
- (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
- (*countbreak)++;
-
- return sad;
-}
-
-int AVCSAD_MB_HP_HTFMxhyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
-{
- int i, j;
- int sad = 0, tmp, tmp2;
- uint8 *p1, *p2;
- int rx = dmin_rx & 0xFFFF;
- int refwx4 = rx << 2;
- int sadstar = 0, madstar;
- int *nrmlz_th = (int*) extra_info;
- int *offsetRef = nrmlz_th + 32;
- uint32 cur_word;
-
- madstar = (uint32)dmin_rx >> 20;
-
- NUM_SAD_HP_MB_CALL();
-
- blk -= 4;
-
- for (i = 0; i < 16; i++) /* 16 stages */
- {
- p1 = ref + offsetRef[i];
- p2 = p1 + rx;
-
- j = 4; /* 4 lines */
- do
- {
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12] + p2[12];
- tmp2 = p1[13] + p2[13];
- tmp += tmp2;
- tmp2 = (cur_word >> 24) & 0xFF;
- tmp += 2;
- sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[8] + p2[8];
- tmp2 = p1[9] + p2[9];
- tmp += tmp2;
- tmp2 = (cur_word >> 16) & 0xFF;
- tmp += 2;
- sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[4] + p2[4];
- tmp2 = p1[5] + p2[5];
- tmp += tmp2;
- tmp2 = (cur_word >> 8) & 0xFF;
- tmp += 2;
- sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
- tmp2 = p1[1] + p2[1];
- tmp = p1[0] + p2[0];
- p1 += refwx4;
- p2 += refwx4;
- tmp += tmp2;
- tmp2 = (cur_word & 0xFF);
- tmp += 2;
- sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
- }
- while (--j);
-
- NUM_SAD_HP_MB();
-
- sadstar += madstar;
- if (sad > sadstar - nrmlz_th[i] || sad > ((uint32)dmin_rx >> 16))
- {
- return 65536;
- }
- }
-
- return sad;
-}
-
-int AVCSAD_MB_HP_HTFMyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
-{
- int i, j;
- int sad = 0, tmp, tmp2;
- uint8 *p1, *p2;
- int rx = dmin_rx & 0xFFFF;
- int refwx4 = rx << 2;
- int sadstar = 0, madstar;
- int *nrmlz_th = (int*) extra_info;
- int *offsetRef = nrmlz_th + 32;
- uint32 cur_word;
-
- madstar = (uint32)dmin_rx >> 20;
-
- NUM_SAD_HP_MB_CALL();
-
- blk -= 4;
-
- for (i = 0; i < 16; i++) /* 16 stages */
- {
- p1 = ref + offsetRef[i];
- p2 = p1 + rx;
- j = 4;
- do
- {
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12];
- tmp2 = p2[12];
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word >> 24) & 0xFF;
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[8];
- tmp2 = p2[8];
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word >> 16) & 0xFF;
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[4];
- tmp2 = p2[4];
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word >> 8) & 0xFF;
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[0];
- p1 += refwx4;
- tmp2 = p2[0];
- p2 += refwx4;
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word & 0xFF);
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- }
- while (--j);
-
- NUM_SAD_HP_MB();
- sadstar += madstar;
- if (sad > sadstar - nrmlz_th[i] || sad > ((uint32)dmin_rx >> 16))
- {
- return 65536;
- }
- }
-
- return sad;
-}
-
-int AVCSAD_MB_HP_HTFMxh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
-{
- int i, j;
- int sad = 0, tmp, tmp2;
- uint8 *p1;
- int rx = dmin_rx & 0xFFFF;
- int refwx4 = rx << 2;
- int sadstar = 0, madstar;
- int *nrmlz_th = (int*) extra_info;
- int *offsetRef = nrmlz_th + 32;
- uint32 cur_word;
-
- madstar = (uint32)dmin_rx >> 20;
-
- NUM_SAD_HP_MB_CALL();
-
- blk -= 4;
-
- for (i = 0; i < 16; i++) /* 16 stages */
- {
- p1 = ref + offsetRef[i];
-
- j = 4;/* 4 lines */
- do
- {
- cur_word = *((uint32*)(blk += 4));
- tmp = p1[12];
- tmp2 = p1[13];
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word >> 24) & 0xFF;
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[8];
- tmp2 = p1[9];
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word >> 16) & 0xFF;
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[4];
- tmp2 = p1[5];
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word >> 8) & 0xFF;
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- tmp = p1[0];
- tmp2 = p1[1];
- p1 += refwx4;
- tmp++;
- tmp2 += tmp;
- tmp = (cur_word & 0xFF);
- sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
- }
- while (--j);
-
- NUM_SAD_HP_MB();
-
- sadstar += madstar;
- if (sad > sadstar - nrmlz_th[i] || sad > ((uint32)dmin_rx >> 16))
- {
- return 65536;
- }
- }
-
- return sad;
-}
-
-#endif /* HTFM */
-
-
-
-
-
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h b/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h
deleted file mode 100644
index 22f545a..0000000
--- a/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-
-#ifndef _SAD_HALFPEL_INLINE_H_
-#define _SAD_HALFPEL_INLINE_H_
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/* Intentionally not using the gcc asm version, since it is
- * slightly slower than the plain C version on modern GCC versions. */
-#if !defined(__CC_ARM) /* Generic C version */
-
- __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
- {
- tmp = (tmp2 >> 1) - tmp;
- if (tmp > 0) sad += tmp;
- else sad -= tmp;
-
- return sad;
- }
-
- __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
- {
- tmp = (tmp >> 2) - tmp2;
- if (tmp > 0) sad += tmp;
- else sad -= tmp;
-
- return sad;
- }
-
-#elif defined(__CC_ARM) /* only work with arm v5 */
-
- __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
- {
- __asm
- {
- rsbs tmp, tmp, tmp2, asr #1 ;
- rsbmi tmp, tmp, #0 ;
- add sad, sad, tmp ;
- }
-
- return sad;
- }
-
- __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
- {
- __asm
- {
- rsbs tmp, tmp2, tmp, asr #2 ;
- rsbmi tmp, tmp, #0 ;
- add sad, sad, tmp ;
- }
-
- return sad;
- }
-
-#elif defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER */
-
- __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
- {
- __asm__ volatile(
- "rsbs %1, %1, %2, asr #1\n\t"
- "rsbmi %1, %1, #0\n\t"
- "add %0, %0, %1"
- : "+r"(sad), "+r"(tmp)
- : "r"(tmp2)
- );
-
- return sad;
- }
-
- __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
- {
- __asm__ volatile(
- "rsbs %1, %2, %1, asr #2\n\t"
- "rsbmi %1, %1, #0\n\t"
- "add %0, %0, %1"
- : "+r"(sad), "+r"(tmp)
- : "r"(tmp2)
- );
-
- return sad;
- }
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif //_SAD_HALFPEL_INLINE_H_
-
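
The INTERP1_SUB_SAD/INTERP2_SUB_SAD helpers deleted above accumulate the absolute difference between a rounded half-pel average and a block pixel, with the +1 or +2 rounding term already folded into the operands by the callers. Written out in plain C, the two accumulations amount to the following (illustrative sketch, not part of the patch):

    #include <stdlib.h>

    /* 2-tap half-pel average (horizontal or vertical) vs. block pixel,
     * the quantity INTERP1_SUB_SAD accumulates. */
    static int sad_accum_2tap(int sad, int a, int b, int blk_pix)
    {
        return sad + abs(((a + b + 1) >> 1) - blk_pix);
    }

    /* 4-tap half-pel average (diagonal) vs. block pixel,
     * the quantity INTERP2_SUB_SAD accumulates. */
    static int sad_accum_4tap(int sad, int a, int b, int c, int d, int blk_pix)
    {
        return sad + abs(((a + b + c + d + 2) >> 2) - blk_pix);
    }
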
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_inline.h b/media/libstagefright/codecs/avc/enc/src/sad_inline.h
deleted file mode 100644
index 47abc65..0000000
--- a/media/libstagefright/codecs/avc/enc/src/sad_inline.h
+++ /dev/null
@@ -1,526 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#ifndef _SAD_INLINE_H_
-#define _SAD_INLINE_H_
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/* Intentionally not using the gcc asm version, since it is
- * slightly slower than the plain C version on modern GCC versions. */
-#if !defined(__CC_ARM) /* Generic C version */
-
- __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
- {
- tmp = tmp - tmp2;
- if (tmp > 0) sad += tmp;
- else sad -= tmp;
-
- return sad;
- }
-
- __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
- {
- int32 x7;
-
- x7 = src2 ^ src1; /* check odd/even combination */
- if ((uint32)src2 >= (uint32)src1)
- {
- src1 = src2 - src1; /* subs */
- }
- else
- {
- src1 = src1 - src2;
- }
- x7 = x7 ^ src1; /* only odd bytes need to add carry */
- x7 = mask & ((uint32)x7 >> 1);
- x7 = (x7 << 8) - x7;
- src1 = src1 + (x7 >> 7); /* add 0xFF to the negative byte, add back carry */
- src1 = src1 ^(x7 >> 7); /* take absolute value of negative byte */
-
- return src1;
- }
-
-#define NUMBER 3
-#define SHIFT 24
-
-#include "sad_mb_offset.h"
-
-#undef NUMBER
-#define NUMBER 2
-#undef SHIFT
-#define SHIFT 16
-#include "sad_mb_offset.h"
-
-#undef NUMBER
-#define NUMBER 1
-#undef SHIFT
-#define SHIFT 8
-#include "sad_mb_offset.h"
-
-
- __inline int32 simd_sad_mb(uint8 *ref, uint8 *blk, int dmin, int lx)
- {
- int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
-
- x9 = 0x80808080; /* const. */
-
- x8 = (intptr_t)ref & 0x3;
- if (x8 == 3)
- goto SadMBOffset3;
- if (x8 == 2)
- goto SadMBOffset2;
- if (x8 == 1)
- goto SadMBOffset1;
-
-// x5 = (x4<<8)-x4; /* x5 = x4*255; */
- x4 = x5 = 0;
-
- x6 = 0xFFFF00FF;
-
- ref -= lx;
- blk -= 16;
-
- x8 = 16;
-
-LOOP_SAD0:
- /****** process 8 pixels ******/
- x10 = *((uint32*)(ref += lx));
- x11 = *((uint32*)(ref + 4));
- x12 = *((uint32*)(blk += 16));
- x14 = *((uint32*)(blk + 4));
-
- /* process x11 & x14 */
- x11 = sad_4pixel(x11, x14, x9);
-
- /* process x12 & x10 */
- x10 = sad_4pixel(x10, x12, x9);
-
- x5 = x5 + x10; /* accumulate low bytes */
- x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
- x5 = x5 + x11; /* accumulate low bytes */
- x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
-
- /****** process 8 pixels ******/
- x10 = *((uint32*)(ref + 8));
- x11 = *((uint32*)(ref + 12));
- x12 = *((uint32*)(blk + 8));
- x14 = *((uint32*)(blk + 12));
-
- /* process x11 & x14 */
- x11 = sad_4pixel(x11, x14, x9);
-
- /* process x12 & x10 */
- x10 = sad_4pixel(x10, x12, x9);
-
- x5 = x5 + x10; /* accumulate low bytes */
- x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
- x5 = x5 + x11; /* accumulate low bytes */
- x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
-
- /****************/
- x10 = x5 - (x4 << 8); /* extract low bytes */
- x10 = x10 + x4; /* add with high bytes */
- x10 = x10 + (x10 << 16); /* add with lower half word */
-
- if ((int)((uint32)x10 >> 16) <= dmin) /* compare with dmin */
- {
- if (--x8)
- {
- goto LOOP_SAD0;
- }
-
- }
-
- return ((uint32)x10 >> 16);
-
-SadMBOffset3:
-
- return sad_mb_offset3(ref, blk, lx, dmin);
-
-SadMBOffset2:
-
- return sad_mb_offset2(ref, blk, lx, dmin);
-
-SadMBOffset1:
-
- return sad_mb_offset1(ref, blk, lx, dmin);
-
- }
-
-#elif defined(__CC_ARM) /* only work with arm v5 */
-
- __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
- {
- __asm
- {
- rsbs tmp, tmp, tmp2 ;
- rsbmi tmp, tmp, #0 ;
- add sad, sad, tmp ;
- }
-
- return sad;
- }
-
- __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
- {
- int32 x7;
-
- __asm
- {
- EOR x7, src2, src1; /* check odd/even combination */
- SUBS src1, src2, src1;
- EOR x7, x7, src1;
- AND x7, mask, x7, lsr #1;
- ORRCC x7, x7, #0x80000000;
- RSB x7, x7, x7, lsl #8;
- ADD src1, src1, x7, asr #7; /* add 0xFF to the negative byte, add back carry */
- EOR src1, src1, x7, asr #7; /* take absolute value of negative byte */
- }
-
- return src1;
- }
-
- __inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask)
- {
- int32 x7;
-
- __asm
- {
- EOR x7, src2, src1; /* check odd/even combination */
- ADDS src1, src2, src1;
- EOR x7, x7, src1; /* only odd bytes need to add carry */
- ANDS x7, mask, x7, rrx;
- RSB x7, x7, x7, lsl #8;
- SUB src1, src1, x7, asr #7; /* add 0xFF to the negative byte, add back carry */
- EOR src1, src1, x7, asr #7; /* take absolute value of negative byte */
- }
-
- return src1;
- }
-
-#define sum_accumulate __asm{ SBC x5, x5, x10; /* accumulate low bytes */ \
- BIC x10, x6, x10; /* x10 & 0xFF00FF00 */ \
- ADD x4, x4, x10,lsr #8; /* accumulate high bytes */ \
- SBC x5, x5, x11; /* accumulate low bytes */ \
- BIC x11, x6, x11; /* x11 & 0xFF00FF00 */ \
- ADD x4, x4, x11,lsr #8; } /* accumulate high bytes */
-
-
-#define NUMBER 3
-#define SHIFT 24
-#define INC_X8 0x08000001
-
-#include "sad_mb_offset.h"
-
-#undef NUMBER
-#define NUMBER 2
-#undef SHIFT
-#define SHIFT 16
-#undef INC_X8
-#define INC_X8 0x10000001
-#include "sad_mb_offset.h"
-
-#undef NUMBER
-#define NUMBER 1
-#undef SHIFT
-#define SHIFT 8
-#undef INC_X8
-#define INC_X8 0x08000001
-#include "sad_mb_offset.h"
-
-
- __inline int32 simd_sad_mb(uint8 *ref, uint8 *blk, int dmin, int lx)
- {
- int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
-
- x9 = 0x80808080; /* const. */
- x4 = x5 = 0;
-
- __asm
- {
- MOVS x8, ref, lsl #31 ;
- BHI SadMBOffset3;
- BCS SadMBOffset2;
- BMI SadMBOffset1;
-
- MVN x6, #0xFF00;
- }
-LOOP_SAD0:
- /****** process 8 pixels ******/
- x11 = *((int32*)(ref + 12));
- x10 = *((int32*)(ref + 8));
- x14 = *((int32*)(blk + 12));
- x12 = *((int32*)(blk + 8));
-
- /* process x11 & x14 */
- x11 = sad_4pixel(x11, x14, x9);
-
- /* process x12 & x10 */
- x10 = sad_4pixel(x10, x12, x9);
-
- x5 = x5 + x10; /* accumulate low bytes */
- x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
- x5 = x5 + x11; /* accumulate low bytes */
- x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
-
- __asm
- {
- /****** process 8 pixels ******/
- LDR x11, [ref, #4];
- LDR x10, [ref], lx ;
- LDR x14, [blk, #4];
- LDR x12, [blk], #16 ;
- }
-
- /* process x11 & x14 */
- x11 = sad_4pixel(x11, x14, x9);
-
- /* process x12 & x10 */
- x10 = sad_4pixel(x10, x12, x9);
-
- x5 = x5 + x10; /* accumulate low bytes */
- x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
- x5 = x5 + x11; /* accumulate low bytes */
- x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
-
- /****************/
- x10 = x5 - (x4 << 8); /* extract low bytes */
- x10 = x10 + x4; /* add with high bytes */
- x10 = x10 + (x10 << 16); /* add with lower half word */
-
- __asm
- {
- /****************/
- RSBS x11, dmin, x10, lsr #16;
- ADDLSS x8, x8, #0x10000001;
- BLS LOOP_SAD0;
- }
-
- return ((uint32)x10 >> 16);
-
-SadMBOffset3:
-
- return sad_mb_offset3(ref, blk, lx, dmin, x8);
-
-SadMBOffset2:
-
- return sad_mb_offset2(ref, blk, lx, dmin, x8);
-
-SadMBOffset1:
-
- return sad_mb_offset1(ref, blk, lx, dmin, x8);
- }
-
-
-#elif defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER */
-
- __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
- {
- __asm__ volatile(
- "rsbs %1, %1, %2\n\t"
- "rsbmi %1, %1, #0\n\t"
- "add %0, %0, %1"
- : "+r"(sad), "+r"(tmp)
- : "r"(tmp2)
- );
- return sad;
- }
-
- __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
- {
- int32 x7;
-
- __asm__ volatile(
- "EOR %1, %2, %0\n\t"
- "SUBS %0, %2, %0\n\t"
- "EOR %1, %1, %0\n\t"
- "AND %1, %3, %1, lsr #1\n\t"
- "ORRCC %1, %1, #0x80000000\n\t"
- "RSB %1, %1, %1, lsl #8\n\t"
- "ADD %0, %0, %1, asr #7\n\t"
- "EOR %0, %0, %1, asr #7"
- : "+r"(src1), "=&r"(x7)
- : "r"(src2), "r"(mask)
- );
-
- return src1;
- }
-
- __inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask)
- {
- int32 x7;
-
- __asm__ volatile(
- "EOR %1, %2, %0\n\t"
- "ADDS %0, %2, %0\n\t"
- "EOR %1, %1, %0\n\t"
- "ANDS %1, %3, %1, rrx\n\t"
- "RSB %1, %1, %1, lsl #8\n\t"
- "SUB %0, %0, %1, asr #7\n\t"
- "EOR %0, %0, %1, asr #7"
- : "+r"(src1), "=&r"(x7)
- : "r"(src2), "r"(mask)
- );
-
- return src1;
- }
-
-#define sum_accumulate __asm__ volatile( \
- "SBC %0, %0, %1\n\t" \
- "BIC %1, %4, %1\n\t" \
- "ADD %2, %2, %1, lsr #8\n\t" \
- "SBC %0, %0, %3\n\t" \
- "BIC %3, %4, %3\n\t" \
- "ADD %2, %2, %3, lsr #8" \
- : "+r" (x5), "+r" (x10), "+r" (x4), "+r" (x11) \
- : "r" (x6) \
- );
-
-#define NUMBER 3
-#define SHIFT 24
-#define INC_X8 0x08000001
-
-#include "sad_mb_offset.h"
-
-#undef NUMBER
-#define NUMBER 2
-#undef SHIFT
-#define SHIFT 16
-#undef INC_X8
-#define INC_X8 0x10000001
-#include "sad_mb_offset.h"
-
-#undef NUMBER
-#define NUMBER 1
-#undef SHIFT
-#define SHIFT 8
-#undef INC_X8
-#define INC_X8 0x08000001
-#include "sad_mb_offset.h"
-
-
- __inline int32 simd_sad_mb(uint8 *ref, uint8 *blk, int dmin, int lx)
- {
- int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
-
- x9 = 0x80808080; /* const. */
- x4 = x5 = 0;
-
- x8 = (uint32)ref & 0x3;
- if (x8 == 3)
- goto SadMBOffset3;
- if (x8 == 2)
- goto SadMBOffset2;
- if (x8 == 1)
- goto SadMBOffset1;
-
- x8 = 16;
-///
- __asm__ volatile("MVN %0, #0xFF00": "=r"(x6));
-
-LOOP_SAD0:
- /****** process 8 pixels ******/
- x11 = *((int32*)(ref + 12));
- x10 = *((int32*)(ref + 8));
- x14 = *((int32*)(blk + 12));
- x12 = *((int32*)(blk + 8));
-
- /* process x11 & x14 */
- x11 = sad_4pixel(x11, x14, x9);
-
- /* process x12 & x10 */
- x10 = sad_4pixel(x10, x12, x9);
-
- x5 = x5 + x10; /* accumulate low bytes */
- x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
- x5 = x5 + x11; /* accumulate low bytes */
- x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
-
- /****** process 8 pixels ******/
- x11 = *((int32*)(ref + 4));
- __asm__ volatile("LDR %0, [%1], %2": "=&r"(x10), "+r"(ref): "r"(lx));
- //x10 = *((int32*)ref); ref+=lx;
- x14 = *((int32*)(blk + 4));
- __asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "+r"(blk));
-
- /* process x11 & x14 */
- x11 = sad_4pixel(x11, x14, x9);
-
- /* process x12 & x10 */
- x10 = sad_4pixel(x10, x12, x9);
-
- x5 = x5 + x10; /* accumulate low bytes */
- x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
- x5 = x5 + x11; /* accumulate low bytes */
- x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
-
- /****************/
- x10 = x5 - (x4 << 8); /* extract low bytes */
- x10 = x10 + x4; /* add with high bytes */
- x10 = x10 + (x10 << 16); /* add with lower half word */
-
- /****************/
-
- if (((uint32)x10 >> 16) <= dmin) /* compare with dmin */
- {
- if (--x8)
- {
- goto LOOP_SAD0;
- }
-
- }
-
- return ((uint32)x10 >> 16);
-
-SadMBOffset3:
-
- return sad_mb_offset3(ref, blk, lx, dmin);
-
-SadMBOffset2:
-
- return sad_mb_offset2(ref, blk, lx, dmin);
-
-SadMBOffset1:
-
- return sad_mb_offset1(ref, blk, lx, dmin);
- }
-
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // _SAD_INLINE_H_
-
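The SWAR arithmetic in the deleted sad_inline.h packs four pixels per 32-bit word, but the net effect is an ordinary macroblock SAD with an early exit once the running sum exceeds dmin. A minimal scalar sketch of that computation, for reference only (not part of the tree; sad_mb_ref is an invented name, while ref, blk, lx and dmin follow the deleted code):

#include <stdint.h>
#include <stdlib.h>

/* Plain-C reference for what the deleted SWAR/assembly paths compute. */
static uint32_t sad_mb_ref(const uint8_t *ref, const uint8_t *blk,
                           int lx, uint32_t dmin)
{
    uint32_t sad = 0;

    for (int row = 0; row < 16; row++) {
        for (int col = 0; col < 16; col++)
            sad += (uint32_t)abs((int)ref[col] - (int)blk[col]);
        if (sad > dmin)   /* same early-out idea as the "compare with dmin" checks */
            return sad;
        ref += lx;        /* reference advances by the picture pitch */
        blk += 16;        /* candidate block is packed 16x16 */
    }
    return sad;
}

Packing four bytes per word roughly quarters the arithmetic on ARMv5-class cores, which is why the file carried separate __CC_ARM and GCC inline-assembly variants of the same loop.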
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h b/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h
deleted file mode 100644
index 20ca7eb..0000000
--- a/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h
+++ /dev/null
@@ -1,355 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-
-/* Intentionally not using the gcc asm version, since it is
- * slightly slower than the plain C version on modern GCC versions. */
-#if !defined(__CC_ARM) /* Generic C version */
-
-#if (NUMBER==3)
-__inline int32 sad_mb_offset3(uint8 *ref, uint8 *blk, int lx, int dmin)
-#elif (NUMBER==2)
-__inline int32 sad_mb_offset2(uint8 *ref, uint8 *blk, int lx, int dmin)
-#elif (NUMBER==1)
-__inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin)
-#endif
-{
- int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
-
- // x5 = (x4<<8) - x4;
- x4 = x5 = 0;
- x6 = 0xFFFF00FF;
- x9 = 0x80808080; /* const. */
- ref -= NUMBER; /* bic ref, ref, #3 */
- ref -= lx;
- blk -= 16;
- x8 = 16;
-
-#if (NUMBER==3)
-LOOP_SAD3:
-#elif (NUMBER==2)
-LOOP_SAD2:
-#elif (NUMBER==1)
-LOOP_SAD1:
-#endif
- /****** process 8 pixels ******/
- x10 = *((uint32*)(ref += lx)); /* D C B A */
- x11 = *((uint32*)(ref + 4)); /* H G F E */
- x12 = *((uint32*)(ref + 8)); /* L K J I */
-
- x10 = ((uint32)x10 >> SHIFT); /* 0 0 0 D */
- x10 = x10 | (x11 << (32 - SHIFT)); /* G F E D */
- x11 = ((uint32)x11 >> SHIFT); /* 0 0 0 H */
- x11 = x11 | (x12 << (32 - SHIFT)); /* K J I H */
-
- x12 = *((uint32*)(blk += 16));
- x14 = *((uint32*)(blk + 4));
-
- /* process x11 & x14 */
- x11 = sad_4pixel(x11, x14, x9);
-
- /* process x12 & x10 */
- x10 = sad_4pixel(x10, x12, x9);
-
- x5 = x5 + x10; /* accumulate low bytes */
- x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
- x5 = x5 + x11; /* accumulate low bytes */
- x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
-
- /****** process 8 pixels ******/
- x10 = *((uint32*)(ref + 8)); /* D C B A */
- x11 = *((uint32*)(ref + 12)); /* H G F E */
- x12 = *((uint32*)(ref + 16)); /* L K J I */
-
- x10 = ((uint32)x10 >> SHIFT); /* mvn x10, x10, lsr #24 = 0xFF 0xFF 0xFF ~D */
- x10 = x10 | (x11 << (32 - SHIFT)); /* bic x10, x10, x11, lsl #8 = ~G ~F ~E ~D */
- x11 = ((uint32)x11 >> SHIFT); /* 0xFF 0xFF 0xFF ~H */
- x11 = x11 | (x12 << (32 - SHIFT)); /* ~K ~J ~I ~H */
-
- x12 = *((uint32*)(blk + 8));
- x14 = *((uint32*)(blk + 12));
-
- /* process x11 & x14 */
- x11 = sad_4pixel(x11, x14, x9);
-
- /* process x12 & x10 */
- x10 = sad_4pixel(x10, x12, x9);
-
- x5 = x5 + x10; /* accumulate low bytes */
- x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
- x5 = x5 + x11; /* accumulate low bytes */
- x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
- x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
-
- /****************/
- x10 = x5 - (x4 << 8); /* extract low bytes */
- x10 = x10 + x4; /* add with high bytes */
- x10 = x10 + (x10 << 16); /* add with lower half word */
-
- if ((int)((uint32)x10 >> 16) <= dmin) /* compare with dmin */
- {
- if (--x8)
- {
-#if (NUMBER==3)
- goto LOOP_SAD3;
-#elif (NUMBER==2)
- goto LOOP_SAD2;
-#elif (NUMBER==1)
- goto LOOP_SAD1;
-#endif
- }
-
- }
-
- return ((uint32)x10 >> 16);
-}
-
-#elif defined(__CC_ARM) /* only work with arm v5 */
-
-#if (NUMBER==3)
-__inline int32 sad_mb_offset3(uint8 *ref, uint8 *blk, int lx, int dmin, int32 x8)
-#elif (NUMBER==2)
-__inline int32 sad_mb_offset2(uint8 *ref, uint8 *blk, int lx, int dmin, int32 x8)
-#elif (NUMBER==1)
-__inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin, int32 x8)
-#endif
-{
- int32 x4, x5, x6, x9, x10, x11, x12, x14;
-
- x9 = 0x80808080; /* const. */
- x4 = x5 = 0;
-
- __asm{
- MVN x6, #0xff0000;
-#if (NUMBER==3)
-LOOP_SAD3:
-#elif (NUMBER==2)
-LOOP_SAD2:
-#elif (NUMBER==1)
-LOOP_SAD1:
-#endif
- BIC ref, ref, #3;
- }
- /****** process 8 pixels ******/
- x11 = *((int32*)(ref + 12));
- x12 = *((int32*)(ref + 16));
- x10 = *((int32*)(ref + 8));
- x14 = *((int32*)(blk + 12));
-
- __asm{
- MVN x10, x10, lsr #SHIFT;
- BIC x10, x10, x11, lsl #(32-SHIFT);
- MVN x11, x11, lsr #SHIFT;
- BIC x11, x11, x12, lsl #(32-SHIFT);
-
- LDR x12, [blk, #8];
- }
-
- /* process x11 & x14 */
- x11 = sad_4pixelN(x11, x14, x9);
-
- /* process x12 & x10 */
- x10 = sad_4pixelN(x10, x12, x9);
-
- sum_accumulate;
-
- __asm{
- /****** process 8 pixels ******/
- LDR x11, [ref, #4];
- LDR x12, [ref, #8];
- LDR x10, [ref], lx ;
- LDR x14, [blk, #4];
-
- MVN x10, x10, lsr #SHIFT;
- BIC x10, x10, x11, lsl #(32-SHIFT);
- MVN x11, x11, lsr #SHIFT;
- BIC x11, x11, x12, lsl #(32-SHIFT);
-
- LDR x12, [blk], #16;
- }
-
- /* process x11 & x14 */
- x11 = sad_4pixelN(x11, x14, x9);
-
- /* process x12 & x10 */
- x10 = sad_4pixelN(x10, x12, x9);
-
- sum_accumulate;
-
- /****************/
- x10 = x5 - (x4 << 8); /* extract low bytes */
- x10 = x10 + x4; /* add with high bytes */
- x10 = x10 + (x10 << 16); /* add with lower half word */
-
- __asm{
- RSBS x11, dmin, x10, lsr #16
- ADDLSS x8, x8, #INC_X8
-#if (NUMBER==3)
- BLS LOOP_SAD3;
-#elif (NUMBER==2)
-BLS LOOP_SAD2;
-#elif (NUMBER==1)
-BLS LOOP_SAD1;
-#endif
- }
-
- return ((uint32)x10 >> 16);
-}
-
-#elif defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER */
-
-#if (NUMBER==3)
-__inline int32 sad_mb_offset3(uint8 *ref, uint8 *blk, int lx, int dmin)
-#elif (NUMBER==2)
-__inline int32 sad_mb_offset2(uint8 *ref, uint8 *blk, int lx, int dmin)
-#elif (NUMBER==1)
-__inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin)
-#endif
-{
- int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
-
- x9 = 0x80808080; /* const. */
- x4 = x5 = 0;
- x8 = 16; //<<===========*******
-
- __asm__ volatile("MVN %0, #0xFF0000": "=r"(x6));
-
-#if (NUMBER==3)
-LOOP_SAD3:
-#elif (NUMBER==2)
-LOOP_SAD2:
-#elif (NUMBER==1)
-LOOP_SAD1:
-#endif
- __asm__ volatile("BIC %0, %0, #3": "+r"(ref));
- /****** process 8 pixels ******/
- x11 = *((int32*)(ref + 12));
- x12 = *((int32*)(ref + 16));
- x10 = *((int32*)(ref + 8));
- x14 = *((int32*)(blk + 12));
-
-#if (SHIFT==8)
- __asm__ volatile(
- "MVN %0, %0, lsr #8\n\t"
- "BIC %0, %0, %1, lsl #24\n\t"
- "MVN %1, %1, lsr #8\n\t"
- "BIC %1, %1, %2, lsl #24"
- : "+r"(x10), "+r"(x11)
- : "r"(x12)
- );
-#elif (SHIFT==16)
- __asm__ volatile(
- "MVN %0, %0, lsr #16\n\t"
- "BIC %0, %0, %1, lsl #16\n\t"
- "MVN %1, %1, lsr #16\n\t"
- "BIC %1, %1, %2, lsl #16"
- : "+r"(x10), "+r"(x11)
- : "r"(x12)
- );
-#elif (SHIFT==24)
- __asm__ volatile(
- "MVN %0, %0, lsr #24\n\t"
- "BIC %0, %0, %1, lsl #8\n\t"
- "MVN %1, %1, lsr #24\n\t"
- "BIC %1, %1, %2, lsl #8"
- : "+r"(x10), "+r"(x11)
- : "r"(x12)
- );
-#endif
-
- x12 = *((int32*)(blk + 8));
-
- /* process x11 & x14 */
- x11 = sad_4pixelN(x11, x14, x9);
-
- /* process x12 & x10 */
- x10 = sad_4pixelN(x10, x12, x9);
-
- sum_accumulate;
-
- /****** process 8 pixels ******/
- x11 = *((int32*)(ref + 4));
- x12 = *((int32*)(ref + 8));
- x10 = *((int32*)ref); ref += lx;
- x14 = *((int32*)(blk + 4));
-
-#if (SHIFT==8)
- __asm__ volatile(
- "MVN %0, %0, lsr #8\n\t"
- "BIC %0, %0, %1, lsl #24\n\t"
- "MVN %1, %1, lsr #8\n\t"
- "BIC %1, %1, %2, lsl #24"
- : "+r"(x10), "+r"(x11)
- : "r"(x12)
- );
-#elif (SHIFT==16)
- __asm__ volatile(
- "MVN %0, %0, lsr #16\n\t"
- "BIC %0, %0, %1, lsl #16\n\t"
- "MVN %1, %1, lsr #16\n\t"
- "BIC %1, %1, %2, lsl #16"
- : "+r"(x10), "+r"(x11)
- : "r"(x12)
- );
-#elif (SHIFT==24)
- __asm__ volatile(
- "MVN %0, %0, lsr #24\n\t"
- "BIC %0, %0, %1, lsl #8\n\t"
- "MVN %1, %1, lsr #24\n\t"
- "BIC %1, %1, %2, lsl #8"
- : "+r"(x10), "+r"(x11)
- : "r"(x12)
- );
-#endif
- __asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "+r"(blk));
-
- /* process x11 & x14 */
- x11 = sad_4pixelN(x11, x14, x9);
-
- /* process x12 & x10 */
- x10 = sad_4pixelN(x10, x12, x9);
-
- sum_accumulate;
-
- /****************/
- x10 = x5 - (x4 << 8); /* extract low bytes */
- x10 = x10 + x4; /* add with high bytes */
- x10 = x10 + (x10 << 16); /* add with lower half word */
-
- if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */
- {
- if (--x8)
- {
-#if (NUMBER==3)
- goto LOOP_SAD3;
-#elif (NUMBER==2)
- goto LOOP_SAD2;
-#elif (NUMBER==1)
- goto LOOP_SAD1;
-#endif
- }
-
- }
-
- return ((uint32)x10 >> 16);
-}
-
-#endif
-
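The offset variants in the deleted sad_mb_offset.h exist because the reference pointer is usually not word aligned: each NUMBER/SHIFT instantiation reads two aligned words and splices the wanted bytes together with a shift pair, exactly as the D C B A / G F E D comments describe. A small sketch of that splice, assuming a little-endian target that tolerates the same word loads the deleted code performs (load32_unaligned is an invented name, not part of the tree):

#include <stdint.h>

/* offset is 1, 2 or 3 bytes; SHIFT in the deleted header equals 8 * offset */
static uint32_t load32_unaligned(const uint8_t *p_aligned, int offset)
{
    uint32_t lo = *(const uint32_t *)(const void *)p_aligned;        /* D C B A */
    uint32_t hi = *(const uint32_t *)(const void *)(p_aligned + 4);  /* H G F E */
    int shift = 8 * offset;

    return (lo >> shift) | (hi << (32 - shift));
}

With offset 3 (NUMBER 3, SHIFT 24) this yields the G F E D word from the comments; the assembly paths build the complemented word with MVN/BIC instead and hand it to sad_4pixelN.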
diff --git a/media/libstagefright/codecs/avc/enc/src/slice.cpp b/media/libstagefright/codecs/avc/enc/src/slice.cpp
deleted file mode 100644
index f6d066e..0000000
--- a/media/libstagefright/codecs/avc/enc/src/slice.cpp
+++ /dev/null
@@ -1,1025 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-
-
-AVCEnc_Status AVCEncodeSlice(AVCEncObject *encvid)
-{
- AVCEnc_Status status = AVCENC_SUCCESS;
- AVCCommonObj *video = encvid->common;
- AVCPicParamSet *pps = video->currPicParams;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- AVCMacroblock *currMB ;
- AVCEncBitstream *stream = encvid->bitstream;
- uint slice_group_id;
- int CurrMbAddr, slice_type;
-
- slice_type = video->slice_type;
-
- /* set the first mb in slice */
- video->mbNum = CurrMbAddr = sliceHdr->first_mb_in_slice;// * (1+video->MbaffFrameFlag);
- slice_group_id = video->MbToSliceGroupMap[CurrMbAddr];
-
- video->mb_skip_run = 0;
-
- /* while loop , see subclause 7.3.4 */
- while (1)
- {
- video->mbNum = CurrMbAddr;
- currMB = video->currMB = &(video->mblock[CurrMbAddr]);
- currMB->slice_id = video->slice_id; // for deblocking
-
- video->mb_x = CurrMbAddr % video->PicWidthInMbs;
- video->mb_y = CurrMbAddr / video->PicWidthInMbs;
-
- /* initialize QP for this MB here*/
- /* calculate currMB->QPy */
- RCInitMBQP(encvid);
-
- /* check the availability of neighboring macroblocks */
- InitNeighborAvailability(video, CurrMbAddr);
-
- /* Assuming that InitNeighborAvailability has been called prior to this function */
- video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;
- /* this is necessary for all subsequent intra search */
-
- if (!video->currPicParams->constrained_intra_pred_flag)
- {
- video->intraAvailA = video->mbAvailA;
- video->intraAvailB = video->mbAvailB;
- video->intraAvailC = video->mbAvailC;
- video->intraAvailD = video->mbAvailD;
- }
- else
- {
- if (video->mbAvailA)
- {
- video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;
- }
- if (video->mbAvailB)
- {
- video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;
- }
- if (video->mbAvailC)
- {
- video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;
- }
- if (video->mbAvailD)
- {
- video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;
- }
- }
-
- /* encode_one_macroblock() */
- status = EncodeMB(encvid);
- if (status != AVCENC_SUCCESS)
- {
- break;
- }
-
- /* go to next MB */
- CurrMbAddr++;
-
- while ((uint)video->MbToSliceGroupMap[CurrMbAddr] != slice_group_id &&
- (uint)CurrMbAddr < video->PicSizeInMbs)
- {
- CurrMbAddr++;
- }
-
- if ((uint)CurrMbAddr >= video->PicSizeInMbs)
- {
- /* end of slice, return, but before that check to see if there are other slices
- to be encoded. */
- encvid->currSliceGroup++;
- if (encvid->currSliceGroup > (int)pps->num_slice_groups_minus1) /* no more slice group */
- {
- status = AVCENC_PICTURE_READY;
- break;
- }
- else
- {
- /* find first_mb_num for the next slice */
- CurrMbAddr = 0;
- while (video->MbToSliceGroupMap[CurrMbAddr] != encvid->currSliceGroup &&
- (uint)CurrMbAddr < video->PicSizeInMbs)
- {
- CurrMbAddr++;
- }
- if ((uint)CurrMbAddr >= video->PicSizeInMbs)
- {
- status = AVCENC_SLICE_EMPTY; /* error, one slice group has no MBs in it */
- }
-
- video->mbNum = CurrMbAddr;
- status = AVCENC_SUCCESS;
- break;
- }
- }
- }
-
- if (video->mb_skip_run > 0)
- {
- /* write skip_run */
- if (slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE)
- {
- ue_v(stream, video->mb_skip_run);
- video->mb_skip_run = 0;
- }
- else /* shouldn't happen */
- {
- status = AVCENC_FAIL;
- }
- }
-
- return status;
-}
-
-
-AVCEnc_Status EncodeMB(AVCEncObject *encvid)
-{
- AVCEnc_Status status = AVCENC_SUCCESS;
- AVCCommonObj *video = encvid->common;
- AVCPictureData *currPic = video->currPic;
- AVCFrameIO *currInput = encvid->currInput;
- AVCMacroblock *currMB = video->currMB;
- AVCMacroblock *MB_A, *MB_B;
- AVCEncBitstream *stream = encvid->bitstream;
- AVCRateControl *rateCtrl = encvid->rateCtrl;
- uint8 *cur, *curL, *curCb, *curCr;
- uint8 *orgL, *orgCb, *orgCr, *org4;
- int CurrMbAddr = video->mbNum;
- int picPitch = currPic->pitch;
- int orgPitch = currInput->pitch;
- int x_position = (video->mb_x << 4);
- int y_position = (video->mb_y << 4);
- int offset;
- int b8, b4, blkidx;
- AVCResidualType resType;
- int slice_type;
- int numcoeff; /* output from residual_block_cavlc */
- int cost16, cost8;
-
- int num_bits, start_mb_bits, start_text_bits;
-
- slice_type = video->slice_type;
-
- /* now, point to the reconstructed frame */
- offset = y_position * picPitch + x_position;
- curL = currPic->Sl + offset;
- orgL = currInput->YCbCr[0] + offset;
- offset = (offset + x_position) >> 2;
- curCb = currPic->Scb + offset;
- curCr = currPic->Scr + offset;
- orgCb = currInput->YCbCr[1] + offset;
- orgCr = currInput->YCbCr[2] + offset;
-
- if (orgPitch != picPitch)
- {
- offset = y_position * (orgPitch - picPitch);
- orgL += offset;
- offset >>= 2;
- orgCb += offset;
- orgCr += offset;
- }
-
- /******* determine MB prediction mode *******/
- if (encvid->intraSearch[CurrMbAddr])
- {
- MBIntraSearch(encvid, CurrMbAddr, curL, picPitch);
- }
- /******* This part should be determined somehow ***************/
- if (currMB->mbMode == AVC_I_PCM)
- {
- /* write down mb_type and PCM data */
- /* and copy from currInput to currPic */
- status = EncodeIntraPCM(encvid);
-
-
- return status;
- }
-
- /****** for intra prediction, pred is already done *******/
- /****** for I4, the recon is ready and Xfrm coefs are ready to be encoded *****/
-
- //RCCalculateMAD(encvid,currMB,orgL,orgPitch); // no need to re-calculate MAD for Intra
- // not used since totalSAD is used instead
-
- /* compute the prediction */
- /* output is video->pred_block */
- if (!currMB->mb_intra)
- {
- AVCMBMotionComp(encvid, video); /* perform prediction and residue calculation */
- /* we can do the loop here and call dct_luma */
- video->pred_pitch = picPitch;
- currMB->CBP = 0;
- cost16 = 0;
- cur = curL;
- org4 = orgL;
-
- for (b8 = 0; b8 < 4; b8++)
- {
- cost8 = 0;
-
- for (b4 = 0; b4 < 4; b4++)
- {
- blkidx = blkIdx2blkXY[b8][b4];
- video->pred_block = cur;
- numcoeff = dct_luma(encvid, blkidx, cur, org4, &cost8);
- currMB->nz_coeff[blkidx] = numcoeff;
- if (numcoeff)
- {
- video->cbp4x4 |= (1 << blkidx);
- currMB->CBP |= (1 << b8);
- }
-
- if (b4&1)
- {
- cur += ((picPitch << 2) - 4);
- org4 += ((orgPitch << 2) - 4);
- }
- else
- {
- cur += 4;
- org4 += 4;
- }
- }
-
- /* move the IDCT part out of dct_luma to accommodate the check
- for coeff_cost. */
-
- if ((currMB->CBP&(1 << b8)) && (cost8 <= _LUMA_COEFF_COST_))
- {
- cost8 = 0; // reset it
-
- currMB->CBP ^= (1 << b8);
- blkidx = blkIdx2blkXY[b8][0];
-
- currMB->nz_coeff[blkidx] = 0;
- currMB->nz_coeff[blkidx+1] = 0;
- currMB->nz_coeff[blkidx+4] = 0;
- currMB->nz_coeff[blkidx+5] = 0;
- }
-
- cost16 += cost8;
-
- if (b8&1)
- {
- cur -= 8;
- org4 -= 8;
- }
- else
- {
- cur += (8 - (picPitch << 3));
- org4 += (8 - (orgPitch << 3));
- }
- }
-
- /* after the whole MB, we do another check for coeff_cost */
- if ((currMB->CBP&0xF) && (cost16 <= _LUMA_MB_COEFF_COST_))
- {
- currMB->CBP = 0; // reset it to zero
- memset(currMB->nz_coeff, 0, sizeof(uint8)*16);
- }
-
- // now we do IDCT
- MBInterIdct(video, curL, currMB, picPitch);
-
-// video->pred_block = video->pred + 256;
- }
- else /* Intra prediction */
- {
- encvid->numIntraMB++;
-
- if (currMB->mbMode == AVC_I16) /* do prediction for the whole macroblock */
- {
- currMB->CBP = 0;
- /* get the prediction from encvid->pred_i16 */
- dct_luma_16x16(encvid, curL, orgL);
- }
- video->pred_block = encvid->pred_ic[currMB->intra_chroma_pred_mode];
- }
-
- /* chrominance */
-    /* no need to do anything, the result is in encvid->pred_ic;
-       the chroma dct must be aware that the prediction block can come from either intra or inter. */
-
- dct_chroma(encvid, curCb, orgCb, 0);
-
- dct_chroma(encvid, curCr, orgCr, 1);
-
-
- /* 4.1 if there's nothing in there, video->mb_skip_run++ */
- /* 4.2 if coded, check if there is a run of skipped MB, encodes it,
- set video->QPyprev = currMB->QPy; */
-
- /* 5. vlc encode */
-
- /* check for skipped macroblock, INTER only */
- if (!currMB->mb_intra)
- {
- /* decide whether this MB (for inter MB) should be skipped if there's nothing left. */
- if (!currMB->CBP && currMB->NumMbPart == 1 && currMB->QPy == video->QPy)
- {
- if (currMB->MBPartPredMode[0][0] == AVC_Pred_L0 && currMB->ref_idx_L0[0] == 0)
- {
- MB_A = &video->mblock[video->mbAddrA];
- MB_B = &video->mblock[video->mbAddrB];
-
- if (!video->mbAvailA || !video->mbAvailB)
- {
- if (currMB->mvL0[0] == 0) /* both mv components are zeros.*/
- {
- currMB->mbMode = AVC_SKIP;
- video->mvd_l0[0][0][0] = 0;
- video->mvd_l0[0][0][1] = 0;
- }
- }
- else
- {
- if ((MB_A->ref_idx_L0[1] == 0 && MB_A->mvL0[3] == 0) ||
- (MB_B->ref_idx_L0[2] == 0 && MB_B->mvL0[12] == 0))
- {
- if (currMB->mvL0[0] == 0) /* both mv components are zeros.*/
- {
- currMB->mbMode = AVC_SKIP;
- video->mvd_l0[0][0][0] = 0;
- video->mvd_l0[0][0][1] = 0;
- }
- }
- else if (video->mvd_l0[0][0][0] == 0 && video->mvd_l0[0][0][1] == 0)
- {
- currMB->mbMode = AVC_SKIP;
- }
- }
- }
-
- if (currMB->mbMode == AVC_SKIP)
- {
- video->mb_skip_run++;
-
- /* set parameters */
-                /* not sure whether we need the following */
- if (slice_type == AVC_P_SLICE)
- {
- currMB->mbMode = AVC_SKIP;
- currMB->MbPartWidth = currMB->MbPartHeight = 16;
- currMB->MBPartPredMode[0][0] = AVC_Pred_L0;
- currMB->NumMbPart = 1;
- currMB->NumSubMbPart[0] = currMB->NumSubMbPart[1] =
- currMB->NumSubMbPart[2] = currMB->NumSubMbPart[3] = 1;
- currMB->SubMbPartWidth[0] = currMB->SubMbPartWidth[1] =
- currMB->SubMbPartWidth[2] = currMB->SubMbPartWidth[3] = currMB->MbPartWidth;
- currMB->SubMbPartHeight[0] = currMB->SubMbPartHeight[1] =
- currMB->SubMbPartHeight[2] = currMB->SubMbPartHeight[3] = currMB->MbPartHeight;
-
- }
- else if (slice_type == AVC_B_SLICE)
- {
- currMB->mbMode = AVC_SKIP;
- currMB->MbPartWidth = currMB->MbPartHeight = 8;
- currMB->MBPartPredMode[0][0] = AVC_Direct;
- currMB->NumMbPart = -1;
- }
-
- /* for skipped MB, always look at the first entry in RefPicList */
- currMB->RefIdx[0] = currMB->RefIdx[1] =
- currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[0]->RefIdx;
-
- /* do not return yet, need to do some copies */
- }
- }
- }
- /* non-skipped MB */
-
-
- /************* START ENTROPY CODING *************************/
-
- start_mb_bits = 32 + (encvid->bitstream->write_pos << 3) - encvid->bitstream->bit_left;
-
- /* encode mb_type, mb_pred, sub_mb_pred, CBP */
- if (slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE && currMB->mbMode != AVC_SKIP)
- {
- //if(!pps->entropy_coding_mode_flag) ALWAYS true
- {
- ue_v(stream, video->mb_skip_run);
- video->mb_skip_run = 0;
- }
- }
-
- if (currMB->mbMode != AVC_SKIP)
- {
- status = EncodeMBHeader(currMB, encvid);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
- }
-
- start_text_bits = 32 + (encvid->bitstream->write_pos << 3) - encvid->bitstream->bit_left;
-
- /**** now decoding part *******/
- resType = AVC_Luma;
-
- /* DC transform for luma I16 mode */
- if (currMB->mbMode == AVC_I16)
- {
- /* vlc encode level/run */
- status = enc_residual_block(encvid, AVC_Intra16DC, encvid->numcoefdc, currMB);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
- resType = AVC_Intra16AC;
- }
-
- /* VLC encoding for luma */
- for (b8 = 0; b8 < 4; b8++)
- {
- if (currMB->CBP&(1 << b8))
- {
- for (b4 = 0; b4 < 4; b4++)
- {
- /* vlc encode level/run */
- status = enc_residual_block(encvid, resType, (b8 << 2) + b4, currMB);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
- }
- }
- }
-
- /* chroma */
- if (currMB->CBP & (3 << 4)) /* chroma DC residual present */
- {
- for (b8 = 0; b8 < 2; b8++) /* for iCbCr */
- {
- /* vlc encode level/run */
- status = enc_residual_block(encvid, AVC_ChromaDC, encvid->numcoefcdc[b8] + (b8 << 3), currMB);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
- }
- }
-
- if (currMB->CBP & (2 << 4))
- {
- /* AC part */
- for (b8 = 0; b8 < 2; b8++) /* for iCbCr */
- {
- for (b4 = 0; b4 < 4; b4++) /* for each block inside Cb or Cr */
- {
- /* vlc encode level/run */
- status = enc_residual_block(encvid, AVC_ChromaAC, 16 + (b8 << 2) + b4, currMB);
- if (status != AVCENC_SUCCESS)
- {
- return status;
- }
- }
- }
- }
-
-
- num_bits = 32 + (encvid->bitstream->write_pos << 3) - encvid->bitstream->bit_left;
-
- RCPostMB(video, rateCtrl, start_text_bits - start_mb_bits,
- num_bits - start_text_bits);
-
-// num_bits -= start_mb_bits;
-// fprintf(fdebug,"MB #%d: %d bits\n",CurrMbAddr,num_bits);
-// fclose(fdebug);
- return status;
-}
-
-/* copy the content from predBlock back to the reconstructed YUV frame */
-void Copy_MB(uint8 *curL, uint8 *curCb, uint8 *curCr, uint8 *predBlock, int picPitch)
-{
- int j, offset;
- uint32 *dst, *dst2, *src;
-
- dst = (uint32*)curL;
- src = (uint32*)predBlock;
-
- offset = (picPitch - 16) >> 2;
-
- for (j = 0; j < 16; j++)
- {
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
-
- dst += offset;
- }
-
- dst = (uint32*)curCb;
- dst2 = (uint32*)curCr;
- offset >>= 1;
-
- for (j = 0; j < 8; j++)
- {
- *dst++ = *src++;
- *dst++ = *src++;
- *dst2++ = *src++;
- *dst2++ = *src++;
-
- dst += offset;
- dst2 += offset;
- }
- return ;
-}
-
-/* encode mb_type, mb_pred, sub_mb_pred, CBP */
-/* decide whether this MB (for inter MB) should be skipped */
-AVCEnc_Status EncodeMBHeader(AVCMacroblock *currMB, AVCEncObject *encvid)
-{
- AVCEnc_Status status = AVCENC_SUCCESS;
- uint mb_type;
- AVCCommonObj *video = encvid->common;
- AVCEncBitstream *stream = encvid->bitstream;
-
- if (currMB->CBP > 47) /* chroma CBP is 11 */
- {
- currMB->CBP -= 16; /* remove the 5th bit from the right */
- }
-
- mb_type = InterpretMBType(currMB, video->slice_type);
-
- status = ue_v(stream, mb_type);
-
- if (currMB->mbMode == AVC_P8 || currMB->mbMode == AVC_P8ref0)
- {
- status = sub_mb_pred(video, currMB, stream);
- }
- else
- {
- status = mb_pred(video, currMB, stream) ;
- }
-
- if (currMB->mbMode != AVC_I16)
- {
- /* decode coded_block_pattern */
- status = EncodeCBP(currMB, stream);
- }
-
- /* calculate currMB->mb_qp_delta = currMB->QPy - video->QPyprev */
- if (currMB->CBP > 0 || currMB->mbMode == AVC_I16)
- {
- status = se_v(stream, currMB->QPy - video->QPy);
- video->QPy = currMB->QPy; /* = (video->QPyprev + currMB->mb_qp_delta + 52)%52; */
- // no need video->QPc = currMB->QPc;
- }
- else
- {
- if (currMB->QPy != video->QPy) // current QP is not the same as previous QP
- {
- /* restore these values */
- RCRestoreQP(currMB, video, encvid);
- }
- }
-
- return status;
-}
-
-
-/* inputs are mbMode, mb_intra, i16Mode, CBP, NumMbPart, MbPartWidth, MbPartHeight */
-uint InterpretMBType(AVCMacroblock *currMB, int slice_type)
-{
- int CBP_chrom;
- int mb_type;// part1, part2, part3;
-// const static int MapParts2Type[2][3][3]={{{4,8,12},{10,6,14},{16,18,20}},
-// {{5,9,13},{11,7,15},{17,19,21}}};
-
- if (currMB->mb_intra)
- {
- if (currMB->mbMode == AVC_I4)
- {
- mb_type = 0;
- }
- else if (currMB->mbMode == AVC_I16)
- {
- CBP_chrom = (currMB->CBP & 0x30);
- if (currMB->CBP&0xF)
- {
- currMB->CBP |= 0xF; /* either 0x0 or 0xF */
- mb_type = 13;
- }
- else
- {
- mb_type = 1;
- }
- mb_type += (CBP_chrom >> 2) + currMB->i16Mode;
- }
- else /* if(currMB->mbMode == AVC_I_PCM) */
- {
- mb_type = 25;
- }
- }
- else
- { /* P-MB *//* note that the order of the enum AVCMBMode cannot be changed
- since we use it here. */
- mb_type = currMB->mbMode - AVC_P16;
- }
-
- if (slice_type == AVC_P_SLICE)
- {
- if (currMB->mb_intra)
- {
- mb_type += 5;
- }
- }
-    // the following code has not been tested yet and is not needed.
- /* else if(slice_type == AVC_B_SLICE)
- {
- if(currMB->mbMode == AVC_BDirect16)
- {
- mb_type = 0;
- }
- else if(currMB->mbMode == AVC_P16)
- {
- mb_type = currMB->MBPartPredMode[0][0] + 1; // 1 or 2
- }
- else if(currMB->mbMode == AVC_P8)
- {
- mb_type = 26;
- }
- else if(currMB->mbMode == AVC_P8ref0)
- {
- mb_type = 27;
- }
- else
- {
- part1 = currMB->mbMode - AVC_P16x8;
- part2 = currMB->MBPartPredMode[0][0];
- part3 = currMB->MBPartPredMode[1][0];
- mb_type = MapParts2Type[part1][part2][part3];
- }
- }
-
- if(slice_type == AVC_SI_SLICE)
- {
- mb_type++;
- }
- */
- return (uint)mb_type;
-}
-
-//const static int mbPart2raster[3][4] = {{0,0,0,0},{1,1,0,0},{1,0,1,0}};
-
-/* see subclause 7.3.5.1 */
-AVCEnc_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream)
-{
- AVCEnc_Status status = AVCENC_SUCCESS;
- int mbPartIdx;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- int max_ref_idx;
- uint code;
-
- if (currMB->mbMode == AVC_I4 || currMB->mbMode == AVC_I16)
- {
- if (currMB->mbMode == AVC_I4)
- {
- /* perform prediction to get the actual intra 4x4 pred mode */
- EncodeIntra4x4Mode(video, currMB, stream);
- /* output will be in currMB->i4Mode[4][4] */
- }
-
- /* assume already set from MBPrediction() */
- status = ue_v(stream, currMB->intra_chroma_pred_mode);
- }
- else if (currMB->MBPartPredMode[0][0] != AVC_Direct)
- {
-
- memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);
-
- /* see subclause 7.4.5.1 for the range of ref_idx_lX */
- max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;
- /* if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
- max_ref_idx = 2*sliceHdr->num_ref_idx_l0_active_minus1 + 1;
- */
- /* decode ref index for L0 */
- if (sliceHdr->num_ref_idx_l0_active_minus1 > 0)
- {
- for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
- {
- if (/*(sliceHdr->num_ref_idx_l0_active_minus1>0 || currMB->mb_field_decoding_flag) &&*/
- currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)
- {
- code = currMB->ref_idx_L0[mbPartIdx];
- status = te_v(stream, code, max_ref_idx);
- }
- }
- }
-
- /* see subclause 7.4.5.1 for the range of ref_idx_lX */
- max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;
- /* if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
- max_ref_idx = 2*sliceHdr->num_ref_idx_l1_active_minus1 + 1;
- */
- /* decode ref index for L1 */
- if (sliceHdr->num_ref_idx_l1_active_minus1 > 0)
- {
- for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
- {
- if (/*(sliceHdr->num_ref_idx_l1_active_minus1>0 || currMB->mb_field_decoding_flag) &&*/
- currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)
- {
- status = te_v(stream, currMB->ref_idx_L1[mbPartIdx], max_ref_idx);
- }
- }
- }
-
- /* encode mvd_l0 */
- for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
- {
- if (currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)
- {
- status = se_v(stream, video->mvd_l0[mbPartIdx][0][0]);
- status = se_v(stream, video->mvd_l0[mbPartIdx][0][1]);
- }
- }
- /* encode mvd_l1 */
- for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
- {
- if (currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)
- {
- status = se_v(stream, video->mvd_l1[mbPartIdx][0][0]);
- status = se_v(stream, video->mvd_l1[mbPartIdx][0][1]);
- }
- }
- }
-
- return status;
-}
-
-/* see subclause 7.3.5.2 */
-AVCEnc_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream)
-{
- AVCEnc_Status status = AVCENC_SUCCESS;
- int mbPartIdx, subMbPartIdx;
- AVCSliceHeader *sliceHdr = video->sliceHdr;
- uint max_ref_idx;
- uint slice_type = video->slice_type;
- uint sub_mb_type[4];
-
- /* this should move somewhere else where we don't have to make this check */
- if (currMB->mbMode == AVC_P8ref0)
- {
- memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);
- }
-
- /* we have to check the values to make sure they are valid */
- /* assign values to currMB->sub_mb_type[] */
- if (slice_type == AVC_P_SLICE)
- {
- InterpretSubMBTypeP(currMB, sub_mb_type);
- }
- /* no need to check for B-slice
- else if(slice_type == AVC_B_SLICE)
- {
- InterpretSubMBTypeB(currMB,sub_mb_type);
- }*/
-
- for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
- {
- status = ue_v(stream, sub_mb_type[mbPartIdx]);
- }
-
- /* see subclause 7.4.5.1 for the range of ref_idx_lX */
- max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;
- /* if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
- max_ref_idx = 2*sliceHdr->num_ref_idx_l0_active_minus1 + 1; */
-
- for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
- {
- if ((sliceHdr->num_ref_idx_l0_active_minus1 > 0 /*|| currMB->mb_field_decoding_flag*/) &&
- currMB->mbMode != AVC_P8ref0 && /*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/
- currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)
- {
- status = te_v(stream, currMB->ref_idx_L0[mbPartIdx], max_ref_idx);
- }
- /* used in deblocking */
- currMB->RefIdx[mbPartIdx] = video->RefPicList0[currMB->ref_idx_L0[mbPartIdx]]->RefIdx;
- }
- /* see subclause 7.4.5.1 for the range of ref_idx_lX */
- max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;
- /* if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
- max_ref_idx = 2*sliceHdr->num_ref_idx_l1_active_minus1 + 1;*/
-
- if (sliceHdr->num_ref_idx_l1_active_minus1 > 0)
- {
- for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
- {
- if (/*(sliceHdr->num_ref_idx_l1_active_minus1>0 || currMB->mb_field_decoding_flag) &&*/
- /*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/
- currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)
- {
- status = te_v(stream, currMB->ref_idx_L1[mbPartIdx], max_ref_idx);
- }
- }
- }
-
- for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
- {
- if (/*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/
- currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)
- {
- for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
- {
- status = se_v(stream, video->mvd_l0[mbPartIdx][subMbPartIdx][0]);
- status = se_v(stream, video->mvd_l0[mbPartIdx][subMbPartIdx][1]);
- }
- }
- }
-
- for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
- {
- if (/*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/
- currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)
- {
- for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
- {
- status = se_v(stream, video->mvd_l1[mbPartIdx][subMbPartIdx][0]);
- status = se_v(stream, video->mvd_l1[mbPartIdx][subMbPartIdx][1]);
- }
- }
- }
-
- return status;
-}
-
-/* input is mblock->sub_mb_type[] */
-void InterpretSubMBTypeP(AVCMacroblock *mblock, uint *sub_mb_type)
-{
- int i;
- /* see enum AVCMBType declaration */
- /*const static AVCSubMBMode map2subMbMode[4] = {AVC_8x8,AVC_8x4,AVC_4x8,AVC_4x4};
- const static int map2subPartWidth[4] = {8,8,4,4};
- const static int map2subPartHeight[4] = {8,4,8,4};
- const static int map2numSubPart[4] = {1,2,2,4};*/
-
- for (i = 0; i < 4 ; i++)
- {
- sub_mb_type[i] = mblock->subMbMode[i] - AVC_8x8;
- }
-
- return ;
-}
-
-void InterpretSubMBTypeB(AVCMacroblock *mblock, uint *sub_mb_type)
-{
- int i;
- /* see enum AVCMBType declaration */
- /* const static AVCSubMBMode map2subMbMode[13] = {AVC_BDirect8,AVC_8x8,AVC_8x8,
- AVC_8x8,AVC_8x4,AVC_4x8,AVC_8x4,AVC_4x8,AVC_8x4,AVC_4x8,AVC_4x4,AVC_4x4,AVC_4x4};
- const static int map2subPartWidth[13] = {4,8,8,8,8,4,8,4,8,4,4,4,4};
- const static int map2subPartHeight[13] = {4,8,8,8,4,8,4,8,4,8,4,4,4};
- const static int map2numSubPart[13] = {4,1,1,1,2,2,2,2,2,2,4,4,4};
- const static int map2predMode[13] = {3,0,1,2,0,0,1,1,2,2,0,1,2};*/
-
- for (i = 0; i < 4 ; i++)
- {
- if (mblock->subMbMode[i] == AVC_BDirect8)
- {
- sub_mb_type[i] = 0;
- }
- else if (mblock->subMbMode[i] == AVC_8x8)
- {
- sub_mb_type[i] = 1 + mblock->MBPartPredMode[i][0];
- }
- else if (mblock->subMbMode[i] == AVC_4x4)
- {
- sub_mb_type[i] = 10 + mblock->MBPartPredMode[i][0];
- }
- else
- {
- sub_mb_type[i] = 4 + (mblock->MBPartPredMode[i][0] << 1) + (mblock->subMbMode[i] - AVC_8x4);
- }
- }
-
- return ;
-}
-
-/* see subclause 8.3.1 */
-AVCEnc_Status EncodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream)
-{
- int intra4x4PredModeA = 0;
- int intra4x4PredModeB, predIntra4x4PredMode;
- int component, SubBlock_indx, block_x, block_y;
- int dcOnlyPredictionFlag;
- uint flag;
- int rem = 0;
- int mode;
- int bindx = 0;
-
- for (component = 0; component < 4; component++) /* partition index */
- {
- block_x = ((component & 1) << 1);
- block_y = ((component >> 1) << 1);
-
- for (SubBlock_indx = 0; SubBlock_indx < 4; SubBlock_indx++) /* sub-partition index */
- {
- dcOnlyPredictionFlag = 0;
- if (block_x > 0)
- {
- intra4x4PredModeA = currMB->i4Mode[(block_y << 2) + block_x - 1 ];
- }
- else
- {
- if (video->intraAvailA)
- {
- if (video->mblock[video->mbAddrA].mbMode == AVC_I4)
- {
- intra4x4PredModeA = video->mblock[video->mbAddrA].i4Mode[(block_y << 2) + 3];
- }
- else
- {
- intra4x4PredModeA = AVC_I4_DC;
- }
- }
- else
- {
- dcOnlyPredictionFlag = 1;
- }
- }
-
- if (block_y > 0)
- {
- intra4x4PredModeB = currMB->i4Mode[((block_y-1) << 2) + block_x];
- }
- else
- {
- if (video->intraAvailB)
- {
- if (video->mblock[video->mbAddrB].mbMode == AVC_I4)
- {
- intra4x4PredModeB = video->mblock[video->mbAddrB].i4Mode[(3 << 2) + block_x];
- }
- else
- {
- intra4x4PredModeB = AVC_I4_DC;
- }
- }
- else
- {
- dcOnlyPredictionFlag = 1;
- }
- }
-
- if (dcOnlyPredictionFlag)
- {
- intra4x4PredModeA = intra4x4PredModeB = AVC_I4_DC;
- }
-
- predIntra4x4PredMode = AVC_MIN(intra4x4PredModeA, intra4x4PredModeB);
-
- flag = 0;
- mode = currMB->i4Mode[(block_y<<2)+block_x];
-
- if (mode == (AVCIntra4x4PredMode)predIntra4x4PredMode)
- {
- flag = 1;
- }
- else if (mode < predIntra4x4PredMode)
- {
- rem = mode;
- }
- else
- {
- rem = mode - 1;
- }
-
- BitstreamWrite1Bit(stream, flag);
-
- if (!flag)
- {
- BitstreamWriteBits(stream, 3, rem);
- }
-
- bindx++;
- block_y += (SubBlock_indx & 1) ;
- block_x += (1 - 2 * (SubBlock_indx & 1)) ;
- }
- }
-
- return AVCENC_SUCCESS;
-}
-
-
-
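Several branches in the deleted slice.cpp key off the coded block pattern layout: bits 0-3 flag the four 8x8 luma blocks that carry coefficients, and bits 4-5 hold the chroma pattern (1 for DC only, 2 for DC plus AC). That is why EncodeMB() tests CBP & (3 << 4) before the chroma DC pass and CBP & (2 << 4) before the AC pass, and why EncodeMBHeader() folds a chroma value of binary 11 back down before mapping the CBP to a codeword. A small sketch of the layout (describe_cbp is an invented helper, not part of the tree):

#include <stdio.h>

static void describe_cbp(unsigned cbp)
{
    for (int b8 = 0; b8 < 4; b8++)
        if (cbp & (1u << b8))
            printf("luma 8x8 block %d has coefficients\n", b8);

    if (cbp & (3u << 4))
        printf("chroma DC residual present\n");
    if (cbp & (2u << 4))
        printf("chroma AC residual present too\n");
}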
diff --git a/media/libstagefright/codecs/avc/enc/src/vlc_encode.cpp b/media/libstagefright/codecs/avc/enc/src/vlc_encode.cpp
deleted file mode 100644
index 222e709..0000000
--- a/media/libstagefright/codecs/avc/enc/src/vlc_encode.cpp
+++ /dev/null
@@ -1,336 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-#include "avcenc_lib.h"
-
-/**
-See algorithm in subclause 9.1, Table 9-1, Table 9-2. */
-AVCEnc_Status ue_v(AVCEncBitstream *bitstream, uint codeNum)
-{
- if (AVCENC_SUCCESS != SetEGBitstring(bitstream, codeNum))
- return AVCENC_FAIL;
-
- return AVCENC_SUCCESS;
-}
-
-/**
-See subclause 9.1.1, Table 9-3 */
-AVCEnc_Status se_v(AVCEncBitstream *bitstream, int value)
-{
- uint codeNum;
- AVCEnc_Status status;
-
- if (value <= 0)
- {
- codeNum = -value * 2;
- }
- else
- {
- codeNum = value * 2 - 1;
- }
-
- status = ue_v(bitstream, codeNum);
-
- return status;
-}
-
-AVCEnc_Status te_v(AVCEncBitstream *bitstream, uint value, uint range)
-{
- AVCEnc_Status status;
-
- if (range > 1)
- {
- return ue_v(bitstream, value);
- }
- else
- {
- status = BitstreamWrite1Bit(bitstream, 1 - value);
- return status;
- }
-}
-
-/**
-See subclause 9.1, Table 9-1, 9-2. */
-// compute leadingZeros and infobits
-//codeNum = (1<<leadingZeros)-1+infobits;
-AVCEnc_Status SetEGBitstring(AVCEncBitstream *bitstream, uint codeNum)
-{
- AVCEnc_Status status;
- int leadingZeros;
- int infobits;
-
- if (!codeNum)
- {
- status = BitstreamWrite1Bit(bitstream, 1);
- return status;
- }
-
- /* calculate leadingZeros and infobits */
- leadingZeros = 1;
- while ((uint)(1 << leadingZeros) < codeNum + 2)
- {
- leadingZeros++;
- }
- leadingZeros--;
- infobits = codeNum - (1 << leadingZeros) + 1;
-
- status = BitstreamWriteBits(bitstream, leadingZeros, 0);
- infobits |= (1 << leadingZeros);
- status = BitstreamWriteBits(bitstream, leadingZeros + 1, infobits);
- return status;
-}
-
-/* see Table 9-4 assignment of codeNum to values of coded_block_pattern. */
-const static uint8 MapCBP2code[48][2] =
-{
- {3, 0}, {29, 2}, {30, 3}, {17, 7}, {31, 4}, {18, 8}, {37, 17}, {8, 13}, {32, 5}, {38, 18}, {19, 9}, {9, 14},
- {20, 10}, {10, 15}, {11, 16}, {2, 11}, {16, 1}, {33, 32}, {34, 33}, {21, 36}, {35, 34}, {22, 37}, {39, 44}, {4, 40},
- {36, 35}, {40, 45}, {23, 38}, {5, 41}, {24, 39}, {6, 42}, {7, 43}, {1, 19}, {41, 6}, {42, 24}, {43, 25}, {25, 20},
- {44, 26}, {26, 21}, {46, 46}, {12, 28}, {45, 27}, {47, 47}, {27, 22}, {13, 29}, {28, 23}, {14, 30}, {15, 31}, {0, 12}
-};
-
-AVCEnc_Status EncodeCBP(AVCMacroblock *currMB, AVCEncBitstream *stream)
-{
- AVCEnc_Status status;
- uint codeNum;
-
- if (currMB->mbMode == AVC_I4)
- {
- codeNum = MapCBP2code[currMB->CBP][0];
- }
- else
- {
- codeNum = MapCBP2code[currMB->CBP][1];
- }
-
- status = ue_v(stream, codeNum);
-
- return status;
-}
-
-AVCEnc_Status ce_TotalCoeffTrailingOnes(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff, int nC)
-{
- const static uint8 totCoeffTrailOne[3][4][17][2] =
- {
- { // 0702
- {{1, 1}, {6, 5}, {8, 7}, {9, 7}, {10, 7}, {11, 7}, {13, 15}, {13, 11}, {13, 8}, {14, 15}, {14, 11}, {15, 15}, {15, 11}, {16, 15}, {16, 11}, {16, 7}, {16, 4}},
- {{0, 0}, {2, 1}, {6, 4}, {8, 6}, {9, 6}, {10, 6}, {11, 6}, {13, 14}, {13, 10}, {14, 14}, {14, 10}, {15, 14}, {15, 10}, {15, 1}, {16, 14}, {16, 10}, {16, 6}},
- {{0, 0}, {0, 0}, {3, 1}, {7, 5}, {8, 5}, {9, 5}, {10, 5}, {11, 5}, {13, 13}, {13, 9}, {14, 13}, {14, 9}, {15, 13}, {15, 9}, {16, 13}, {16, 9}, {16, 5}},
- {{0, 0}, {0, 0}, {0, 0}, {5, 3}, {6, 3}, {7, 4}, {8, 4}, {9, 4}, {10, 4}, {11, 4}, {13, 12}, {14, 12}, {14, 8}, {15, 12}, {15, 8}, {16, 12}, {16, 8}},
- },
- {
- {{2, 3}, {6, 11}, {6, 7}, {7, 7}, {8, 7}, {8, 4}, {9, 7}, {11, 15}, {11, 11}, {12, 15}, {12, 11}, {12, 8}, {13, 15}, {13, 11}, {13, 7}, {14, 9}, {14, 7}},
- {{0, 0}, {2, 2}, {5, 7}, {6, 10}, {6, 6}, {7, 6}, {8, 6}, {9, 6}, {11, 14}, {11, 10}, {12, 14}, {12, 10}, {13, 14}, {13, 10}, {14, 11}, {14, 8}, {14, 6}},
- {{0, 0}, {0, 0}, {3, 3}, {6, 9}, {6, 5}, {7, 5}, {8, 5}, {9, 5}, {11, 13}, {11, 9}, {12, 13}, {12, 9}, {13, 13}, {13, 9}, {13, 6}, {14, 10}, {14, 5}},
- {{0, 0}, {0, 0}, {0, 0}, {4, 5}, {4, 4}, {5, 6}, {6, 8}, {6, 4}, {7, 4}, {9, 4}, {11, 12}, {11, 8}, {12, 12}, {13, 12}, {13, 8}, {13, 1}, {14, 4}},
- },
- {
- {{4, 15}, {6, 15}, {6, 11}, {6, 8}, {7, 15}, {7, 11}, {7, 9}, {7, 8}, {8, 15}, {8, 11}, {9, 15}, {9, 11}, {9, 8}, {10, 13}, {10, 9}, {10, 5}, {10, 1}},
- {{0, 0}, {4, 14}, {5, 15}, {5, 12}, {5, 10}, {5, 8}, {6, 14}, {6, 10}, {7, 14}, {8, 14}, {8, 10}, {9, 14}, {9, 10}, {9, 7}, {10, 12}, {10, 8}, {10, 4}},
- {{0, 0}, {0, 0}, {4, 13}, {5, 14}, {5, 11}, {5, 9}, {6, 13}, {6, 9}, {7, 13}, {7, 10}, {8, 13}, {8, 9}, {9, 13}, {9, 9}, {10, 11}, {10, 7}, {10, 3}},
- {{0, 0}, {0, 0}, {0, 0}, {4, 12}, {4, 11}, {4, 10}, {4, 9}, {4, 8}, {5, 13}, {6, 12}, {7, 12}, {8, 12}, {8, 8}, {9, 12}, {10, 10}, {10, 6}, {10, 2}}
- }
- };
-
-
- AVCEnc_Status status = AVCENC_SUCCESS;
- uint code, len;
- int vlcnum;
-
- if (TrailingOnes > 3)
- {
- return AVCENC_TRAILINGONES_FAIL;
- }
-
- if (nC >= 8)
- {
- if (TotalCoeff)
- {
- code = ((TotalCoeff - 1) << 2) | (TrailingOnes);
- }
- else
- {
- code = 3;
- }
- status = BitstreamWriteBits(stream, 6, code);
- }
- else
- {
- if (nC < 2)
- {
- vlcnum = 0;
- }
- else if (nC < 4)
- {
- vlcnum = 1;
- }
- else
- {
- vlcnum = 2;
- }
-
- len = totCoeffTrailOne[vlcnum][TrailingOnes][TotalCoeff][0];
- code = totCoeffTrailOne[vlcnum][TrailingOnes][TotalCoeff][1];
- status = BitstreamWriteBits(stream, len, code);
- }
-
- return status;
-}
-
-AVCEnc_Status ce_TotalCoeffTrailingOnesChromaDC(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff)
-{
- const static uint8 totCoeffTrailOneChrom[4][5][2] =
- {
- { {2, 1}, {6, 7}, {6, 4}, {6, 3}, {6, 2}},
- { {0, 0}, {1, 1}, {6, 6}, {7, 3}, {8, 3}},
- { {0, 0}, {0, 0}, {3, 1}, {7, 2}, {8, 2}},
- { {0, 0}, {0, 0}, {0, 0}, {6, 5}, {7, 0}},
- };
-
- AVCEnc_Status status = AVCENC_SUCCESS;
- uint code, len;
-
- len = totCoeffTrailOneChrom[TrailingOnes][TotalCoeff][0];
- code = totCoeffTrailOneChrom[TrailingOnes][TotalCoeff][1];
- status = BitstreamWriteBits(stream, len, code);
-
- return status;
-}
-
-/* see Table 9-7 and 9-8 */
-AVCEnc_Status ce_TotalZeros(AVCEncBitstream *stream, int total_zeros, int TotalCoeff)
-{
- const static uint8 lenTotalZeros[15][16] =
- {
- { 1, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9},
- { 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6},
- { 4, 3, 3, 3, 4, 4, 3, 3, 4, 5, 5, 6, 5, 6},
- { 5, 3, 4, 4, 3, 3, 3, 4, 3, 4, 5, 5, 5},
- { 4, 4, 4, 3, 3, 3, 3, 3, 4, 5, 4, 5},
- { 6, 5, 3, 3, 3, 3, 3, 3, 4, 3, 6},
- { 6, 5, 3, 3, 3, 2, 3, 4, 3, 6},
- { 6, 4, 5, 3, 2, 2, 3, 3, 6},
- { 6, 6, 4, 2, 2, 3, 2, 5},
- { 5, 5, 3, 2, 2, 2, 4},
- { 4, 4, 3, 3, 1, 3},
- { 4, 4, 2, 1, 3},
- { 3, 3, 1, 2},
- { 2, 2, 1},
- { 1, 1},
- };
-
- const static uint8 codTotalZeros[15][16] =
- {
- {1, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 1},
- {7, 6, 5, 4, 3, 5, 4, 3, 2, 3, 2, 3, 2, 1, 0},
- {5, 7, 6, 5, 4, 3, 4, 3, 2, 3, 2, 1, 1, 0},
- {3, 7, 5, 4, 6, 5, 4, 3, 3, 2, 2, 1, 0},
- {5, 4, 3, 7, 6, 5, 4, 3, 2, 1, 1, 0},
- {1, 1, 7, 6, 5, 4, 3, 2, 1, 1, 0},
- {1, 1, 5, 4, 3, 3, 2, 1, 1, 0},
- {1, 1, 1, 3, 3, 2, 2, 1, 0},
- {1, 0, 1, 3, 2, 1, 1, 1, },
- {1, 0, 1, 3, 2, 1, 1, },
- {0, 1, 1, 2, 1, 3},
- {0, 1, 1, 1, 1},
- {0, 1, 1, 1},
- {0, 1, 1},
- {0, 1},
- };
- int len, code;
- AVCEnc_Status status;
-
- len = lenTotalZeros[TotalCoeff-1][total_zeros];
- code = codTotalZeros[TotalCoeff-1][total_zeros];
-
- status = BitstreamWriteBits(stream, len, code);
-
- return status;
-}
-
-/* see Table 9-9 */
-AVCEnc_Status ce_TotalZerosChromaDC(AVCEncBitstream *stream, int total_zeros, int TotalCoeff)
-{
- const static uint8 lenTotalZerosChromaDC[3][4] =
- {
- { 1, 2, 3, 3, },
- { 1, 2, 2, 0, },
- { 1, 1, 0, 0, },
- };
-
- const static uint8 codTotalZerosChromaDC[3][4] =
- {
- { 1, 1, 1, 0, },
- { 1, 1, 0, 0, },
- { 1, 0, 0, 0, },
- };
-
- int len, code;
- AVCEnc_Status status;
-
- len = lenTotalZerosChromaDC[TotalCoeff-1][total_zeros];
- code = codTotalZerosChromaDC[TotalCoeff-1][total_zeros];
-
- status = BitstreamWriteBits(stream, len, code);
-
- return status;
-}
-
-/* see Table 9-10 */
-AVCEnc_Status ce_RunBefore(AVCEncBitstream *stream, int run_before, int zerosLeft)
-{
- const static uint8 lenRunBefore[7][16] =
- {
- {1, 1},
- {1, 2, 2},
- {2, 2, 2, 2},
- {2, 2, 2, 3, 3},
- {2, 2, 3, 3, 3, 3},
- {2, 3, 3, 3, 3, 3, 3},
- {3, 3, 3, 3, 3, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11},
- };
-
- const static uint8 codRunBefore[7][16] =
- {
- {1, 0},
- {1, 1, 0},
- {3, 2, 1, 0},
- {3, 2, 1, 1, 0},
- {3, 2, 3, 2, 1, 0},
- {3, 0, 1, 3, 2, 5, 4},
- {7, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1},
- };
-
- int len, code;
- AVCEnc_Status status;
-
- if (zerosLeft <= 6)
- {
- len = lenRunBefore[zerosLeft-1][run_before];
- code = codRunBefore[zerosLeft-1][run_before];
- }
- else
- {
- len = lenRunBefore[6][run_before];
- code = codRunBefore[6][run_before];
- }
-
- status = BitstreamWriteBits(stream, len, code);
-
-
- return status;
-}
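The ue_v()/SetEGBitstring() pair in the deleted vlc_encode.cpp writes unsigned Exp-Golomb codes: codeNum is emitted as floor(log2(codeNum + 1)) zero bits followed by codeNum + 1 in binary, and se_v() maps signed values onto the same scheme (2v - 1 for v > 0, -2v otherwise). A standalone sketch that prints the equivalent bit pattern (print_ue is an invented name, not part of the tree):

#include <stdio.h>
#include <stdint.h>

static void print_ue(uint32_t codeNum)
{
    uint32_t value = codeNum + 1;
    int bits = 0;

    while ((value >> bits) > 1)          /* bits = floor(log2(value)) */
        bits++;
    for (int i = 0; i < bits; i++)       /* prefix: 'bits' zero bits */
        putchar('0');
    for (int i = bits; i >= 0; i--)      /* suffix: value itself, MSB first */
        putchar(((value >> i) & 1) ? '1' : '0');
    putchar('\n');
}

codeNum 0..3 come out as 1, 010, 011 and 00100, matching the leadingZeros/infobits split computed in the deleted SetEGBitstring().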
diff --git a/media/libstagefright/codecs/avc/enc/test/h264_enc_test.cpp b/media/libstagefright/codecs/avc/enc/test/h264_enc_test.cpp
deleted file mode 100644
index 7a782a8..0000000
--- a/media/libstagefright/codecs/avc/enc/test/h264_enc_test.cpp
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdint.h>
-#include <assert.h>
-#include <stdlib.h>
-
-#include "avcenc_api.h"
-#include "avcenc_int.h"
-
-// Constants.
-enum {
- kMaxWidth = 720,
- kMaxHeight = 480,
- kMaxFrameRate = 30,
- kMaxBitrate = 2048, // in kbps.
- kInputBufferSize = (kMaxWidth * kMaxHeight * 3) / 2, // For YUV 420 format.
- kOutputBufferSize = kInputBufferSize,
- kMaxDpbBuffers = 17,
- kIDRFrameRefreshIntervalInSec = 1,
-};
-
-
-static void *MallocCb(void * /*userData*/, int32_t size, int32_t /*attrs*/) {
- void *ptr = calloc(size, 1);
- return ptr;
-}
-
-static void FreeCb(void * /*userData*/, void *ptr) {
- free(ptr);
-}
-
-static int32_t DpbAllocCb(void * /*userData*/,
- unsigned int sizeInMbs, unsigned int numBuffers) {
-
- size_t frameSize = (sizeInMbs << 7) * 3;
- if(numBuffers < kMaxDpbBuffers && frameSize <= kInputBufferSize) {
- return 1;
- } else {
- return 0;
- }
-}
-
-static int32_t BindFrameCb(void *userData, int32_t index, uint8_t **yuv) {
- assert(index < kMaxDpbBuffers);
- uint8_t** dpbBuffer = static_cast<uint8_t**>(userData);
- *yuv = dpbBuffer[index];
- return 1;
-}
-
-static void UnbindFrameCb(void * /*userData*/, int32_t /*index*/) {
-}
-
-int main(int argc, char *argv[]) {
-
- if (argc < 7) {
- fprintf(stderr, "Usage %s <input yuv> <output file> <width> <height>"
- " <frame rate> <bitrate in kbps>\n", argv[0]);
- fprintf(stderr, "Max width %d\n", kMaxWidth);
- fprintf(stderr, "Max height %d\n", kMaxHeight);
- fprintf(stderr, "Max framerate %d\n", kMaxFrameRate);
- fprintf(stderr, "Max bitrate %d kbps\n", kMaxBitrate);
- return EXIT_FAILURE;
- }
-
- // Read height and width.
- int32_t width;
- int32_t height;
- width = atoi(argv[3]);
- height = atoi(argv[4]);
- if (width > kMaxWidth || height > kMaxHeight || width <= 0 || height <= 0) {
- fprintf(stderr, "Unsupported dimensions %dx%d\n", width, height);
- return EXIT_FAILURE;
- }
-
- if (width % 16 != 0 || height % 16 != 0) {
- fprintf(stderr, "Video frame size %dx%d must be a multiple of 16\n",
- width, height);
- return EXIT_FAILURE;
- }
-
- // Read frame rate.
- int32_t frameRate;
- frameRate = atoi(argv[5]);
- if (frameRate > kMaxFrameRate || frameRate <= 0) {
- fprintf(stderr, "Unsupported frame rate %d\n", frameRate);
- return EXIT_FAILURE;
- }
-
- // Read bit rate.
- int32_t bitrate;
- bitrate = atoi(argv[6]);
- if (bitrate > kMaxBitrate || bitrate <= 0) {
- fprintf(stderr, "Unsupported bitrate %d\n", bitrate);
- return EXIT_FAILURE;
- }
- bitrate *= 1024; // kbps to bps.
-
- // Open the input file.
- FILE *fpInput = fopen(argv[1], "rb");
- if (!fpInput) {
- fprintf(stderr, "Could not open %s\n", argv[1]);
- return EXIT_FAILURE;
- }
-
- // Open the output file.
- FILE *fpOutput = fopen(argv[2], "wb");
- if (!fpOutput) {
- fprintf(stderr, "Could not open %s\n", argv[2]);
- fclose(fpInput);
- return EXIT_FAILURE;
- }
-
- // Allocate input buffer.
- uint8_t *inputBuf = (uint8_t *)malloc(kInputBufferSize);
- assert(inputBuf != NULL);
-
- // Allocate output buffer.
- uint8_t *outputBuf = (uint8_t *)malloc(kOutputBufferSize);
- assert(outputBuf != NULL);
-
- // Allocate dpb buffers.
- uint8_t * dpbBuffers[kMaxDpbBuffers];
- for (int i = 0; i < kMaxDpbBuffers; ++i) {
- dpbBuffers[i] = (uint8_t *)malloc(kInputBufferSize);
- assert(dpbBuffers[i] != NULL);
- }
-
- // Initialize the encoder parameters.
- tagAVCEncParam encParams;
- memset(&encParams, 0, sizeof(tagAVCEncParam));
- encParams.rate_control = AVC_ON;
- encParams.initQP = 0;
- encParams.init_CBP_removal_delay = 1600;
-
- encParams.intramb_refresh = 0;
- encParams.auto_scd = AVC_ON;
- encParams.out_of_band_param_set = AVC_ON;
- encParams.poc_type = 2;
- encParams.log2_max_poc_lsb_minus_4 = 12;
- encParams.delta_poc_zero_flag = 0;
- encParams.offset_poc_non_ref = 0;
- encParams.offset_top_bottom = 0;
- encParams.num_ref_in_cycle = 0;
- encParams.offset_poc_ref = NULL;
-
- encParams.num_ref_frame = 1;
- encParams.num_slice_group = 1;
- encParams.fmo_type = 0;
-
- encParams.db_filter = AVC_ON;
- encParams.disable_db_idc = 0;
-
- encParams.alpha_offset = 0;
- encParams.beta_offset = 0;
- encParams.constrained_intra_pred = AVC_OFF;
-
- encParams.data_par = AVC_OFF;
- encParams.fullsearch = AVC_OFF;
- encParams.search_range = 16;
- encParams.sub_pel = AVC_OFF;
- encParams.submb_pred = AVC_OFF;
- encParams.rdopt_mode = AVC_OFF;
- encParams.bidir_pred = AVC_OFF;
-
- encParams.use_overrun_buffer = AVC_OFF;
-
- encParams.width = width;
- encParams.height = height;
- encParams.bitrate = bitrate;
- encParams.frame_rate = 1000 * frameRate; // In frames/ms.
- encParams.CPB_size = (uint32_t) (bitrate >> 1);
-
- int32_t IDRFrameRefreshIntervalInSec = kIDRFrameRefreshIntervalInSec;
- if (IDRFrameRefreshIntervalInSec == 0) {
- encParams.idr_period = 1; // All I frames.
- } else {
- encParams.idr_period = (IDRFrameRefreshIntervalInSec * frameRate);
- }
-
- int32_t nMacroBlocks = ((((width + 15) >> 4) << 4) *
- (((height + 15) >> 4) << 4)) >> 8;
- uint32_t *sliceGroup = (uint32_t *) malloc(sizeof(uint32_t) * nMacroBlocks);
- assert(sliceGroup != NULL);
- for (int i = 0, idx = 0; i < nMacroBlocks; ++i) {
- sliceGroup[i] = idx++;
- if (idx >= encParams.num_slice_group) {
- idx = 0;
- }
- }
- encParams.slice_group = sliceGroup;
- encParams.profile = AVC_BASELINE;
- encParams.level = AVC_LEVEL2;
-
- // Initialize the handle.
- tagAVCHandle handle;
- memset(&handle, 0, sizeof(tagAVCHandle));
- handle.AVCObject = NULL;
- handle.userData = dpbBuffers;
- handle.CBAVC_DPBAlloc = DpbAllocCb;
- handle.CBAVC_FrameBind = BindFrameCb;
- handle.CBAVC_FrameUnbind = UnbindFrameCb;
- handle.CBAVC_Malloc = MallocCb;
- handle.CBAVC_Free = FreeCb;
-
- // Initialize the encoder.
- AVCEnc_Status status;
- status = PVAVCEncInitialize(&handle, &encParams, NULL, NULL);
- if (status != AVCENC_SUCCESS) {
- fprintf(stderr, "Failed to initialize the encoder\n");
-
- // Release resources.
- fclose(fpInput);
- fclose(fpOutput);
- free(sliceGroup);
- free(inputBuf);
- free(outputBuf);
- for (int i = 0; i < kMaxDpbBuffers; ++i) {
- free(dpbBuffers[i]);
- }
- return EXIT_FAILURE;
- }
-
- // Encode Sequence Parameter Set.
- uint32_t dataLength = kOutputBufferSize;
- int32_t type;
- status = PVAVCEncodeNAL(&handle, outputBuf, &dataLength, &type);
- assert(type == AVC_NALTYPE_SPS);
- fwrite("\x00\x00\x00\x01", 1, 4, fpOutput); // Start Code.
- fwrite(outputBuf, 1, dataLength, fpOutput); // SPS.
-
- // Encode Picture Paramater Set.
- dataLength = kOutputBufferSize;
- status = PVAVCEncodeNAL(&handle, outputBuf, &dataLength, &type);
- assert(type == AVC_NALTYPE_PPS);
- fwrite("\x00\x00\x00\x01", 1, 4, fpOutput); // Start Code.
- fwrite(outputBuf, 1, dataLength, fpOutput); // PPS.
-
- // Core loop.
- int32_t retVal = EXIT_SUCCESS;
- int32_t frameSize = (width * height * 3) / 2;
- int32_t numInputFrames = 0;
- int32_t numNalEncoded = 0;
- bool readyForNextFrame = true;
-
- while (1) {
- if (readyForNextFrame == true) {
- // Read the input frame.
- int32_t bytesRead;
- bytesRead = fread(inputBuf, 1, frameSize, fpInput);
- if (bytesRead != frameSize) {
- break; // End of file.
- }
-
- // Set the input frame.
- AVCFrameIO vin;
- memset(&vin, 0, sizeof(vin));
- vin.height = ((height + 15) >> 4) << 4;
- vin.pitch = ((width + 15) >> 4) << 4;
- vin.coding_timestamp = (numInputFrames * 1000) / frameRate; // in ms
- vin.YCbCr[0] = inputBuf;
- vin.YCbCr[1] = vin.YCbCr[0] + vin.height * vin.pitch;
- vin.YCbCr[2] = vin.YCbCr[1] + ((vin.height * vin.pitch) >> 2);
- vin.disp_order = numInputFrames;
-
- status = PVAVCEncSetInput(&handle, &vin);
- if (status == AVCENC_SUCCESS || status == AVCENC_NEW_IDR) {
- readyForNextFrame = false;
- ++numInputFrames;
- } else if (status < AVCENC_SUCCESS) {
- fprintf(stderr, "Error %d while setting input frame\n", status);
- retVal = EXIT_FAILURE;
- break;
- } else {
- fprintf(stderr, "Frame drop\n");
- readyForNextFrame = true;
- ++numInputFrames;
- continue;
- }
- }
-
- // Encode the input frame.
- dataLength = kOutputBufferSize;
- status = PVAVCEncodeNAL(&handle, outputBuf, &dataLength, &type);
- if (status == AVCENC_SUCCESS) {
- PVAVCEncGetOverrunBuffer(&handle);
- } else if (status == AVCENC_PICTURE_READY) {
- PVAVCEncGetOverrunBuffer(&handle);
- readyForNextFrame = true;
- AVCFrameIO recon;
- if (PVAVCEncGetRecon(&handle, &recon) == AVCENC_SUCCESS) {
- PVAVCEncReleaseRecon(&handle, &recon);
- }
- } else {
- dataLength = 0;
- readyForNextFrame = true;
- }
-
- if (status < AVCENC_SUCCESS) {
- fprintf(stderr, "Error %d while encoding frame\n", status);
- retVal = EXIT_FAILURE;
- break;
- }
-
- numNalEncoded++;
-
- // Write the output.
- if (dataLength > 0) {
- fwrite("\x00\x00\x00\x01", 1, 4, fpOutput); // Start Code.
- fwrite(outputBuf, 1, dataLength, fpOutput); // NAL.
- printf("NAL %d of size %d written\n", numNalEncoded, dataLength + 4);
- }
- }
-
- // Close input and output file.
- fclose(fpInput);
- fclose(fpOutput);
-
- // Free allocated memory.
- free(sliceGroup);
- free(inputBuf);
- free(outputBuf);
- for (int i = 0; i < kMaxDpbBuffers; ++i) {
- free(dpbBuffers[i]);
- }
-
- // Close encoder instance.
- PVAVCCleanUpEncoder(&handle);
-
- return retVal;
-}
diff --git a/media/libstagefright/codecs/avc/patent_disclaimer.txt b/media/libstagefright/codecs/avc/patent_disclaimer.txt
deleted file mode 100644
index b4bf11d..0000000
--- a/media/libstagefright/codecs/avc/patent_disclaimer.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-
-THIS IS NOT A GRANT OF PATENT RIGHTS.
-
-Google makes no representation or warranty that the codecs for which
-source code is made available hereunder are unencumbered by
-third-party patents. Those intending to use this source code in
-hardware or software products are advised that implementations of
-these codecs, including in open source software or shareware, may
-require patent licenses from the relevant patent holders.
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/mb_motion_comp.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/mb_motion_comp.cpp
index fbc7be1..877723d 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/mb_motion_comp.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/mb_motion_comp.cpp
@@ -15,6 +15,10 @@
* and limitations under the License.
* -------------------------------------------------------------------
*/
+
+#define LOG_TAG "m4v_h263"
+#include <log/log.h>
+
/*
------------------------------------------------------------------------------
INPUT AND OUTPUT DEFINITIONS
@@ -236,6 +240,11 @@
/* Pointer to previous luminance frame */
c_prev = prev->yChan;
+ if (!c_prev) {
+ ALOGE("b/35269635");
+ android_errorWriteLog(0x534e4554, "35269635");
+ return;
+ }
pred_block = video->mblock->pred_block;
@@ -574,7 +583,14 @@
/* zero motion compensation for previous frame */
/*mby*width + mbx;*/
- c_prev = prev->yChan + offset;
+ c_prev = prev->yChan;
+ if (!c_prev) {
+ ALOGE("b/35269635");
+ android_errorWriteLog(0x534e4554, "35269635");
+ return;
+ }
+ c_prev += offset;
+
/*by*width_uv + bx;*/
cu_prev = prev->uChan + (offset >> 2) + (xpos >> 2);
/*by*width_uv + bx;*/
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
index c1720c6..8d5d071 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
@@ -15,6 +15,8 @@
* and limitations under the License.
* -------------------------------------------------------------------
*/
+#define LOG_TAG "pvdec_api"
+#include <log/log.h>
#include "mp4dec_lib.h"
#include "vlc_decode.h"
#include "bitstream.h"
@@ -1335,6 +1337,11 @@
}
}
+ if (!video->prevVop->yChan) {
+ ALOGE("b/35269635");
+ android_errorWriteLog(0x534e4554, "35269635");
+ return PV_FALSE;
+ }
oscl_memcpy(currVop->yChan, video->prevVop->yChan, (decCtrl->size*3) / 2);
video->prevVop = prevVop;
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index 9105084..8d1ad66 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -379,7 +379,7 @@
flags &= ~1;
}
- if (flags & 2) {
+ if ((flags & 2) && (dataSize >= 2)) {
// This file has "unsynchronization", so we have to replace occurrences
// of 0xff 0x00 with just 0xff in order to get the real data.
@@ -395,11 +395,15 @@
mData[writeOffset++] = mData[readOffset++];
}
// move the remaining data following this frame
- memmove(&mData[writeOffset], &mData[readOffset], oldSize - readOffset);
+ if (readOffset <= oldSize) {
+ memmove(&mData[writeOffset], &mData[readOffset], oldSize - readOffset);
+ } else {
+ ALOGE("b/34618607 (%zu %zu %zu %zu)", readOffset, writeOffset, oldSize, mSize);
+ android_errorWriteLog(0x534e4554, "34618607");
+ }
- flags &= ~2;
}
-
+ flags &= ~2;
if (flags != prevFlags || iTunesHack) {
WriteSyncsafeInteger(&mData[offset + 4], dataSize);
mData[offset + 8] = flags >> 8;
diff --git a/media/libstagefright/include/MediaCodec.h b/media/libstagefright/include/MediaCodec.h
index 30454dc..4140266 100644
--- a/media/libstagefright/include/MediaCodec.h
+++ b/media/libstagefright/include/MediaCodec.h
@@ -186,7 +186,7 @@
status_t getName(AString *componentName) const;
- status_t getMetrics(Parcel *reply);
+ status_t getMetrics(MediaAnalyticsItem * &reply);
status_t setParameters(const sp<AMessage> &params);
diff --git a/media/libstagefright/omx/hal/1.0/impl/Conversion.h b/media/libstagefright/omx/1.0/Conversion.h
similarity index 90%
rename from media/libstagefright/omx/hal/1.0/impl/Conversion.h
rename to media/libstagefright/omx/1.0/Conversion.h
index a6fed2e..fd91574 100644
--- a/media/libstagefright/omx/hal/1.0/impl/Conversion.h
+++ b/media/libstagefright/omx/1.0/Conversion.h
@@ -37,14 +37,13 @@
#include <VideoAPI.h>
#include <android/hidl/memory/1.0/IMemory.h>
+#include <android/hardware/graphics/bufferqueue/1.0/IProducerListener.h>
#include <android/hardware/media/omx/1.0/types.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
#include <android/hardware/media/omx/1.0/IOmxNode.h>
-#include <android/hardware/media/omx/1.0/IOmxBufferProducer.h>
#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
-#include <android/hardware/media/omx/1.0/IOmxObserver.h>
-#include <android/hardware/media/omx/1.0/IOmxProducerListener.h>
#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
#include <android/IGraphicBufferSource.h>
#include <android/IOMXBufferSource.h>
@@ -95,8 +94,10 @@
using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
using ::android::IOMXBufferSource;
-using ::android::hardware::media::omx::V1_0::IOmxBufferProducer;
-using ::android::IGraphicBufferProducer;
+typedef ::android::hardware::graphics::bufferqueue::V1_0::IGraphicBufferProducer
+ HGraphicBufferProducer;
+typedef ::android::IGraphicBufferProducer
+ BGraphicBufferProducer;
// native_handle_t helper functions.
@@ -1191,14 +1192,14 @@
* \return The required size of the flat buffer.
*/
inline size_t getFlattenedSize(
- IOmxBufferProducer::FenceTimeSnapshot const& t) {
+ HGraphicBufferProducer::FenceTimeSnapshot const& t) {
constexpr size_t min = sizeof(t.state);
switch (t.state) {
- case IOmxBufferProducer::FenceTimeSnapshot::State::EMPTY:
+ case HGraphicBufferProducer::FenceTimeSnapshot::State::EMPTY:
return min;
- case IOmxBufferProducer::FenceTimeSnapshot::State::FENCE:
+ case HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE:
return min + getFenceFlattenedSize(t.fence);
- case IOmxBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
+ case HGraphicBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
return min + sizeof(
::android::FenceTime::Snapshot::signalTime);
}
@@ -1213,9 +1214,9 @@
* \return The number of file descriptors contained in \p snapshot.
*/
inline size_t getFdCount(
- IOmxBufferProducer::FenceTimeSnapshot const& t) {
+ HGraphicBufferProducer::FenceTimeSnapshot const& t) {
return t.state ==
- IOmxBufferProducer::FenceTimeSnapshot::State::FENCE ?
+ HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE ?
getFenceFdCount(t.fence) : 0;
}
@@ -1232,22 +1233,22 @@
* This function will duplicate the file descriptor in `t.fence` if `t.state ==
* FENCE`.
*/
-inline status_t flatten(IOmxBufferProducer::FenceTimeSnapshot const& t,
+inline status_t flatten(HGraphicBufferProducer::FenceTimeSnapshot const& t,
void*& buffer, size_t& size, int*& fds, size_t& numFds) {
if (size < getFlattenedSize(t)) {
return NO_MEMORY;
}
switch (t.state) {
- case IOmxBufferProducer::FenceTimeSnapshot::State::EMPTY:
+ case HGraphicBufferProducer::FenceTimeSnapshot::State::EMPTY:
FlattenableUtils::write(buffer, size,
::android::FenceTime::Snapshot::State::EMPTY);
return NO_ERROR;
- case IOmxBufferProducer::FenceTimeSnapshot::State::FENCE:
+ case HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE:
FlattenableUtils::write(buffer, size,
::android::FenceTime::Snapshot::State::FENCE);
return flattenFence(t.fence, buffer, size, fds, numFds);
- case IOmxBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
+ case HGraphicBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
FlattenableUtils::write(buffer, size,
::android::FenceTime::Snapshot::State::SIGNAL_TIME);
FlattenableUtils::write(buffer, size, t.signalTimeNs);
@@ -1272,7 +1273,7 @@
* case, \p nh needs to be deleted with `native_handle_delete()` afterwards.
*/
inline status_t unflatten(
- IOmxBufferProducer::FenceTimeSnapshot* t, native_handle_t** nh,
+ HGraphicBufferProducer::FenceTimeSnapshot* t, native_handle_t** nh,
void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
if (size < sizeof(t->state)) {
return NO_MEMORY;
@@ -1283,13 +1284,13 @@
FlattenableUtils::read(buffer, size, state);
switch (state) {
case ::android::FenceTime::Snapshot::State::EMPTY:
- t->state = IOmxBufferProducer::FenceTimeSnapshot::State::EMPTY;
+ t->state = HGraphicBufferProducer::FenceTimeSnapshot::State::EMPTY;
return NO_ERROR;
case ::android::FenceTime::Snapshot::State::FENCE:
- t->state = IOmxBufferProducer::FenceTimeSnapshot::State::FENCE;
+ t->state = HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE;
return unflattenFence(&t->fence, nh, buffer, size, fds, numFds);
case ::android::FenceTime::Snapshot::State::SIGNAL_TIME:
- t->state = IOmxBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME;
+ t->state = HGraphicBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME;
if (size < sizeof(t->signalTimeNs)) {
return NO_MEMORY;
}
@@ -1309,7 +1310,7 @@
* \return A lower bound on the size of the flat buffer.
*/
constexpr size_t minFlattenedSize(
- IOmxBufferProducer::FrameEventsDelta const& /* t */) {
+ HGraphicBufferProducer::FrameEventsDelta const& /* t */) {
return sizeof(uint64_t) + // mFrameNumber
sizeof(uint8_t) + // mIndex
sizeof(uint8_t) + // mAddPostCompositeCalled
@@ -1330,7 +1331,7 @@
* \return The required size of the flat buffer.
*/
inline size_t getFlattenedSize(
- IOmxBufferProducer::FrameEventsDelta const& t) {
+ HGraphicBufferProducer::FrameEventsDelta const& t) {
return minFlattenedSize(t) +
getFlattenedSize(t.gpuCompositionDoneFence) +
getFlattenedSize(t.displayPresentFence) +
@@ -1346,7 +1347,7 @@
* \return The number of file descriptors contained in \p t.
*/
inline size_t getFdCount(
- IOmxBufferProducer::FrameEventsDelta const& t) {
+ HGraphicBufferProducer::FrameEventsDelta const& t) {
return getFdCount(t.gpuCompositionDoneFence) +
getFdCount(t.displayPresentFence) +
getFdCount(t.displayRetireFence) +
@@ -1368,7 +1369,7 @@
* populated with `nullptr` or newly created handles. Each non-null slot in \p
* nh will need to be deleted manually with `native_handle_delete()`.
*/
-inline status_t unflatten(IOmxBufferProducer::FrameEventsDelta* t,
+inline status_t unflatten(HGraphicBufferProducer::FrameEventsDelta* t,
std::vector<native_handle_t*>* nh,
void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
if (size < minFlattenedSize(*t)) {
@@ -1400,7 +1401,7 @@
FlattenableUtils::read(buffer, size, t->dequeueReadyTime);
// Fences
- IOmxBufferProducer::FenceTimeSnapshot* tSnapshot[4];
+ HGraphicBufferProducer::FenceTimeSnapshot* tSnapshot[4];
tSnapshot[0] = &t->gpuCompositionDoneFence;
tSnapshot[1] = &t->displayPresentFence;
tSnapshot[2] = &t->displayRetireFence;
@@ -1437,7 +1438,7 @@
*/
// Ref: frameworks/native/libs/gui/FrameTimestamp.cpp:
// FrameEventsDelta::flatten
-inline status_t flatten(IOmxBufferProducer::FrameEventsDelta const& t,
+inline status_t flatten(HGraphicBufferProducer::FrameEventsDelta const& t,
void*& buffer, size_t& size, int*& fds, size_t numFds) {
// Check that t.index is within a valid range.
if (t.index >= static_cast<uint32_t>(FrameEventHistory::MAX_FRAME_HISTORY)
@@ -1464,7 +1465,7 @@
FlattenableUtils::write(buffer, size, t.dequeueReadyTime);
// Fences
- IOmxBufferProducer::FenceTimeSnapshot const* tSnapshot[4];
+ HGraphicBufferProducer::FenceTimeSnapshot const* tSnapshot[4];
tSnapshot[0] = &t.gpuCompositionDoneFence;
tSnapshot[1] = &t.displayPresentFence;
tSnapshot[2] = &t.displayRetireFence;
@@ -1483,13 +1484,13 @@
/**
* \brief Return the size of the non-fd buffer required to flatten
- * `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
*
- * \param[in] t The input `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * \param[in] t The input `HGraphicBufferProducer::FrameEventHistoryDelta`.
* \return The required size of the flat buffer.
*/
inline size_t getFlattenedSize(
- IOmxBufferProducer::FrameEventHistoryDelta const& t) {
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t) {
size_t size = 4 + // mDeltas.size()
sizeof(t.compositorTiming);
for (size_t i = 0; i < t.deltas.size(); ++i) {
@@ -1500,13 +1501,13 @@
/**
* \brief Return the number of file descriptors contained in
- * `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
*
- * \param[in] t The input `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * \param[in] t The input `HGraphicBufferProducer::FrameEventHistoryDelta`.
* \return The number of file descriptors contained in \p t.
*/
inline size_t getFdCount(
- IOmxBufferProducer::FrameEventHistoryDelta const& t) {
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t) {
size_t numFds = 0;
for (size_t i = 0; i < t.deltas.size(); ++i) {
numFds += getFdCount(t.deltas[i]);
@@ -1530,7 +1531,7 @@
* slot in \p nh will need to be deleted manually with `native_handle_delete()`.
*/
inline status_t unflatten(
- IOmxBufferProducer::FrameEventHistoryDelta* t,
+ HGraphicBufferProducer::FrameEventHistoryDelta* t,
std::vector<std::vector<native_handle_t*> >* nh,
void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
if (size < 4) {
@@ -1571,7 +1572,7 @@
* This function will duplicate file descriptors contained in \p t.
*/
inline status_t flatten(
- IOmxBufferProducer::FrameEventHistoryDelta const& t,
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t,
void*& buffer, size_t& size, int*& fds, size_t& numFds) {
if (t.deltas.size() > ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
return BAD_VALUE;
@@ -1594,10 +1595,10 @@
/**
* \brief Wrap `::android::FrameEventHistoryData` in
- * `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
*
* \param[out] t The wrapper of type
- * `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
* \param[out] nh The array of array of native handles that are referred to by
* members of \p t.
* \param[in] l The source `::android::FrameEventHistoryDelta`.
@@ -1606,7 +1607,7 @@
* native handle. All the non-`nullptr` elements must be deleted individually
* with `native_handle_delete()`.
*/
-inline bool wrapAs(IOmxBufferProducer::FrameEventHistoryDelta* t,
+inline bool wrapAs(HGraphicBufferProducer::FrameEventHistoryDelta* t,
std::vector<std::vector<native_handle_t*> >* nh,
::android::FrameEventHistoryDelta const& l) {
@@ -1644,17 +1645,17 @@
}
/**
- * \brief Convert `IOmxBufferProducer::FrameEventHistoryDelta` to
+ * \brief Convert `HGraphicBufferProducer::FrameEventHistoryDelta` to
* `::android::FrameEventHistoryDelta`.
*
* \param[out] l The destination `::android::FrameEventHistoryDelta`.
- * \param[in] t The source `IOmxBufferProducer::FrameEventHistoryDelta`.
+ * \param[in] t The source `HGraphicBufferProducer::FrameEventHistoryDelta`.
*
* This function will duplicate all file descriptors contained in \p t.
*/
inline bool convertTo(
::android::FrameEventHistoryDelta* l,
- IOmxBufferProducer::FrameEventHistoryDelta const& t) {
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t) {
size_t const baseSize = getFlattenedSize(t);
std::unique_ptr<uint8_t[]> baseBuffer(
@@ -1829,18 +1830,18 @@
return true;
}
-// Ref: frameworks/native/libs/gui/IGraphicBufferProducer.cpp:
-// IGraphicBufferProducer::QueueBufferInput
+// Ref: frameworks/native/libs/gui/BGraphicBufferProducer.cpp:
+// BGraphicBufferProducer::QueueBufferInput
/**
* \brief Return a lower bound on the size of the buffer required to flatten
- * `IOmxBufferProducer::QueueBufferInput`.
+ * `HGraphicBufferProducer::QueueBufferInput`.
*
- * \param[in] t The input `IOmxBufferProducer::QueueBufferInput`.
+ * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
* \return A lower bound on the size of the flat buffer.
*/
constexpr size_t minFlattenedSize(
- IOmxBufferProducer::QueueBufferInput const& /* t */) {
+ HGraphicBufferProducer::QueueBufferInput const& /* t */) {
return sizeof(int64_t) + // timestamp
sizeof(int) + // isAutoTimestamp
sizeof(android_dataspace) + // dataSpace
@@ -1853,12 +1854,12 @@
/**
* \brief Return the size of the buffer required to flatten
- * `IOmxBufferProducer::QueueBufferInput`.
+ * `HGraphicBufferProducer::QueueBufferInput`.
*
- * \param[in] t The input `IOmxBufferProducer::QueueBufferInput`.
+ * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
* \return The required size of the flat buffer.
*/
-inline size_t getFlattenedSize(IOmxBufferProducer::QueueBufferInput const& t) {
+inline size_t getFlattenedSize(HGraphicBufferProducer::QueueBufferInput const& t) {
return minFlattenedSize(t) +
getFenceFlattenedSize(t.fence) +
getFlattenedSize(t.surfaceDamage);
@@ -1866,20 +1867,20 @@
/**
* \brief Return the number of file descriptors contained in
- * `IOmxBufferProducer::QueueBufferInput`.
+ * `HGraphicBufferProducer::QueueBufferInput`.
*
- * \param[in] t The input `IOmxBufferProducer::QueueBufferInput`.
+ * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
* \return The number of file descriptors contained in \p t.
*/
inline size_t getFdCount(
- IOmxBufferProducer::QueueBufferInput const& t) {
+ HGraphicBufferProducer::QueueBufferInput const& t) {
return getFenceFdCount(t.fence);
}
/**
- * \brief Flatten `IOmxBufferProducer::QueueBufferInput`.
+ * \brief Flatten `HGraphicBufferProducer::QueueBufferInput`.
*
- * \param[in] t The source `IOmxBufferProducer::QueueBufferInput`.
+ * \param[in] t The source `HGraphicBufferProducer::QueueBufferInput`.
* \param[out] nh The native handle cloned from `t.fence`.
* \param[in,out] buffer The pointer to the flat non-fd buffer.
* \param[in,out] size The size of the flat non-fd buffer.
@@ -1888,7 +1889,7 @@
* \return `NO_ERROR` on success; other value on failure.
*
* This function will duplicate the file descriptor in `t.fence`. */
-inline status_t flatten(IOmxBufferProducer::QueueBufferInput const& t,
+inline status_t flatten(HGraphicBufferProducer::QueueBufferInput const& t,
native_handle_t** nh,
void*& buffer, size_t& size, int*& fds, size_t& numFds) {
if (size < getFlattenedSize(t)) {
@@ -1919,9 +1920,9 @@
}
/**
- * \brief Unflatten `IOmxBufferProducer::QueueBufferInput`.
+ * \brief Unflatten `HGraphicBufferProducer::QueueBufferInput`.
*
- * \param[out] t The destination `IOmxBufferProducer::QueueBufferInput`.
+ * \param[out] t The destination `HGraphicBufferProducer::QueueBufferInput`.
* \param[out] nh The underlying native handle for `t->fence`.
* \param[in,out] buffer The pointer to the flat non-fd buffer.
* \param[in,out] size The size of the flat non-fd buffer.
@@ -1935,7 +1936,7 @@
* afterwards.
*/
inline status_t unflatten(
- IOmxBufferProducer::QueueBufferInput* t, native_handle_t** nh,
+ HGraphicBufferProducer::QueueBufferInput* t, native_handle_t** nh,
void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
if (size < minFlattenedSize(*t)) {
return NO_MEMORY;
@@ -1971,13 +1972,13 @@
}
/**
- * \brief Wrap `IGraphicBufferProducer::QueueBufferInput` in
- * `IOmxBufferProducer::QueueBufferInput`.
+ * \brief Wrap `BGraphicBufferProducer::QueueBufferInput` in
+ * `HGraphicBufferProducer::QueueBufferInput`.
*
* \param[out] t The wrapper of type
- * `IOmxBufferProducer::QueueBufferInput`.
+ * `HGraphicBufferProducer::QueueBufferInput`.
* \param[out] nh The underlying native handle for `t->fence`.
- * \param[in] l The source `IGraphicBufferProducer::QueueBufferInput`.
+ * \param[in] l The source `BGraphicBufferProducer::QueueBufferInput`.
*
* If the return value is `true` and `t->fence` contains a valid file
* descriptor, \p nh will be a newly created native handle holding that file
@@ -1985,9 +1986,9 @@
* afterwards.
*/
inline bool wrapAs(
- IOmxBufferProducer::QueueBufferInput* t,
+ HGraphicBufferProducer::QueueBufferInput* t,
native_handle_t** nh,
- IGraphicBufferProducer::QueueBufferInput const& l) {
+ BGraphicBufferProducer::QueueBufferInput const& l) {
size_t const baseSize = l.getFlattenedSize();
std::unique_ptr<uint8_t[]> baseBuffer(
@@ -2023,17 +2024,17 @@
}
/**
- * \brief Convert `IOmxBufferProducer::QueueBufferInput` to
- * `IGraphicBufferProducer::QueueBufferInput`.
+ * \brief Convert `HGraphicBufferProducer::QueueBufferInput` to
+ * `BGraphicBufferProducer::QueueBufferInput`.
*
- * \param[out] l The destination `IGraphicBufferProducer::QueueBufferInput`.
- * \param[in] t The source `IOmxBufferProducer::QueueBufferInput`.
+ * \param[out] l The destination `BGraphicBufferProducer::QueueBufferInput`.
+ * \param[in] t The source `HGraphicBufferProducer::QueueBufferInput`.
*
* If `t.fence` has a valid file descriptor, it will be duplicated.
*/
inline bool convertTo(
- IGraphicBufferProducer::QueueBufferInput* l,
- IOmxBufferProducer::QueueBufferInput const& t) {
+ BGraphicBufferProducer::QueueBufferInput* l,
+ HGraphicBufferProducer::QueueBufferInput const& t) {
size_t const baseSize = getFlattenedSize(t);
std::unique_ptr<uint8_t[]> baseBuffer(
@@ -2072,28 +2073,28 @@
return true;
}
-// Ref: frameworks/native/libs/gui/IGraphicBufferProducer.cpp:
-// IGraphicBufferProducer::QueueBufferOutput
+// Ref: frameworks/native/libs/gui/BGraphicBufferProducer.cpp:
+// BGraphicBufferProducer::QueueBufferOutput
/**
- * \brief Wrap `IGraphicBufferProducer::QueueBufferOutput` in
- * `IOmxBufferProducer::QueueBufferOutput`.
+ * \brief Wrap `BGraphicBufferProducer::QueueBufferOutput` in
+ * `HGraphicBufferProducer::QueueBufferOutput`.
*
* \param[out] t The wrapper of type
- * `IOmxBufferProducer::QueueBufferOutput`.
+ * `HGraphicBufferProducer::QueueBufferOutput`.
* \param[out] nh The array of array of native handles that are referred to by
* members of \p t.
- * \param[in] l The source `IGraphicBufferProducer::QueueBufferOutput`.
+ * \param[in] l The source `BGraphicBufferProducer::QueueBufferOutput`.
*
* On success, each member of \p nh will be either `nullptr` or a newly created
* native handle. All the non-`nullptr` elements must be deleted individually
* with `native_handle_delete()`.
*/
-// wrap: IGraphicBufferProducer::QueueBufferOutput ->
-// IOmxBufferProducer::QueueBufferOutput
-inline bool wrapAs(IOmxBufferProducer::QueueBufferOutput* t,
+// wrap: BGraphicBufferProducer::QueueBufferOutput ->
+// HGraphicBufferProducer::QueueBufferOutput
+inline bool wrapAs(HGraphicBufferProducer::QueueBufferOutput* t,
std::vector<std::vector<native_handle_t*> >* nh,
- IGraphicBufferProducer::QueueBufferOutput const& l) {
+ BGraphicBufferProducer::QueueBufferOutput const& l) {
if (!wrapAs(&(t->frameTimestamps), nh, l.frameTimestamps)) {
return false;
}
@@ -2107,19 +2108,19 @@
}
/**
- * \brief Convert `IOmxBufferProducer::QueueBufferOutput` to
- * `IGraphicBufferProducer::QueueBufferOutput`.
+ * \brief Convert `HGraphicBufferProducer::QueueBufferOutput` to
+ * `BGraphicBufferProducer::QueueBufferOutput`.
*
- * \param[out] l The destination `IGraphicBufferProducer::QueueBufferOutput`.
- * \param[in] t The source `IOmxBufferProducer::QueueBufferOutput`.
+ * \param[out] l The destination `BGraphicBufferProducer::QueueBufferOutput`.
+ * \param[in] t The source `HGraphicBufferProducer::QueueBufferOutput`.
*
* This function will duplicate all file descriptors contained in \p t.
*/
-// convert: IOmxBufferProducer::QueueBufferOutput ->
-// IGraphicBufferProducer::QueueBufferOutput
+// convert: HGraphicBufferProducer::QueueBufferOutput ->
+// BGraphicBufferProducer::QueueBufferOutput
inline bool convertTo(
- IGraphicBufferProducer::QueueBufferOutput* l,
- IOmxBufferProducer::QueueBufferOutput const& t) {
+ BGraphicBufferProducer::QueueBufferOutput* l,
+ HGraphicBufferProducer::QueueBufferOutput const& t) {
if (!convertTo(&(l->frameTimestamps), t.frameTimestamps)) {
return false;
}
@@ -2133,39 +2134,39 @@
}
/**
- * \brief Convert `IGraphicBufferProducer::DisconnectMode` to
- * `IOmxBufferProducer::DisconnectMode`.
+ * \brief Convert `BGraphicBufferProducer::DisconnectMode` to
+ * `HGraphicBufferProducer::DisconnectMode`.
*
- * \param[in] l The source `IGraphicBufferProducer::DisconnectMode`.
- * \return The corresponding `IOmxBufferProducer::DisconnectMode`.
+ * \param[in] l The source `BGraphicBufferProducer::DisconnectMode`.
+ * \return The corresponding `HGraphicBufferProducer::DisconnectMode`.
*/
-inline IOmxBufferProducer::DisconnectMode toOmxDisconnectMode(
- IGraphicBufferProducer::DisconnectMode l) {
+inline HGraphicBufferProducer::DisconnectMode toOmxDisconnectMode(
+ BGraphicBufferProducer::DisconnectMode l) {
switch (l) {
- case IGraphicBufferProducer::DisconnectMode::Api:
- return IOmxBufferProducer::DisconnectMode::API;
- case IGraphicBufferProducer::DisconnectMode::AllLocal:
- return IOmxBufferProducer::DisconnectMode::ALL_LOCAL;
+ case BGraphicBufferProducer::DisconnectMode::Api:
+ return HGraphicBufferProducer::DisconnectMode::API;
+ case BGraphicBufferProducer::DisconnectMode::AllLocal:
+ return HGraphicBufferProducer::DisconnectMode::ALL_LOCAL;
}
- return IOmxBufferProducer::DisconnectMode::API;
+ return HGraphicBufferProducer::DisconnectMode::API;
}
/**
- * \brief Convert `IOmxBufferProducer::DisconnectMode` to
- * `IGraphicBufferProducer::DisconnectMode`.
+ * \brief Convert `HGraphicBufferProducer::DisconnectMode` to
+ * `BGraphicBufferProducer::DisconnectMode`.
*
- * \param[in] l The source `IOmxBufferProducer::DisconnectMode`.
- * \return The corresponding `IGraphicBufferProducer::DisconnectMode`.
+ * \param[in] l The source `HGraphicBufferProducer::DisconnectMode`.
+ * \return The corresponding `BGraphicBufferProducer::DisconnectMode`.
*/
-inline IGraphicBufferProducer::DisconnectMode toGuiDisconnectMode(
- IOmxBufferProducer::DisconnectMode t) {
+inline BGraphicBufferProducer::DisconnectMode toGuiDisconnectMode(
+ HGraphicBufferProducer::DisconnectMode t) {
switch (t) {
- case IOmxBufferProducer::DisconnectMode::API:
- return IGraphicBufferProducer::DisconnectMode::Api;
- case IOmxBufferProducer::DisconnectMode::ALL_LOCAL:
- return IGraphicBufferProducer::DisconnectMode::AllLocal;
+ case HGraphicBufferProducer::DisconnectMode::API:
+ return BGraphicBufferProducer::DisconnectMode::Api;
+ case HGraphicBufferProducer::DisconnectMode::ALL_LOCAL:
+ return BGraphicBufferProducer::DisconnectMode::AllLocal;
}
- return IGraphicBufferProducer::DisconnectMode::Api;
+ return BGraphicBufferProducer::DisconnectMode::Api;
}
} // namespace implementation
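
Note on the renamed conversion helpers above (illustrative only, not part of the patch): the H-prefixed typedef now refers to the graphics.bufferqueue@1.0 HIDL producer, the B-prefixed typedef keeps the binder-side ::android::IGraphicBufferProducer, and the wrapAs()/convertTo() overloads in this header translate QueueBufferInput between the two. A minimal caller sketch, assuming only the declarations shown in this file; function and variable names are made up:

    using namespace android::hardware::media::omx::V1_0::implementation;

    // Round-trip a binder-side QueueBufferInput through its HIDL form.
    bool roundTripQueueBufferInput(
            BGraphicBufferProducer::QueueBufferInput const& in) {
        HGraphicBufferProducer::QueueBufferInput tInput;
        native_handle_t* nh = nullptr;        // set by wrapAs if a fence fd is cloned
        if (!wrapAs(&tInput, &nh, in)) {
            return false;                     // flattening of the input failed
        }
        BGraphicBufferProducer::QueueBufferInput out;
        bool ok = convertTo(&out, tInput);    // duplicates the fence fd again
        native_handle_delete(nh);             // per the header docs, delete the handle
        return ok;
    }
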
diff --git a/media/libstagefright/omx/hal/1.0/impl/Omx.cpp b/media/libstagefright/omx/1.0/Omx.cpp
similarity index 96%
rename from media/libstagefright/omx/hal/1.0/impl/Omx.cpp
rename to media/libstagefright/omx/1.0/Omx.cpp
index 0ef7c8c..134c661 100644
--- a/media/libstagefright/omx/hal/1.0/impl/Omx.cpp
+++ b/media/libstagefright/omx/1.0/Omx.cpp
@@ -22,13 +22,13 @@
#include <OMX_Core.h>
#include <OMX_AsString.h>
-#include "../../../OMXUtils.h"
-#include "../../../OMXMaster.h"
-#include "../../../GraphicBufferSource.h"
+#include "../OMXUtils.h"
+#include "../OMXMaster.h"
+#include "../GraphicBufferSource.h"
#include "WOmxNode.h"
#include "WOmxObserver.h"
-#include "WOmxBufferProducer.h"
+#include "WGraphicBufferProducer.h"
#include "WGraphicBufferSource.h"
#include "Conversion.h"
@@ -52,7 +52,6 @@
Return<void> Omx::listNodes(listNodes_cb _hidl_cb) {
std::list<::android::IOMX::ComponentInfo> list;
- OMX_U32 index = 0;
char componentName[256];
for (OMX_U32 index = 0;
mMaster->enumerateComponents(
@@ -136,7 +135,7 @@
bufferProducer = graphicBufferSource->getIGraphicBufferProducer();
_hidl_cb(toStatus(OK),
- new TWOmxBufferProducer(bufferProducer),
+ new TWGraphicBufferProducer(bufferProducer),
new TWGraphicBufferSource(graphicBufferSource));
return Void();
}
diff --git a/media/libstagefright/omx/hal/1.0/impl/Omx.h b/media/libstagefright/omx/1.0/Omx.h
similarity index 97%
rename from media/libstagefright/omx/hal/1.0/impl/Omx.h
rename to media/libstagefright/omx/1.0/Omx.h
index 3e9ea73..001e8cb 100644
--- a/media/libstagefright/omx/hal/1.0/impl/Omx.h
+++ b/media/libstagefright/omx/1.0/Omx.h
@@ -20,7 +20,7 @@
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
-#include "../../../../include/OMXNodeInstance.h"
+#include "../../include/OMXNodeInstance.h"
#include <android/hardware/media/omx/1.0/IOmx.h>
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp b/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp
new file mode 100644
index 0000000..36bd624
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "WGraphicBufferProducer-impl"
+
+#include <android-base/logging.h>
+
+#include "WGraphicBufferProducer.h"
+#include "WProducerListener.h"
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// TWGraphicBufferProducer
+TWGraphicBufferProducer::TWGraphicBufferProducer(
+ sp<BGraphicBufferProducer> const& base):
+ mBase(base) {
+}
+
+Return<void> TWGraphicBufferProducer::requestBuffer(
+ int32_t slot, requestBuffer_cb _hidl_cb) {
+ sp<GraphicBuffer> buf;
+ status_t status = mBase->requestBuffer(slot, &buf);
+ AnwBuffer anwBuffer;
+ wrapAs(&anwBuffer, *buf);
+ _hidl_cb(static_cast<int32_t>(status), anwBuffer);
+ return Void();
+}
+
+Return<int32_t> TWGraphicBufferProducer::setMaxDequeuedBufferCount(
+ int32_t maxDequeuedBuffers) {
+ return static_cast<int32_t>(mBase->setMaxDequeuedBufferCount(
+ static_cast<int>(maxDequeuedBuffers)));
+}
+
+Return<int32_t> TWGraphicBufferProducer::setAsyncMode(bool async) {
+ return static_cast<int32_t>(mBase->setAsyncMode(async));
+}
+
+Return<void> TWGraphicBufferProducer::dequeueBuffer(
+ uint32_t width, uint32_t height,
+ PixelFormat format, uint32_t usage,
+ bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) {
+ int slot;
+ sp<Fence> fence;
+ ::android::FrameEventHistoryDelta outTimestamps;
+ status_t status = mBase->dequeueBuffer(
+ &slot, &fence,
+ width, height,
+ static_cast<::android::PixelFormat>(format), usage,
+ getFrameTimestamps ? &outTimestamps : nullptr);
+ hidl_handle tFence;
+ FrameEventHistoryDelta tOutTimestamps;
+
+ native_handle_t* nh = nullptr;
+ if ((fence == nullptr) || !wrapAs(&tFence, &nh, *fence)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::dequeueBuffer - "
+ "Invalid output fence";
+ _hidl_cb(static_cast<int32_t>(status),
+ static_cast<int32_t>(slot),
+ tFence,
+ tOutTimestamps);
+ return Void();
+ }
+ std::vector<std::vector<native_handle_t*> > nhAA;
+ if (getFrameTimestamps && !wrapAs(&tOutTimestamps, &nhAA, outTimestamps)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::dequeueBuffer - "
+ "Invalid output timestamps";
+ _hidl_cb(static_cast<int32_t>(status),
+ static_cast<int32_t>(slot),
+ tFence,
+ tOutTimestamps);
+ native_handle_delete(nh);
+ return Void();
+ }
+
+ _hidl_cb(static_cast<int32_t>(status),
+ static_cast<int32_t>(slot),
+ tFence,
+ tOutTimestamps);
+ native_handle_delete(nh);
+ if (getFrameTimestamps) {
+ for (auto& nhA : nhAA) {
+ for (auto& handle : nhA) {
+ native_handle_delete(handle);
+ }
+ }
+ }
+ return Void();
+}
+
+Return<int32_t> TWGraphicBufferProducer::detachBuffer(int32_t slot) {
+ return static_cast<int32_t>(mBase->detachBuffer(slot));
+}
+
+Return<void> TWGraphicBufferProducer::detachNextBuffer(
+ detachNextBuffer_cb _hidl_cb) {
+ sp<GraphicBuffer> outBuffer;
+ sp<Fence> outFence;
+ status_t status = mBase->detachNextBuffer(&outBuffer, &outFence);
+ AnwBuffer tBuffer;
+ hidl_handle tFence;
+
+ if (outBuffer == nullptr) {
+ LOG(ERROR) << "TWGraphicBufferProducer::detachNextBuffer - "
+ "Invalid output buffer";
+ _hidl_cb(static_cast<int32_t>(status), tBuffer, tFence);
+ return Void();
+ }
+ wrapAs(&tBuffer, *outBuffer);
+ native_handle_t* nh = nullptr;
+ if ((outFence != nullptr) && !wrapAs(&tFence, &nh, *outFence)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::detachNextBuffer - "
+ "Invalid output fence";
+ _hidl_cb(static_cast<int32_t>(status), tBuffer, tFence);
+ return Void();
+ }
+
+ _hidl_cb(static_cast<int32_t>(status), tBuffer, tFence);
+ native_handle_delete(nh);
+ return Void();
+}
+
+Return<void> TWGraphicBufferProducer::attachBuffer(
+ const AnwBuffer& buffer,
+ attachBuffer_cb _hidl_cb) {
+ int outSlot;
+ sp<GraphicBuffer> lBuffer = new GraphicBuffer();
+ if (!convertTo(lBuffer.get(), buffer)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::attachBuffer - "
+ "Invalid input native window buffer";
+ _hidl_cb(static_cast<int32_t>(BAD_VALUE), -1);
+ return Void();
+ }
+ status_t status = mBase->attachBuffer(&outSlot, lBuffer);
+
+ _hidl_cb(static_cast<int32_t>(status), static_cast<int32_t>(outSlot));
+ return Void();
+}
+
+Return<void> TWGraphicBufferProducer::queueBuffer(
+ int32_t slot, const QueueBufferInput& input,
+ queueBuffer_cb _hidl_cb) {
+ QueueBufferOutput tOutput;
+ BGraphicBufferProducer::QueueBufferInput lInput(
+ 0, false, HAL_DATASPACE_UNKNOWN,
+ ::android::Rect(0, 0, 1, 1),
+ NATIVE_WINDOW_SCALING_MODE_FREEZE,
+ 0, ::android::Fence::NO_FENCE);
+ if (!convertTo(&lInput, input)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::queueBuffer - "
+ "Invalid input";
+ _hidl_cb(static_cast<int32_t>(BAD_VALUE), tOutput);
+ return Void();
+ }
+ BGraphicBufferProducer::QueueBufferOutput lOutput;
+ status_t status = mBase->queueBuffer(
+ static_cast<int>(slot), lInput, &lOutput);
+
+ std::vector<std::vector<native_handle_t*> > nhAA;
+ if (!wrapAs(&tOutput, &nhAA, lOutput)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::queueBuffer - "
+ "Invalid output";
+ _hidl_cb(static_cast<int32_t>(BAD_VALUE), tOutput);
+ return Void();
+ }
+
+ _hidl_cb(static_cast<int32_t>(status), tOutput);
+ for (auto& nhA : nhAA) {
+ for (auto& nh : nhA) {
+ native_handle_delete(nh);
+ }
+ }
+ return Void();
+}
+
+Return<int32_t> TWGraphicBufferProducer::cancelBuffer(
+ int32_t slot, const hidl_handle& fence) {
+ sp<Fence> lFence = new Fence();
+ if (!convertTo(lFence.get(), fence)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::cancelBuffer - "
+ "Invalid input fence";
+ return static_cast<int32_t>(BAD_VALUE);
+ }
+ return static_cast<int32_t>(mBase->cancelBuffer(static_cast<int>(slot), lFence));
+}
+
+Return<void> TWGraphicBufferProducer::query(int32_t what, query_cb _hidl_cb) {
+ int lValue;
+ int lReturn = mBase->query(static_cast<int>(what), &lValue);
+ _hidl_cb(static_cast<int32_t>(lReturn), static_cast<int32_t>(lValue));
+ return Void();
+}
+
+Return<void> TWGraphicBufferProducer::connect(
+ const sp<HProducerListener>& listener,
+ int32_t api, bool producerControlledByApp, connect_cb _hidl_cb) {
+ sp<BProducerListener> lListener = listener == nullptr ?
+ nullptr : new LWProducerListener(listener);
+ BGraphicBufferProducer::QueueBufferOutput lOutput;
+ status_t status = mBase->connect(lListener,
+ static_cast<int>(api),
+ producerControlledByApp,
+ &lOutput);
+
+ QueueBufferOutput tOutput;
+ std::vector<std::vector<native_handle_t*> > nhAA;
+ if (!wrapAs(&tOutput, &nhAA, lOutput)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::connect - "
+ "Invalid output";
+ _hidl_cb(static_cast<int32_t>(status), tOutput);
+ return Void();
+ }
+
+ _hidl_cb(static_cast<int32_t>(status), tOutput);
+ for (auto& nhA : nhAA) {
+ for (auto& nh : nhA) {
+ native_handle_delete(nh);
+ }
+ }
+ return Void();
+}
+
+Return<int32_t> TWGraphicBufferProducer::disconnect(
+ int32_t api, DisconnectMode mode) {
+ return static_cast<int32_t>(mBase->disconnect(
+ static_cast<int>(api),
+ toGuiDisconnectMode(mode)));
+}
+
+Return<int32_t> TWGraphicBufferProducer::setSidebandStream(const hidl_handle& stream) {
+ return static_cast<int32_t>(mBase->setSidebandStream(NativeHandle::create(
+ native_handle_clone(stream), true)));
+}
+
+Return<void> TWGraphicBufferProducer::allocateBuffers(
+ uint32_t width, uint32_t height, PixelFormat format, uint32_t usage) {
+ mBase->allocateBuffers(
+ width, height,
+ static_cast<::android::PixelFormat>(format),
+ usage);
+ return Void();
+}
+
+Return<int32_t> TWGraphicBufferProducer::allowAllocation(bool allow) {
+ return static_cast<int32_t>(mBase->allowAllocation(allow));
+}
+
+Return<int32_t> TWGraphicBufferProducer::setGenerationNumber(uint32_t generationNumber) {
+ return static_cast<int32_t>(mBase->setGenerationNumber(generationNumber));
+}
+
+Return<void> TWGraphicBufferProducer::getConsumerName(getConsumerName_cb _hidl_cb) {
+ _hidl_cb(mBase->getConsumerName().string());
+ return Void();
+}
+
+Return<int32_t> TWGraphicBufferProducer::setSharedBufferMode(bool sharedBufferMode) {
+ return static_cast<int32_t>(mBase->setSharedBufferMode(sharedBufferMode));
+}
+
+Return<int32_t> TWGraphicBufferProducer::setAutoRefresh(bool autoRefresh) {
+ return static_cast<int32_t>(mBase->setAutoRefresh(autoRefresh));
+}
+
+Return<int32_t> TWGraphicBufferProducer::setDequeueTimeout(int64_t timeoutNs) {
+ return static_cast<int32_t>(mBase->setDequeueTimeout(timeoutNs));
+}
+
+Return<void> TWGraphicBufferProducer::getLastQueuedBuffer(
+ getLastQueuedBuffer_cb _hidl_cb) {
+ sp<GraphicBuffer> lOutBuffer = new GraphicBuffer();
+ sp<Fence> lOutFence = new Fence();
+ float lOutTransformMatrix[16];
+ status_t status = mBase->getLastQueuedBuffer(
+ &lOutBuffer, &lOutFence, lOutTransformMatrix);
+
+ AnwBuffer tOutBuffer;
+ if (lOutBuffer != nullptr) {
+ wrapAs(&tOutBuffer, *lOutBuffer);
+ }
+ hidl_handle tOutFence;
+ native_handle_t* nh = nullptr;
+ if ((lOutFence == nullptr) || !wrapAs(&tOutFence, &nh, *lOutFence)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::getLastQueuedBuffer - "
+ "Invalid output fence";
+ _hidl_cb(static_cast<int32_t>(status),
+ tOutBuffer,
+ tOutFence,
+ hidl_array<float, 16>());
+ return Void();
+ }
+ hidl_array<float, 16> tOutTransformMatrix(lOutTransformMatrix);
+
+ _hidl_cb(static_cast<int32_t>(status), tOutBuffer, tOutFence, tOutTransformMatrix);
+ native_handle_delete(nh);
+ return Void();
+}
+
+Return<void> TWGraphicBufferProducer::getFrameTimestamps(
+ getFrameTimestamps_cb _hidl_cb) {
+ ::android::FrameEventHistoryDelta lDelta;
+ mBase->getFrameTimestamps(&lDelta);
+
+ FrameEventHistoryDelta tDelta;
+ std::vector<std::vector<native_handle_t*> > nhAA;
+ if (!wrapAs(&tDelta, &nhAA, lDelta)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::getFrameTimestamps - "
+ "Invalid output frame timestamps";
+ _hidl_cb(tDelta);
+ return Void();
+ }
+
+ _hidl_cb(tDelta);
+ for (auto& nhA : nhAA) {
+ for (auto& nh : nhA) {
+ native_handle_delete(nh);
+ }
+ }
+ return Void();
+}
+
+Return<void> TWGraphicBufferProducer::getUniqueId(getUniqueId_cb _hidl_cb) {
+ uint64_t outId;
+ status_t status = mBase->getUniqueId(&outId);
+ _hidl_cb(static_cast<int32_t>(status), outId);
+ return Void();
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferProducer.h b/media/libstagefright/omx/1.0/WGraphicBufferProducer.h
new file mode 100644
index 0000000..4a3fe0c
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WGraphicBufferProducer.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERPRODUCER_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERPRODUCER_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <binder/Binder.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/IProducerListener.h>
+
+#include <android/hardware/graphics/bufferqueue/1.0/IGraphicBufferProducer.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
+using ::android::hardware::media::V1_0::AnwBuffer;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+typedef ::android::hardware::graphics::bufferqueue::V1_0::
+ IGraphicBufferProducer HGraphicBufferProducer;
+typedef ::android::hardware::graphics::bufferqueue::V1_0::
+ IProducerListener HProducerListener;
+
+typedef ::android::IGraphicBufferProducer BGraphicBufferProducer;
+typedef ::android::IProducerListener BProducerListener;
+using ::android::BnGraphicBufferProducer;
+
+struct TWGraphicBufferProducer : public HGraphicBufferProducer {
+ sp<BGraphicBufferProducer> mBase;
+ TWGraphicBufferProducer(sp<BGraphicBufferProducer> const& base);
+ Return<void> requestBuffer(int32_t slot, requestBuffer_cb _hidl_cb)
+ override;
+ Return<int32_t> setMaxDequeuedBufferCount(int32_t maxDequeuedBuffers)
+ override;
+ Return<int32_t> setAsyncMode(bool async) override;
+ Return<void> dequeueBuffer(
+ uint32_t width, uint32_t height, PixelFormat format, uint32_t usage,
+ bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) override;
+ Return<int32_t> detachBuffer(int32_t slot) override;
+ Return<void> detachNextBuffer(detachNextBuffer_cb _hidl_cb) override;
+ Return<void> attachBuffer(const AnwBuffer& buffer, attachBuffer_cb _hidl_cb)
+ override;
+ Return<void> queueBuffer(
+ int32_t slot, const HGraphicBufferProducer::QueueBufferInput& input,
+ queueBuffer_cb _hidl_cb) override;
+ Return<int32_t> cancelBuffer(int32_t slot, const hidl_handle& fence)
+ override;
+ Return<void> query(int32_t what, query_cb _hidl_cb) override;
+ Return<void> connect(const sp<HProducerListener>& listener,
+ int32_t api, bool producerControlledByApp,
+ connect_cb _hidl_cb) override;
+ Return<int32_t> disconnect(
+ int32_t api,
+ HGraphicBufferProducer::DisconnectMode mode) override;
+ Return<int32_t> setSidebandStream(const hidl_handle& stream) override;
+ Return<void> allocateBuffers(
+ uint32_t width, uint32_t height,
+ PixelFormat format, uint32_t usage) override;
+ Return<int32_t> allowAllocation(bool allow) override;
+ Return<int32_t> setGenerationNumber(uint32_t generationNumber) override;
+ Return<void> getConsumerName(getConsumerName_cb _hidl_cb) override;
+ Return<int32_t> setSharedBufferMode(bool sharedBufferMode) override;
+ Return<int32_t> setAutoRefresh(bool autoRefresh) override;
+ Return<int32_t> setDequeueTimeout(int64_t timeoutNs) override;
+ Return<void> getLastQueuedBuffer(getLastQueuedBuffer_cb _hidl_cb) override;
+ Return<void> getFrameTimestamps(getFrameTimestamps_cb _hidl_cb) override;
+ Return<void> getUniqueId(getUniqueId_cb _hidl_cb) override;
+};
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERPRODUCER_H
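
A quick usage sketch for the new wrapper above (illustrative, not part of the patch): the Omx.cpp hunk earlier in this change hands the producer obtained from GraphicBufferSource to TWGraphicBufferProducer, which forwards each call to mBase and converts arguments with the Conversion.h helpers. Assuming only the declarations in this header, a caller might do the following; the function and parameter names are placeholders:

    using android::hardware::media::omx::V1_0::implementation::TWGraphicBufferProducer;
    using android::hardware::media::omx::V1_0::implementation::HGraphicBufferProducer;
    using android::hardware::media::omx::V1_0::implementation::BGraphicBufferProducer;

    // Expose a binder-side producer over HIDL.
    android::sp<HGraphicBufferProducer> toHidlProducer(
            android::sp<BGraphicBufferProducer> const& producer) {
        return new TWGraphicBufferProducer(producer);
    }
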
diff --git a/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
similarity index 100%
rename from media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.cpp
rename to media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
diff --git a/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h b/media/libstagefright/omx/1.0/WGraphicBufferSource.h
similarity index 98%
rename from media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h
rename to media/libstagefright/omx/1.0/WGraphicBufferSource.h
index 8cf11ca..73b86b8 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WGraphicBufferSource.h
+++ b/media/libstagefright/omx/1.0/WGraphicBufferSource.h
@@ -28,7 +28,7 @@
#include <android/BnGraphicBufferSource.h>
-#include "../../../GraphicBufferSource.h"
+#include "../GraphicBufferSource.h"
namespace android {
namespace hardware {
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.cpp b/media/libstagefright/omx/1.0/WOmxBufferSource.cpp
similarity index 100%
rename from media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.cpp
rename to media/libstagefright/omx/1.0/WOmxBufferSource.cpp
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.h b/media/libstagefright/omx/1.0/WOmxBufferSource.h
similarity index 100%
rename from media/libstagefright/omx/hal/1.0/impl/WOmxBufferSource.h
rename to media/libstagefright/omx/1.0/WOmxBufferSource.h
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxNode.cpp b/media/libstagefright/omx/1.0/WOmxNode.cpp
similarity index 100%
rename from media/libstagefright/omx/hal/1.0/impl/WOmxNode.cpp
rename to media/libstagefright/omx/1.0/WOmxNode.cpp
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxNode.h b/media/libstagefright/omx/1.0/WOmxNode.h
similarity index 98%
rename from media/libstagefright/omx/hal/1.0/impl/WOmxNode.h
rename to media/libstagefright/omx/1.0/WOmxNode.h
index 75816ba..8ca3e67 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxNode.h
+++ b/media/libstagefright/omx/1.0/WOmxNode.h
@@ -22,7 +22,7 @@
#include <utils/Errors.h>
-#include "../../../../include/OMXNodeInstance.h"
+#include "../../include/OMXNodeInstance.h"
#include <android/hardware/media/omx/1.0/IOmxNode.h>
#include <android/hardware/media/omx/1.0/IOmxObserver.h>
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp b/media/libstagefright/omx/1.0/WOmxObserver.cpp
similarity index 100%
rename from media/libstagefright/omx/hal/1.0/impl/WOmxObserver.cpp
rename to media/libstagefright/omx/1.0/WOmxObserver.cpp
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxObserver.h b/media/libstagefright/omx/1.0/WOmxObserver.h
similarity index 100%
rename from media/libstagefright/omx/hal/1.0/impl/WOmxObserver.h
rename to media/libstagefright/omx/1.0/WOmxObserver.h
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.cpp b/media/libstagefright/omx/1.0/WProducerListener.cpp
similarity index 69%
rename from media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.cpp
rename to media/libstagefright/omx/1.0/WProducerListener.cpp
index a5eed35..be0d4d5 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.cpp
+++ b/media/libstagefright/omx/1.0/WProducerListener.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "WOmxProducerListener.h"
+#include "WProducerListener.h"
namespace android {
namespace hardware {
@@ -23,32 +23,32 @@
namespace V1_0 {
namespace implementation {
-// TWOmxProducerListener
-TWOmxProducerListener::TWOmxProducerListener(
- sp<IProducerListener> const& base):
+// TWProducerListener
+TWProducerListener::TWProducerListener(
+ sp<BProducerListener> const& base):
mBase(base) {
}
-Return<void> TWOmxProducerListener::onBufferReleased() {
+Return<void> TWProducerListener::onBufferReleased() {
mBase->onBufferReleased();
return Void();
}
-Return<bool> TWOmxProducerListener::needsReleaseNotify() {
+Return<bool> TWProducerListener::needsReleaseNotify() {
return mBase->needsReleaseNotify();
}
-// LWOmxProducerListener
-LWOmxProducerListener::LWOmxProducerListener(
- sp<IOmxProducerListener> const& base):
+// LWProducerListener
+LWProducerListener::LWProducerListener(
+ sp<HProducerListener> const& base):
mBase(base) {
}
-void LWOmxProducerListener::onBufferReleased() {
+void LWProducerListener::onBufferReleased() {
mBase->onBufferReleased();
}
-bool LWOmxProducerListener::needsReleaseNotify() {
+bool LWProducerListener::needsReleaseNotify() {
return static_cast<bool>(mBase->needsReleaseNotify());
}
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.h b/media/libstagefright/omx/1.0/WProducerListener.h
similarity index 76%
rename from media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.h
rename to media/libstagefright/omx/1.0/WProducerListener.h
index e60032e..a75e48a 100644
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxProducerListener.h
+++ b/media/libstagefright/omx/1.0/WProducerListener.h
@@ -23,7 +23,7 @@
#include <binder/IBinder.h>
#include <gui/IProducerListener.h>
-#include <android/hardware/media/omx/1.0/IOmxProducerListener.h>
+#include <android/hardware/graphics/bufferqueue/1.0/IProducerListener.h>
namespace android {
namespace hardware {
@@ -32,7 +32,6 @@
namespace V1_0 {
namespace implementation {
-using ::android::hardware::media::omx::V1_0::IOmxProducerListener;
using ::android::hidl::base::V1_0::IBase;
using ::android::hardware::hidl_array;
using ::android::hardware::hidl_memory;
@@ -42,20 +41,23 @@
using ::android::hardware::Void;
using ::android::sp;
-using ::android::IProducerListener;
+typedef ::android::hardware::graphics::bufferqueue::V1_0::IProducerListener
+ HProducerListener;
+typedef ::android::IProducerListener
+ BProducerListener;
using ::android::BnProducerListener;
-struct TWOmxProducerListener : public IOmxProducerListener {
- sp<IProducerListener> mBase;
- TWOmxProducerListener(sp<IProducerListener> const& base);
+struct TWProducerListener : public HProducerListener {
+ sp<BProducerListener> mBase;
+ TWProducerListener(sp<BProducerListener> const& base);
Return<void> onBufferReleased() override;
Return<bool> needsReleaseNotify() override;
};
-class LWOmxProducerListener : public BnProducerListener {
+class LWProducerListener : public BnProducerListener {
public:
- sp<IOmxProducerListener> mBase;
- LWOmxProducerListener(sp<IOmxProducerListener> const& base);
+ sp<HProducerListener> mBase;
+ LWProducerListener(sp<HProducerListener> const& base);
void onBufferReleased() override;
bool needsReleaseNotify() override;
};
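
For symmetry, a short sketch of the renamed listener wrappers (an assumption-labeled illustration, mirroring the connect() path in WGraphicBufferProducer.cpp above): an incoming HIDL HProducerListener is adapted to the binder interface with LWProducerListener, and a binder-side BProducerListener can be exported to HIDL with TWProducerListener. The helper name below is invented for the example:

    using namespace android::hardware::media::omx::V1_0::implementation;

    // Adapt a HIDL listener for use with a binder IGraphicBufferProducer::connect().
    android::sp<BProducerListener> toBinderListener(
            android::sp<HProducerListener> const& listener) {
        return listener == nullptr ? nullptr : new LWProducerListener(listener);
    }
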
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index 9cba3d0..b1508dc 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -1,19 +1,26 @@
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
-LOCAL_SRC_FILES:= \
- FrameDropper.cpp \
- GraphicBufferSource.cpp \
- BWGraphicBufferSource.cpp \
- OMX.cpp \
- OMXMaster.cpp \
- OMXNodeInstance.cpp \
- OMXUtils.cpp \
- SimpleSoftOMXComponent.cpp \
- SoftOMXComponent.cpp \
- SoftOMXPlugin.cpp \
- SoftVideoDecoderOMXComponent.cpp \
- SoftVideoEncoderOMXComponent.cpp \
+LOCAL_SRC_FILES:= \
+ FrameDropper.cpp \
+ GraphicBufferSource.cpp \
+ BWGraphicBufferSource.cpp \
+ OMX.cpp \
+ OMXMaster.cpp \
+ OMXNodeInstance.cpp \
+ OMXUtils.cpp \
+ SimpleSoftOMXComponent.cpp \
+ SoftOMXComponent.cpp \
+ SoftOMXPlugin.cpp \
+ SoftVideoDecoderOMXComponent.cpp \
+ SoftVideoEncoderOMXComponent.cpp \
+ 1.0/Omx.cpp \
+ 1.0/WGraphicBufferProducer.cpp \
+ 1.0/WProducerListener.cpp \
+ 1.0/WGraphicBufferSource.cpp \
+ 1.0/WOmxNode.cpp \
+ 1.0/WOmxObserver.cpp \
+ 1.0/WOmxBufferSource.cpp \
LOCAL_C_INCLUDES += \
$(TOP)/frameworks/av/media/libstagefright \
@@ -21,25 +28,34 @@
$(TOP)/frameworks/native/include/media/openmax \
$(TOP)/system/libhidl/base/include \
-LOCAL_SHARED_LIBRARIES := \
- libbinder \
- libmedia \
- libutils \
- liblog \
- libui \
- libgui \
- libcutils \
- libstagefright_foundation \
- libdl \
- libhidlbase \
- libhidlmemory \
- android.hidl.memory@1.0 \
+LOCAL_SHARED_LIBRARIES := \
+ libbase \
+ libbinder \
+ libmedia \
+ libutils \
+ liblog \
+ libui \
+ libgui \
+ libcutils \
+ libstagefright_foundation \
+ libdl \
+ libhidlbase \
+ libhidlmemory \
+ android.hidl.base@1.0 \
+ android.hidl.memory@1.0 \
+ android.hardware.media@1.0 \
+ android.hardware.media.omx@1.0 \
+ android.hardware.graphics.common@1.0 \
+ android.hardware.graphics.bufferqueue@1.0 \
+
+LOCAL_EXPORT_C_INCLUDES := \
+ $(TOP)/frameworks/av/include
LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := \
android.hidl.memory@1.0
LOCAL_MODULE:= libstagefright_omx
-LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CFLAGS += -Werror -Wall -Wno-unused-parameter -Wno-documentation
LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow cfi
LOCAL_SANITIZE_DIAG := cfi
diff --git a/media/libstagefright/omx/hal/1.0/Android.mk b/media/libstagefright/omx/hal/1.0/Android.mk
deleted file mode 100644
index c14e909..0000000
--- a/media/libstagefright/omx/hal/1.0/Android.mk
+++ /dev/null
@@ -1,3 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-include $(call all-makefiles-under,$(LOCAL_PATH))
-
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.cpp b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.cpp
deleted file mode 100644
index b6b9a3b..0000000
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.cpp
+++ /dev/null
@@ -1,621 +0,0 @@
-/*
- * Copyright 2016, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "WOmxBufferProducer-impl"
-
-#include <android-base/logging.h>
-
-#include "WOmxBufferProducer.h"
-#include "WOmxProducerListener.h"
-#include "Conversion.h"
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace implementation {
-
-// TWOmxBufferProducer
-TWOmxBufferProducer::TWOmxBufferProducer(
- sp<IGraphicBufferProducer> const& base):
- mBase(base) {
-}
-
-Return<void> TWOmxBufferProducer::requestBuffer(
- int32_t slot, requestBuffer_cb _hidl_cb) {
- sp<GraphicBuffer> buf;
- status_t status = mBase->requestBuffer(slot, &buf);
- AnwBuffer anwBuffer;
- wrapAs(&anwBuffer, *buf);
- _hidl_cb(toStatus(status), anwBuffer);
- return Void();
-}
-
-Return<Status> TWOmxBufferProducer::setMaxDequeuedBufferCount(
- int32_t maxDequeuedBuffers) {
- return toStatus(mBase->setMaxDequeuedBufferCount(
- static_cast<int>(maxDequeuedBuffers)));
-}
-
-Return<Status> TWOmxBufferProducer::setAsyncMode(bool async) {
- return toStatus(mBase->setAsyncMode(async));
-}
-
-Return<void> TWOmxBufferProducer::dequeueBuffer(
- uint32_t width, uint32_t height,
- PixelFormat format, uint32_t usage,
- bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) {
- int slot;
- sp<Fence> fence;
- ::android::FrameEventHistoryDelta outTimestamps;
- status_t status = mBase->dequeueBuffer(
- &slot, &fence,
- width, height,
- static_cast<::android::PixelFormat>(format), usage,
- getFrameTimestamps ? &outTimestamps : nullptr);
- hidl_handle tFence;
- FrameEventHistoryDelta tOutTimestamps;
-
- native_handle_t* nh = nullptr;
- if ((fence == nullptr) || !wrapAs(&tFence, &nh, *fence)) {
- LOG(ERROR) << "TWOmxBufferProducer::dequeueBuffer - "
- "Invalid output fence";
- _hidl_cb(toStatus(status),
- static_cast<int32_t>(slot),
- tFence,
- tOutTimestamps);
- return Void();
- }
- std::vector<std::vector<native_handle_t*> > nhAA;
- if (getFrameTimestamps && !wrapAs(&tOutTimestamps, &nhAA, outTimestamps)) {
- LOG(ERROR) << "TWOmxBufferProducer::dequeueBuffer - "
- "Invalid output timestamps";
- _hidl_cb(toStatus(status),
- static_cast<int32_t>(slot),
- tFence,
- tOutTimestamps);
- native_handle_delete(nh);
- return Void();
- }
-
- _hidl_cb(toStatus(status),
- static_cast<int32_t>(slot),
- tFence,
- tOutTimestamps);
- native_handle_delete(nh);
- if (getFrameTimestamps) {
- for (auto& nhA : nhAA) {
- for (auto& handle : nhA) {
- native_handle_delete(handle);
- }
- }
- }
- return Void();
-}
-
-Return<Status> TWOmxBufferProducer::detachBuffer(int32_t slot) {
- return toStatus(mBase->detachBuffer(slot));
-}
-
-Return<void> TWOmxBufferProducer::detachNextBuffer(
- detachNextBuffer_cb _hidl_cb) {
- sp<GraphicBuffer> outBuffer;
- sp<Fence> outFence;
- status_t status = mBase->detachNextBuffer(&outBuffer, &outFence);
- AnwBuffer tBuffer;
- hidl_handle tFence;
-
- if (outBuffer == nullptr) {
- LOG(ERROR) << "TWOmxBufferProducer::detachNextBuffer - "
- "Invalid output buffer";
- _hidl_cb(toStatus(status), tBuffer, tFence);
- return Void();
- }
- wrapAs(&tBuffer, *outBuffer);
- native_handle_t* nh = nullptr;
- if ((outFence != nullptr) && !wrapAs(&tFence, &nh, *outFence)) {
- LOG(ERROR) << "TWOmxBufferProducer::detachNextBuffer - "
- "Invalid output fence";
- _hidl_cb(toStatus(status), tBuffer, tFence);
- return Void();
- }
-
- _hidl_cb(toStatus(status), tBuffer, tFence);
- native_handle_delete(nh);
- return Void();
-}
-
-Return<void> TWOmxBufferProducer::attachBuffer(
- const AnwBuffer& buffer,
- attachBuffer_cb _hidl_cb) {
- int outSlot;
- sp<GraphicBuffer> lBuffer = new GraphicBuffer();
- if (!convertTo(lBuffer.get(), buffer)) {
- LOG(ERROR) << "TWOmxBufferProducer::attachBuffer - "
- "Invalid input native window buffer";
- _hidl_cb(toStatus(BAD_VALUE), -1);
- return Void();
- }
- status_t status = mBase->attachBuffer(&outSlot, lBuffer);
-
- _hidl_cb(toStatus(status), static_cast<int32_t>(outSlot));
- return Void();
-}
-
-Return<void> TWOmxBufferProducer::queueBuffer(
- int32_t slot, const QueueBufferInput& input,
- queueBuffer_cb _hidl_cb) {
- QueueBufferOutput tOutput;
- IGraphicBufferProducer::QueueBufferInput lInput(
- 0, false, HAL_DATASPACE_UNKNOWN,
- ::android::Rect(0, 0, 1, 1),
- NATIVE_WINDOW_SCALING_MODE_FREEZE,
- 0, ::android::Fence::NO_FENCE);
- if (!convertTo(&lInput, input)) {
- LOG(ERROR) << "TWOmxBufferProducer::queueBuffer - "
- "Invalid input";
- _hidl_cb(toStatus(BAD_VALUE), tOutput);
- return Void();
- }
- IGraphicBufferProducer::QueueBufferOutput lOutput;
- status_t status = mBase->queueBuffer(
- static_cast<int>(slot), lInput, &lOutput);
-
- std::vector<std::vector<native_handle_t*> > nhAA;
- if (!wrapAs(&tOutput, &nhAA, lOutput)) {
- LOG(ERROR) << "TWOmxBufferProducer::queueBuffer - "
- "Invalid output";
- _hidl_cb(toStatus(BAD_VALUE), tOutput);
- return Void();
- }
-
- _hidl_cb(toStatus(status), tOutput);
- for (auto& nhA : nhAA) {
- for (auto& nh : nhA) {
- native_handle_delete(nh);
- }
- }
- return Void();
-}
-
-Return<Status> TWOmxBufferProducer::cancelBuffer(
- int32_t slot, const hidl_handle& fence) {
- sp<Fence> lFence = new Fence();
- if (!convertTo(lFence.get(), fence)) {
- LOG(ERROR) << "TWOmxBufferProducer::cancelBuffer - "
- "Invalid input fence";
- return toStatus(BAD_VALUE);
- }
- return toStatus(mBase->cancelBuffer(static_cast<int>(slot), lFence));
-}
-
-Return<void> TWOmxBufferProducer::query(int32_t what, query_cb _hidl_cb) {
- int lValue;
- int lReturn = mBase->query(static_cast<int>(what), &lValue);
- _hidl_cb(static_cast<int32_t>(lReturn), static_cast<int32_t>(lValue));
- return Void();
-}
-
-Return<void> TWOmxBufferProducer::connect(
- const sp<IOmxProducerListener>& listener,
- int32_t api, bool producerControlledByApp, connect_cb _hidl_cb) {
- sp<IProducerListener> lListener = listener == nullptr ?
- nullptr : new LWOmxProducerListener(listener);
- IGraphicBufferProducer::QueueBufferOutput lOutput;
- status_t status = mBase->connect(lListener,
- static_cast<int>(api),
- producerControlledByApp,
- &lOutput);
-
- QueueBufferOutput tOutput;
- std::vector<std::vector<native_handle_t*> > nhAA;
- if (!wrapAs(&tOutput, &nhAA, lOutput)) {
- LOG(ERROR) << "TWOmxBufferProducer::connect - "
- "Invalid output";
- _hidl_cb(toStatus(status), tOutput);
- return Void();
- }
-
- _hidl_cb(toStatus(status), tOutput);
- for (auto& nhA : nhAA) {
- for (auto& nh : nhA) {
- native_handle_delete(nh);
- }
- }
- return Void();
-}
-
-Return<Status> TWOmxBufferProducer::disconnect(
- int32_t api, DisconnectMode mode) {
- return toStatus(mBase->disconnect(
- static_cast<int>(api),
- toGuiDisconnectMode(mode)));
-}
-
-Return<Status> TWOmxBufferProducer::setSidebandStream(const hidl_handle& stream) {
- return toStatus(mBase->setSidebandStream(NativeHandle::create(
- native_handle_clone(stream), true)));
-}
-
-Return<void> TWOmxBufferProducer::allocateBuffers(
- uint32_t width, uint32_t height, PixelFormat format, uint32_t usage) {
- mBase->allocateBuffers(
- width, height,
- static_cast<::android::PixelFormat>(format),
- usage);
- return Void();
-}
-
-Return<Status> TWOmxBufferProducer::allowAllocation(bool allow) {
- return toStatus(mBase->allowAllocation(allow));
-}
-
-Return<Status> TWOmxBufferProducer::setGenerationNumber(uint32_t generationNumber) {
- return toStatus(mBase->setGenerationNumber(generationNumber));
-}
-
-Return<void> TWOmxBufferProducer::getConsumerName(getConsumerName_cb _hidl_cb) {
- _hidl_cb(mBase->getConsumerName().string());
- return Void();
-}
-
-Return<Status> TWOmxBufferProducer::setSharedBufferMode(bool sharedBufferMode) {
- return toStatus(mBase->setSharedBufferMode(sharedBufferMode));
-}
-
-Return<Status> TWOmxBufferProducer::setAutoRefresh(bool autoRefresh) {
- return toStatus(mBase->setAutoRefresh(autoRefresh));
-}
-
-Return<Status> TWOmxBufferProducer::setDequeueTimeout(int64_t timeoutNs) {
- return toStatus(mBase->setDequeueTimeout(timeoutNs));
-}
-
-Return<void> TWOmxBufferProducer::getLastQueuedBuffer(
- getLastQueuedBuffer_cb _hidl_cb) {
- sp<GraphicBuffer> lOutBuffer = new GraphicBuffer();
- sp<Fence> lOutFence = new Fence();
- float lOutTransformMatrix[16];
- status_t status = mBase->getLastQueuedBuffer(
- &lOutBuffer, &lOutFence, lOutTransformMatrix);
-
- AnwBuffer tOutBuffer;
- if (lOutBuffer != nullptr) {
- wrapAs(&tOutBuffer, *lOutBuffer);
- }
- hidl_handle tOutFence;
- native_handle_t* nh = nullptr;
- if ((lOutFence == nullptr) || !wrapAs(&tOutFence, &nh, *lOutFence)) {
- LOG(ERROR) << "TWOmxBufferProducer::getLastQueuedBuffer - "
- "Invalid output fence";
- _hidl_cb(toStatus(status),
- tOutBuffer,
- tOutFence,
- hidl_array<float, 16>());
- return Void();
- }
- hidl_array<float, 16> tOutTransformMatrix(lOutTransformMatrix);
-
- _hidl_cb(toStatus(status), tOutBuffer, tOutFence, tOutTransformMatrix);
- native_handle_delete(nh);
- return Void();
-}
-
-Return<void> TWOmxBufferProducer::getFrameTimestamps(
- getFrameTimestamps_cb _hidl_cb) {
- ::android::FrameEventHistoryDelta lDelta;
- mBase->getFrameTimestamps(&lDelta);
-
- FrameEventHistoryDelta tDelta;
- std::vector<std::vector<native_handle_t*> > nhAA;
- if (!wrapAs(&tDelta, &nhAA, lDelta)) {
- LOG(ERROR) << "TWOmxBufferProducer::getFrameTimestamps - "
- "Invalid output frame timestamps";
- _hidl_cb(tDelta);
- return Void();
- }
-
- _hidl_cb(tDelta);
- for (auto& nhA : nhAA) {
- for (auto& nh : nhA) {
- native_handle_delete(nh);
- }
- }
- return Void();
-}
-
-Return<void> TWOmxBufferProducer::getUniqueId(getUniqueId_cb _hidl_cb) {
- uint64_t outId;
- status_t status = mBase->getUniqueId(&outId);
- _hidl_cb(toStatus(status), outId);
- return Void();
-}
-
-// LWOmxBufferProducer
-
-LWOmxBufferProducer::LWOmxBufferProducer(sp<IOmxBufferProducer> const& base) :
- mBase(base) {
-}
-
-status_t LWOmxBufferProducer::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
- *buf = new GraphicBuffer();
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->requestBuffer(
- static_cast<int32_t>(slot),
- [&fnStatus, &buf] (Status status, AnwBuffer const& buffer) {
- fnStatus = toStatusT(status);
- if (!convertTo(buf->get(), buffer)) {
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmxBufferProducer::setMaxDequeuedBufferCount(
- int maxDequeuedBuffers) {
- return toStatusT(mBase->setMaxDequeuedBufferCount(
- static_cast<int32_t>(maxDequeuedBuffers)));
-}
-
-status_t LWOmxBufferProducer::setAsyncMode(bool async) {
- return toStatusT(mBase->setAsyncMode(async));
-}
-
-status_t LWOmxBufferProducer::dequeueBuffer(
- int* slot, sp<Fence>* fence,
- uint32_t w, uint32_t h, ::android::PixelFormat format,
- uint32_t usage, FrameEventHistoryDelta* outTimestamps) {
- *fence = new Fence();
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->dequeueBuffer(
- w, h, static_cast<PixelFormat>(format), usage,
- outTimestamps != nullptr,
- [&fnStatus, slot, fence, outTimestamps] (
- Status status,
- int32_t tSlot,
- hidl_handle const& tFence,
- IOmxBufferProducer::FrameEventHistoryDelta const& tTs) {
- fnStatus = toStatusT(status);
- *slot = tSlot;
- if (!convertTo(fence->get(), tFence)) {
- LOG(ERROR) << "LWOmxBufferProducer::dequeueBuffer - "
- "Invalid output fence";
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- if (outTimestamps && !convertTo(outTimestamps, tTs)) {
- LOG(ERROR) << "LWOmxBufferProducer::dequeueBuffer - "
- "Invalid output timestamps";
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmxBufferProducer::detachBuffer(int slot) {
- return toStatusT(mBase->detachBuffer(static_cast<int>(slot)));
-}
-
-status_t LWOmxBufferProducer::detachNextBuffer(
- sp<GraphicBuffer>* outBuffer, sp<Fence>* outFence) {
- *outBuffer = new GraphicBuffer();
- *outFence = new Fence();
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->detachNextBuffer(
- [&fnStatus, outBuffer, outFence] (
- Status status,
- AnwBuffer const& tBuffer,
- hidl_handle const& tFence) {
- fnStatus = toStatusT(status);
- if (!convertTo(outFence->get(), tFence)) {
- LOG(ERROR) << "LWOmxBufferProducer::detachNextBuffer - "
- "Invalid output fence";
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- if (!convertTo(outBuffer->get(), tBuffer)) {
- LOG(ERROR) << "LWOmxBufferProducer::detachNextBuffer - "
- "Invalid output buffer";
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmxBufferProducer::attachBuffer(
- int* outSlot, const sp<GraphicBuffer>& buffer) {
- AnwBuffer tBuffer;
- wrapAs(&tBuffer, *buffer);
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->attachBuffer(tBuffer,
- [&fnStatus, outSlot] (Status status, int32_t slot) {
- fnStatus = toStatusT(status);
- *outSlot = slot;
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmxBufferProducer::queueBuffer(
- int slot,
- const QueueBufferInput& input,
- QueueBufferOutput* output) {
- IOmxBufferProducer::QueueBufferInput tInput;
- native_handle_t* nh;
- if (!wrapAs(&tInput, &nh, input)) {
- LOG(ERROR) << "LWOmxBufferProducer::queueBuffer - "
- "Invalid input";
- return BAD_VALUE;
- }
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->queueBuffer(slot, tInput,
- [&fnStatus, output] (
- Status status,
- IOmxBufferProducer::QueueBufferOutput const& tOutput) {
- fnStatus = toStatusT(status);
- if (!convertTo(output, tOutput)) {
- LOG(ERROR) << "LWOmxBufferProducer::queueBuffer - "
- "Invalid output";
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- }));
- native_handle_delete(nh);
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmxBufferProducer::cancelBuffer(int slot, const sp<Fence>& fence) {
- hidl_handle tFence;
- native_handle_t* nh = nullptr;
- if ((fence == nullptr) || !wrapAs(&tFence, &nh, *fence)) {
- LOG(ERROR) << "LWOmxBufferProducer::cancelBuffer - "
- "Invalid input fence";
- return BAD_VALUE;
- }
-
- status_t status = toStatusT(mBase->cancelBuffer(
- static_cast<int32_t>(slot), tFence));
- native_handle_delete(nh);
- return status;
-}
-
-int LWOmxBufferProducer::query(int what, int* value) {
- int result;
- status_t transStatus = toStatusT(mBase->query(
- static_cast<int32_t>(what),
- [&result, value] (int32_t tResult, int32_t tValue) {
- result = static_cast<int>(tResult);
- *value = static_cast<int>(tValue);
- }));
- return transStatus == NO_ERROR ? result : static_cast<int>(transStatus);
-}
-
-status_t LWOmxBufferProducer::connect(
- const sp<IProducerListener>& listener, int api,
- bool producerControlledByApp, QueueBufferOutput* output) {
- sp<IOmxProducerListener> tListener = listener == nullptr ?
- nullptr : new TWOmxProducerListener(listener);
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->connect(
- tListener, static_cast<int32_t>(api), producerControlledByApp,
- [&fnStatus, output] (
- Status status,
- IOmxBufferProducer::QueueBufferOutput const& tOutput) {
- fnStatus = toStatusT(status);
- if (!convertTo(output, tOutput)) {
- LOG(ERROR) << "LWOmxBufferProducer::connect - "
- "Invalid output";
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-status_t LWOmxBufferProducer::disconnect(int api, DisconnectMode mode) {
- return toStatusT(mBase->disconnect(
- static_cast<int32_t>(api), toOmxDisconnectMode(mode)));
-}
-
-status_t LWOmxBufferProducer::setSidebandStream(
- const sp<NativeHandle>& stream) {
- return toStatusT(mBase->setSidebandStream(stream->handle()));
-}
-
-void LWOmxBufferProducer::allocateBuffers(uint32_t width, uint32_t height,
- ::android::PixelFormat format, uint32_t usage) {
- mBase->allocateBuffers(
- width, height, static_cast<PixelFormat>(format), usage);
-}
-
-status_t LWOmxBufferProducer::allowAllocation(bool allow) {
- return toStatusT(mBase->allowAllocation(allow));
-}
-
-status_t LWOmxBufferProducer::setGenerationNumber(uint32_t generationNumber) {
- return toStatusT(mBase->setGenerationNumber(generationNumber));
-}
-
-String8 LWOmxBufferProducer::getConsumerName() const {
- String8 lName;
- mBase->getConsumerName([&lName] (hidl_string const& name) {
- lName = name.c_str();
- });
- return lName;
-}
-
-status_t LWOmxBufferProducer::setSharedBufferMode(bool sharedBufferMode) {
- return toStatusT(mBase->setSharedBufferMode(sharedBufferMode));
-}
-
-status_t LWOmxBufferProducer::setAutoRefresh(bool autoRefresh) {
- return toStatusT(mBase->setAutoRefresh(autoRefresh));
-}
-
-status_t LWOmxBufferProducer::setDequeueTimeout(nsecs_t timeout) {
- return toStatusT(mBase->setDequeueTimeout(static_cast<int64_t>(timeout)));
-}
-
-status_t LWOmxBufferProducer::getLastQueuedBuffer(
- sp<GraphicBuffer>* outBuffer,
- sp<Fence>* outFence,
- float outTransformMatrix[16]) {
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->getLastQueuedBuffer(
- [&fnStatus, outBuffer, outFence, &outTransformMatrix] (
- Status status,
- AnwBuffer const& buffer,
- hidl_handle const& fence,
- hidl_array<float, 16> const& transformMatrix) {
- fnStatus = toStatusT(status);
- *outBuffer = new GraphicBuffer();
- if (!convertTo(outBuffer->get(), buffer)) {
- LOG(ERROR) << "LWOmxBufferProducer::getLastQueuedBuffer - "
- "Invalid output buffer";
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- *outFence = new Fence();
- if (!convertTo(outFence->get(), fence)) {
- LOG(ERROR) << "LWOmxBufferProducer::getLastQueuedBuffer - "
- "Invalid output fence";
- fnStatus = fnStatus == NO_ERROR ? BAD_VALUE : fnStatus;
- }
- std::copy(transformMatrix.data(),
- transformMatrix.data() + 16,
- outTransformMatrix);
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-void LWOmxBufferProducer::getFrameTimestamps(FrameEventHistoryDelta* outDelta) {
- mBase->getFrameTimestamps([outDelta] (
- IOmxBufferProducer::FrameEventHistoryDelta const& tDelta) {
- convertTo(outDelta, tDelta);
- });
-}
-
-status_t LWOmxBufferProducer::getUniqueId(uint64_t* outId) const {
- status_t fnStatus;
- status_t transStatus = toStatusT(mBase->getUniqueId(
- [&fnStatus, outId] (Status status, uint64_t id) {
- fnStatus = toStatusT(status);
- *outId = id;
- }));
- return transStatus == NO_ERROR ? fnStatus : transStatus;
-}
-
-} // namespace implementation
-} // namespace V1_0
-} // namespace omx
-} // namespace media
-} // namespace hardware
-} // namespace android
diff --git a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.h b/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.h
deleted file mode 100644
index 8520160..0000000
--- a/media/libstagefright/omx/hal/1.0/impl/WOmxBufferProducer.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright 2016, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERPRODUCER_H
-#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERPRODUCER_H
-
-#include <hidl/MQDescriptor.h>
-#include <hidl/Status.h>
-
-#include <binder/Binder.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/IProducerListener.h>
-
-#include <android/hardware/media/omx/1.0/IOmxBufferProducer.h>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace implementation {
-
-using ::android::hardware::graphics::common::V1_0::PixelFormat;
-using ::android::hardware::media::omx::V1_0::IOmxBufferProducer;
-using ::android::hardware::media::omx::V1_0::IOmxProducerListener;
-using ::android::hardware::media::omx::V1_0::Status;
-using ::android::hardware::media::V1_0::AnwBuffer;
-using ::android::hidl::base::V1_0::IBase;
-using ::android::hardware::hidl_array;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::sp;
-
-using ::android::IGraphicBufferProducer;
-using ::android::BnGraphicBufferProducer;
-using ::android::IProducerListener;
-
-struct TWOmxBufferProducer : public IOmxBufferProducer {
- sp<IGraphicBufferProducer> mBase;
- TWOmxBufferProducer(sp<IGraphicBufferProducer> const& base);
- Return<void> requestBuffer(int32_t slot, requestBuffer_cb _hidl_cb)
- override;
- Return<Status> setMaxDequeuedBufferCount(int32_t maxDequeuedBuffers)
- override;
- Return<Status> setAsyncMode(bool async) override;
- Return<void> dequeueBuffer(
- uint32_t width, uint32_t height, PixelFormat format, uint32_t usage,
- bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) override;
- Return<Status> detachBuffer(int32_t slot) override;
- Return<void> detachNextBuffer(detachNextBuffer_cb _hidl_cb) override;
- Return<void> attachBuffer(const AnwBuffer& buffer, attachBuffer_cb _hidl_cb)
- override;
- Return<void> queueBuffer(
- int32_t slot, const IOmxBufferProducer::QueueBufferInput& input,
- queueBuffer_cb _hidl_cb) override;
- Return<Status> cancelBuffer(int32_t slot, const hidl_handle& fence)
- override;
- Return<void> query(int32_t what, query_cb _hidl_cb) override;
- Return<void> connect(const sp<IOmxProducerListener>& listener,
- int32_t api, bool producerControlledByApp,
- connect_cb _hidl_cb) override;
- Return<Status> disconnect(
- int32_t api,
- IOmxBufferProducer::DisconnectMode mode) override;
- Return<Status> setSidebandStream(const hidl_handle& stream) override;
- Return<void> allocateBuffers(
- uint32_t width, uint32_t height,
- PixelFormat format, uint32_t usage) override;
- Return<Status> allowAllocation(bool allow) override;
- Return<Status> setGenerationNumber(uint32_t generationNumber) override;
- Return<void> getConsumerName(getConsumerName_cb _hidl_cb) override;
- Return<Status> setSharedBufferMode(bool sharedBufferMode) override;
- Return<Status> setAutoRefresh(bool autoRefresh) override;
- Return<Status> setDequeueTimeout(int64_t timeoutNs) override;
- Return<void> getLastQueuedBuffer(getLastQueuedBuffer_cb _hidl_cb) override;
- Return<void> getFrameTimestamps(getFrameTimestamps_cb _hidl_cb) override;
- Return<void> getUniqueId(getUniqueId_cb _hidl_cb) override;
-};
-
-struct LWOmxBufferProducer : public BnGraphicBufferProducer {
- sp<IOmxBufferProducer> mBase;
- LWOmxBufferProducer(sp<IOmxBufferProducer> const& base);
-
- status_t requestBuffer(int slot, sp<GraphicBuffer>* buf) override;
- status_t setMaxDequeuedBufferCount(int maxDequeuedBuffers) override;
- status_t setAsyncMode(bool async) override;
- status_t dequeueBuffer(int* slot, sp<Fence>* fence, uint32_t w,
- uint32_t h, ::android::PixelFormat format, uint32_t usage,
- FrameEventHistoryDelta* outTimestamps) override;
- status_t detachBuffer(int slot) override;
- status_t detachNextBuffer(sp<GraphicBuffer>* outBuffer, sp<Fence>* outFence)
- override;
- status_t attachBuffer(int* outSlot, const sp<GraphicBuffer>& buffer)
- override;
- status_t queueBuffer(int slot,
- const QueueBufferInput& input,
- QueueBufferOutput* output) override;
- status_t cancelBuffer(int slot, const sp<Fence>& fence) override;
- int query(int what, int* value) override;
- status_t connect(const sp<IProducerListener>& listener, int api,
- bool producerControlledByApp, QueueBufferOutput* output) override;
- status_t disconnect(int api, DisconnectMode mode = DisconnectMode::Api)
- override;
- status_t setSidebandStream(const sp<NativeHandle>& stream) override;
- void allocateBuffers(uint32_t width, uint32_t height,
- ::android::PixelFormat format, uint32_t usage) override;
- status_t allowAllocation(bool allow) override;
- status_t setGenerationNumber(uint32_t generationNumber) override;
- String8 getConsumerName() const override;
- status_t setSharedBufferMode(bool sharedBufferMode) override;
- status_t setAutoRefresh(bool autoRefresh) override;
- status_t setDequeueTimeout(nsecs_t timeout) override;
- status_t getLastQueuedBuffer(sp<GraphicBuffer>* outBuffer,
- sp<Fence>* outFence, float outTransformMatrix[16]) override;
- void getFrameTimestamps(FrameEventHistoryDelta* outDelta) override;
- status_t getUniqueId(uint64_t* outId) const override;
-};
-
-} // namespace implementation
-} // namespace V1_0
-} // namespace omx
-} // namespace media
-} // namespace hardware
-} // namespace android
-
-#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERPRODUCER_H
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 8817cf9..cbca461 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -40,7 +40,7 @@
#include <media/stagefright/SimpleDecodingSource.h>
#include <media/OMXBuffer.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
-#include <omx/hal/1.0/utils/WOmx.h>
+#include <media/omx/1.0/WOmx.h>
#define DEFAULT_TIMEOUT 500000
diff --git a/media/libstagefright/webm/WebmFrameThread.cpp b/media/libstagefright/webm/WebmFrameThread.cpp
index 7eb4745..77de3c8 100644
--- a/media/libstagefright/webm/WebmFrameThread.cpp
+++ b/media/libstagefright/webm/WebmFrameThread.cpp
@@ -37,17 +37,23 @@
}
status_t WebmFrameThread::start() {
+ status_t err = OK;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
- pthread_create(&mThread, &attr, WebmFrameThread::wrap, this);
+ if ((err = pthread_create(&mThread, &attr, WebmFrameThread::wrap, this))) {
+ mThread = 0;
+ }
pthread_attr_destroy(&attr);
- return OK;
+ return err;
}
status_t WebmFrameThread::stop() {
- void *status;
- pthread_join(mThread, &status);
+ void *status = nullptr;
+ if (mThread) {
+ pthread_join(mThread, &status);
+ mThread = 0;
+ }
return (status_t)(intptr_t)status;
}
diff --git a/media/ndk/Android.mk b/media/ndk/Android.mk
index 74729e4..67b0ab1 100644
--- a/media/ndk/Android.mk
+++ b/media/ndk/Android.mk
@@ -34,9 +34,12 @@
LOCAL_C_INCLUDES := \
bionic/libc/private \
+ external/piex \
frameworks/base/core/jni \
+ frameworks/base/media/jni \
frameworks/av/include/ndk \
- system/media/camera/include
+ system/media/camera/include \
+ $(call include-path-for, libhardware)/hardware \
LOCAL_CFLAGS += -fvisibility=hidden -D EXPORT='__attribute__ ((visibility ("default")))'
@@ -45,7 +48,9 @@
LOCAL_SHARED_LIBRARIES := \
libbinder \
libmedia \
+ libmedia_jni \
libmediadrm \
+ libskia \
libstagefright \
libstagefright_foundation \
liblog \
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
index 40900ad..6c9a644 100644
--- a/media/ndk/NdkImage.cpp
+++ b/media/ndk/NdkImage.cpp
@@ -22,6 +22,8 @@
#include "NdkImagePriv.h"
#include "NdkImageReaderPriv.h"
+#include <android_media_Utils.h>
+#include <android_runtime/android_hardware_HardwareBuffer.h>
#include <utils/Log.h>
#include "hardware/camera3.h"
@@ -29,11 +31,11 @@
#define ALIGN(x, mask) ( ((x) + (mask) - 1) & ~((mask) - 1) )
-AImage::AImage(AImageReader* reader, int32_t format,
- CpuConsumer::LockedBuffer* buffer, int64_t timestamp,
+AImage::AImage(AImageReader* reader, int32_t format, uint64_t usage,
+ BufferItem* buffer, int64_t timestamp,
int32_t width, int32_t height, int32_t numPlanes) :
- mReader(reader), mFormat(format),
- mBuffer(buffer), mTimestamp(timestamp),
+ mReader(reader), mFormat(format), mUsage(usage),
+ mBuffer(buffer), mLockedBuffer(nullptr), mTimestamp(timestamp),
mWidth(width), mHeight(height), mNumPlanes(numPlanes) {
}
@@ -66,6 +68,7 @@
// Should have been set to nullptr in releaseImageLocked
// Set to nullptr here for extra safety only
mBuffer = nullptr;
+ mLockedBuffer = nullptr;
mIsClosed = true;
}
@@ -169,8 +172,80 @@
return AMEDIA_OK;
}
+media_status_t AImage::lockImage() {
+ if (mBuffer == nullptr || mBuffer->mGraphicBuffer == nullptr) {
+ LOG_ALWAYS_FATAL("%s: AImage %p has no buffer.", __FUNCTION__, this);
+ return AMEDIA_ERROR_INVALID_OBJECT;
+ }
+
+ if ((mUsage & AHARDWAREBUFFER_USAGE0_CPU_READ_OFTEN) == 0) {
+ ALOGE("%s: AImage %p does not have any software read usage bits set, usage=%" PRIu64 "",
+ __FUNCTION__, this, mUsage);
+ return AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE;
+ }
+
+ if (mLockedBuffer != nullptr) {
+ // Return immediately if the image has already been locked.
+ return AMEDIA_OK;
+ }
+
+ auto lockedBuffer = std::make_unique<CpuConsumer::LockedBuffer>();
+
+ uint64_t producerUsage;
+ uint64_t consumerUsage;
+ android_hardware_HardwareBuffer_convertToGrallocUsageBits(
+ &producerUsage, &consumerUsage, mUsage, 0);
+
+ status_t ret =
+ lockImageFromBuffer(mBuffer, consumerUsage, mBuffer->mFence->dup(), lockedBuffer.get());
+ if (ret != OK) {
+ ALOGE("%s: AImage %p failed to lock, error=%d", __FUNCTION__, this, ret);
+ return AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE;
+ }
+
+ ALOGV("%s: Successfully locked the image %p.", __FUNCTION__, this);
+ mLockedBuffer = std::move(lockedBuffer);
+
+ return AMEDIA_OK;
+}
+
+media_status_t AImage::unlockImageIfLocked(int* fenceFd) {
+ if (fenceFd == nullptr) {
+ LOG_ALWAYS_FATAL("%s: fenceFd cannot be null.", __FUNCTION__);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (mBuffer == nullptr || mBuffer->mGraphicBuffer == nullptr) {
+ LOG_ALWAYS_FATAL("%s: AImage %p has no buffer.", __FUNCTION__, this);
+ return AMEDIA_ERROR_INVALID_OBJECT;
+ }
+
+ if (mLockedBuffer == nullptr) {
+ // This image hasn't been locked yet, no need to unlock.
+ *fenceFd = -1;
+ return AMEDIA_OK;
+ }
+
+ // No fence by default.
+ int releaseFenceFd = -1;
+ status_t res = mBuffer->mGraphicBuffer->unlockAsync(&releaseFenceFd);
+ if (res != OK) {
+ ALOGE("%s unlock buffer failed on iamge %p.", __FUNCTION__, this);
+ *fenceFd = -1;
+ return AMEDIA_IMGREADER_CANNOT_UNLOCK_IMAGE;
+ }
+
+ *fenceFd = releaseFenceFd;
+ return AMEDIA_OK;
+}
+
media_status_t
AImage::getPlanePixelStride(int planeIdx, /*out*/int32_t* pixelStride) const {
+ if (mLockedBuffer == nullptr) {
+ ALOGE("%s: buffer not locked.", __FUNCTION__);
+ return AMEDIA_IMGREADER_IMAGE_NOT_LOCKED;
+ }
+
if (planeIdx < 0 || planeIdx >= mNumPlanes) {
ALOGE("Error: planeIdx %d out of bound [0,%d]",
planeIdx, mNumPlanes - 1);
@@ -183,10 +258,10 @@
ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
return AMEDIA_ERROR_INVALID_OBJECT;
}
- int32_t fmt = mBuffer->flexFormat;
+ int32_t fmt = mLockedBuffer->flexFormat;
switch (fmt) {
case HAL_PIXEL_FORMAT_YCbCr_420_888:
- *pixelStride = (planeIdx == 0) ? 1 : mBuffer->chromaStep;
+ *pixelStride = (planeIdx == 0) ? 1 : mLockedBuffer->chromaStep;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
*pixelStride = (planeIdx == 0) ? 1 : 2;
@@ -226,6 +301,11 @@
media_status_t
AImage::getPlaneRowStride(int planeIdx, /*out*/int32_t* rowStride) const {
+ if (mLockedBuffer == nullptr) {
+ ALOGE("%s: buffer not locked.", __FUNCTION__);
+ return AMEDIA_IMGREADER_IMAGE_NOT_LOCKED;
+ }
+
if (planeIdx < 0 || planeIdx >= mNumPlanes) {
ALOGE("Error: planeIdx %d out of bound [0,%d]",
planeIdx, mNumPlanes - 1);
@@ -238,54 +318,58 @@
ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
return AMEDIA_ERROR_INVALID_OBJECT;
}
- int32_t fmt = mBuffer->flexFormat;
+ int32_t fmt = mLockedBuffer->flexFormat;
switch (fmt) {
case HAL_PIXEL_FORMAT_YCbCr_420_888:
- *rowStride = (planeIdx == 0) ? mBuffer->stride : mBuffer->chromaStride;
+ *rowStride = (planeIdx == 0) ? mLockedBuffer->stride
+ : mLockedBuffer->chromaStride;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
- *rowStride = mBuffer->width;
+ *rowStride = mLockedBuffer->width;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_YV12:
- if (mBuffer->stride % 16) {
- ALOGE("Stride %d is not 16 pixel aligned!", mBuffer->stride);
+ if (mLockedBuffer->stride % 16) {
+ ALOGE("Stride %d is not 16 pixel aligned!", mLockedBuffer->stride);
return AMEDIA_ERROR_UNKNOWN;
}
- *rowStride = (planeIdx == 0) ? mBuffer->stride : ALIGN(mBuffer->stride / 2, 16);
+ *rowStride = (planeIdx == 0) ? mLockedBuffer->stride
+ : ALIGN(mLockedBuffer->stride / 2, 16);
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_RAW10:
case HAL_PIXEL_FORMAT_RAW12:
// RAW10 and RAW12 are used for 10-bit and 12-bit raw data, they are single plane
- *rowStride = mBuffer->stride;
+ *rowStride = mLockedBuffer->stride;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_Y8:
- if (mBuffer->stride % 16) {
- ALOGE("Stride %d is not 16 pixel aligned!", mBuffer->stride);
+ if (mLockedBuffer->stride % 16) {
+ ALOGE("Stride %d is not 16 pixel aligned!",
+ mLockedBuffer->stride);
return AMEDIA_ERROR_UNKNOWN;
}
- *rowStride = mBuffer->stride;
+ *rowStride = mLockedBuffer->stride;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_Y16:
case HAL_PIXEL_FORMAT_RAW16:
// In native side, strides are specified in pixels, not in bytes.
// Single plane 16bpp bayer data. even width/height,
// row stride multiple of 16 pixels (32 bytes)
- if (mBuffer->stride % 16) {
- ALOGE("Stride %d is not 16 pixel aligned!", mBuffer->stride);
+ if (mLockedBuffer->stride % 16) {
+ ALOGE("Stride %d is not 16 pixel aligned!",
+ mLockedBuffer->stride);
return AMEDIA_ERROR_UNKNOWN;
}
- *rowStride = mBuffer->stride * 2;
+ *rowStride = mLockedBuffer->stride * 2;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_RGB_565:
- *rowStride = mBuffer->stride * 2;
+ *rowStride = mLockedBuffer->stride * 2;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_RGBX_8888:
- *rowStride = mBuffer->stride * 4;
+ *rowStride = mLockedBuffer->stride * 4;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_RGB_888:
// Single plane, 24bpp.
- *rowStride = mBuffer->stride * 3;
+ *rowStride = mLockedBuffer->stride * 3;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_BLOB:
case HAL_PIXEL_FORMAT_RAW_OPAQUE:
@@ -300,13 +384,13 @@
uint32_t
AImage::getJpegSize() const {
- if (mBuffer == nullptr) {
+ if (mLockedBuffer == nullptr) {
LOG_ALWAYS_FATAL("Error: buffer is null");
}
uint32_t size = 0;
- uint32_t width = mBuffer->width;
- uint8_t* jpegBuffer = mBuffer->data;
+ uint32_t width = mLockedBuffer->width;
+ uint8_t* jpegBuffer = mLockedBuffer->data;
// First check for JPEG transport header at the end of the buffer
uint8_t* header = jpegBuffer + (width - sizeof(struct camera3_jpeg_blob));
@@ -334,6 +418,11 @@
media_status_t
AImage::getPlaneData(int planeIdx,/*out*/uint8_t** data, /*out*/int* dataLength) const {
+ if (mLockedBuffer == nullptr) {
+ ALOGE("%s: buffer not locked.", __FUNCTION__);
+ return AMEDIA_IMGREADER_IMAGE_NOT_LOCKED;
+ }
+
if (planeIdx < 0 || planeIdx >= mNumPlanes) {
ALOGE("Error: planeIdx %d out of bound [0,%d]",
planeIdx, mNumPlanes - 1);
@@ -352,140 +441,154 @@
uint8_t* cr = nullptr;
uint8_t* pData = nullptr;
int bytesPerPixel = 0;
- int32_t fmt = mBuffer->flexFormat;
+ int32_t fmt = mLockedBuffer->flexFormat;
switch (fmt) {
case HAL_PIXEL_FORMAT_YCbCr_420_888:
- pData = (planeIdx == 0) ? mBuffer->data :
- (planeIdx == 1) ? mBuffer->dataCb : mBuffer->dataCr;
+ pData = (planeIdx == 0) ? mLockedBuffer->data
+ : (planeIdx == 1) ? mLockedBuffer->dataCb
+ : mLockedBuffer->dataCr;
// only map until last pixel
if (planeIdx == 0) {
- dataSize = mBuffer->stride * (mBuffer->height - 1) + mBuffer->width;
+ dataSize = mLockedBuffer->stride * (mLockedBuffer->height - 1) +
+ mLockedBuffer->width;
} else {
- dataSize = mBuffer->chromaStride * (mBuffer->height / 2 - 1) +
- mBuffer->chromaStep * (mBuffer->width / 2 - 1) + 1;
+ dataSize =
+ mLockedBuffer->chromaStride *
+ (mLockedBuffer->height / 2 - 1) +
+ mLockedBuffer->chromaStep * (mLockedBuffer->width / 2 - 1) +
+ 1;
}
break;
// NV21
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
- cr = mBuffer->data + (mBuffer->stride * mBuffer->height);
+ cr = mLockedBuffer->data +
+ (mLockedBuffer->stride * mLockedBuffer->height);
cb = cr + 1;
// only map until last pixel
- ySize = mBuffer->width * (mBuffer->height - 1) + mBuffer->width;
- cSize = mBuffer->width * (mBuffer->height / 2 - 1) + mBuffer->width - 1;
-
- pData = (planeIdx == 0) ? mBuffer->data :
- (planeIdx == 1) ? cb : cr;
+ ySize = mLockedBuffer->width * (mLockedBuffer->height - 1) +
+ mLockedBuffer->width;
+ cSize = mLockedBuffer->width * (mLockedBuffer->height / 2 - 1) +
+ mLockedBuffer->width - 1;
+ pData = (planeIdx == 0) ? mLockedBuffer->data
+ : (planeIdx == 1) ? cb : cr;
dataSize = (planeIdx == 0) ? ySize : cSize;
break;
case HAL_PIXEL_FORMAT_YV12:
// Y and C stride need to be 16 pixel aligned.
- if (mBuffer->stride % 16) {
- ALOGE("Stride %d is not 16 pixel aligned!", mBuffer->stride);
+ if (mLockedBuffer->stride % 16) {
+ ALOGE("Stride %d is not 16 pixel aligned!",
+ mLockedBuffer->stride);
return AMEDIA_ERROR_UNKNOWN;
}
- ySize = mBuffer->stride * mBuffer->height;
- cStride = ALIGN(mBuffer->stride / 2, 16);
- cr = mBuffer->data + ySize;
- cSize = cStride * mBuffer->height / 2;
+ ySize = mLockedBuffer->stride * mLockedBuffer->height;
+ cStride = ALIGN(mLockedBuffer->stride / 2, 16);
+ cr = mLockedBuffer->data + ySize;
+ cSize = cStride * mLockedBuffer->height / 2;
cb = cr + cSize;
- pData = (planeIdx == 0) ? mBuffer->data :
- (planeIdx == 1) ? cb : cr;
+ pData = (planeIdx == 0) ? mLockedBuffer->data
+ : (planeIdx == 1) ? cb : cr;
dataSize = (planeIdx == 0) ? ySize : cSize;
break;
case HAL_PIXEL_FORMAT_Y8:
// Single plane, 8bpp.
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height;
+ pData = mLockedBuffer->data;
+ dataSize = mLockedBuffer->stride * mLockedBuffer->height;
break;
case HAL_PIXEL_FORMAT_Y16:
bytesPerPixel = 2;
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+ pData = mLockedBuffer->data;
+ dataSize =
+ mLockedBuffer->stride * mLockedBuffer->height * bytesPerPixel;
break;
case HAL_PIXEL_FORMAT_BLOB:
// Used for JPEG data, height must be 1, width == size, single plane.
- if (mBuffer->height != 1) {
- ALOGE("Jpeg should have height value one but got %d", mBuffer->height);
+ if (mLockedBuffer->height != 1) {
+ ALOGE("Jpeg should have height value one but got %d",
+ mLockedBuffer->height);
return AMEDIA_ERROR_UNKNOWN;
}
- pData = mBuffer->data;
+ pData = mLockedBuffer->data;
dataSize = getJpegSize();
break;
case HAL_PIXEL_FORMAT_RAW16:
// Single plane 16bpp bayer data.
bytesPerPixel = 2;
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+ pData = mLockedBuffer->data;
+ dataSize =
+ mLockedBuffer->stride * mLockedBuffer->height * bytesPerPixel;
break;
case HAL_PIXEL_FORMAT_RAW_OPAQUE:
// Used for RAW_OPAQUE data, height must be 1, width == size, single plane.
- if (mBuffer->height != 1) {
- ALOGE("RAW_OPAQUE should have height value one but got %d", mBuffer->height);
+ if (mLockedBuffer->height != 1) {
+ ALOGE("RAW_OPAQUE should have height value one but got %d",
+ mLockedBuffer->height);
return AMEDIA_ERROR_UNKNOWN;
}
- pData = mBuffer->data;
- dataSize = mBuffer->width;
+ pData = mLockedBuffer->data;
+ dataSize = mLockedBuffer->width;
break;
case HAL_PIXEL_FORMAT_RAW10:
// Single plane 10bpp bayer data.
- if (mBuffer->width % 4) {
- ALOGE("Width is not multiple of 4 %d", mBuffer->width);
+ if (mLockedBuffer->width % 4) {
+ ALOGE("Width is not multiple of 4 %d", mLockedBuffer->width);
return AMEDIA_ERROR_UNKNOWN;
}
- if (mBuffer->height % 2) {
- ALOGE("Height is not multiple of 2 %d", mBuffer->height);
+ if (mLockedBuffer->height % 2) {
+ ALOGE("Height is not multiple of 2 %d", mLockedBuffer->height);
return AMEDIA_ERROR_UNKNOWN;
}
- if (mBuffer->stride < (mBuffer->width * 10 / 8)) {
+ if (mLockedBuffer->stride < (mLockedBuffer->width * 10 / 8)) {
ALOGE("stride (%d) should be at least %d",
- mBuffer->stride, mBuffer->width * 10 / 8);
+ mLockedBuffer->stride, mLockedBuffer->width * 10 / 8);
return AMEDIA_ERROR_UNKNOWN;
}
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height;
+ pData = mLockedBuffer->data;
+ dataSize = mLockedBuffer->stride * mLockedBuffer->height;
break;
case HAL_PIXEL_FORMAT_RAW12:
// Single plane 10bpp bayer data.
- if (mBuffer->width % 4) {
- ALOGE("Width is not multiple of 4 %d", mBuffer->width);
+ if (mLockedBuffer->width % 4) {
+ ALOGE("Width is not multiple of 4 %d", mLockedBuffer->width);
return AMEDIA_ERROR_UNKNOWN;
}
- if (mBuffer->height % 2) {
- ALOGE("Height is not multiple of 2 %d", mBuffer->height);
+ if (mLockedBuffer->height % 2) {
+ ALOGE("Height is not multiple of 2 %d", mLockedBuffer->height);
return AMEDIA_ERROR_UNKNOWN;
}
- if (mBuffer->stride < (mBuffer->width * 12 / 8)) {
+ if (mLockedBuffer->stride < (mLockedBuffer->width * 12 / 8)) {
ALOGE("stride (%d) should be at least %d",
- mBuffer->stride, mBuffer->width * 12 / 8);
+ mLockedBuffer->stride, mLockedBuffer->width * 12 / 8);
return AMEDIA_ERROR_UNKNOWN;
}
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height;
+ pData = mLockedBuffer->data;
+ dataSize = mLockedBuffer->stride * mLockedBuffer->height;
break;
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_RGBX_8888:
// Single plane, 32bpp.
bytesPerPixel = 4;
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+ pData = mLockedBuffer->data;
+ dataSize =
+ mLockedBuffer->stride * mLockedBuffer->height * bytesPerPixel;
break;
case HAL_PIXEL_FORMAT_RGB_565:
// Single plane, 16bpp.
bytesPerPixel = 2;
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+ pData = mLockedBuffer->data;
+ dataSize =
+ mLockedBuffer->stride * mLockedBuffer->height * bytesPerPixel;
break;
case HAL_PIXEL_FORMAT_RGB_888:
// Single plane, 24bpp.
bytesPerPixel = 3;
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+ pData = mLockedBuffer->data;
+ dataSize = mLockedBuffer->stride * mLockedBuffer->height * bytesPerPixel;
break;
default:
ALOGE("Pixel format: 0x%x is unsupported", fmt);
@@ -602,6 +705,12 @@
__FUNCTION__, image, pixelStride);
return AMEDIA_ERROR_INVALID_PARAMETER;
}
+ media_status_t ret = const_cast<AImage*>(image)->lockImage();
+ if (ret != AMEDIA_OK) {
+ ALOGE("%s: failed to lock buffer for CPU access. image %p, error=%d.",
+ __FUNCTION__, image, ret);
+ return ret;
+ }
return image->getPlanePixelStride(planeIdx, pixelStride);
}
@@ -614,6 +723,12 @@
__FUNCTION__, image, rowStride);
return AMEDIA_ERROR_INVALID_PARAMETER;
}
+ media_status_t ret = const_cast<AImage*>(image)->lockImage();
+ if (ret != AMEDIA_OK) {
+ ALOGE("%s: failed to lock buffer for CPU access. image %p, error=%d.",
+ __FUNCTION__, image, ret);
+ return ret;
+ }
return image->getPlaneRowStride(planeIdx, rowStride);
}
@@ -627,5 +742,11 @@
__FUNCTION__, image, data, dataLength);
return AMEDIA_ERROR_INVALID_PARAMETER;
}
+ media_status_t ret = const_cast<AImage*>(image)->lockImage();
+ if (ret != AMEDIA_OK) {
+ ALOGE("%s: failed to lock buffer for CPU access. image %p, error=%d.",
+ __FUNCTION__, image, ret);
+ return ret;
+ }
return image->getPlaneData(planeIdx, data, dataLength);
}
diff --git a/media/ndk/NdkImagePriv.h b/media/ndk/NdkImagePriv.h
index 89d2b7c..e01dcc7 100644
--- a/media/ndk/NdkImagePriv.h
+++ b/media/ndk/NdkImagePriv.h
@@ -21,6 +21,7 @@
#include <utils/Log.h>
#include <utils/StrongPointer.h>
+#include <gui/BufferItem.h>
#include <gui/CpuConsumer.h>
#include "NdkImageReaderPriv.h"
@@ -31,9 +32,9 @@
// TODO: this only supports ImageReader
struct AImage {
- AImage(AImageReader* reader, int32_t format,
- CpuConsumer::LockedBuffer* buffer, int64_t timestamp,
- int32_t width, int32_t height, int32_t numPlanes);
+ AImage(AImageReader* reader, int32_t format, uint64_t usage,
+ BufferItem* buffer, int64_t timestamp,
+ int32_t width, int32_t height, int32_t numPlanes);
// free all resources while keeping object alive. Caller must obtain reader lock
void close();
@@ -54,6 +55,9 @@
media_status_t getNumPlanes(/*out*/int32_t* numPlanes) const;
media_status_t getTimestamp(/*out*/int64_t* timestamp) const;
+ media_status_t lockImage();
+ media_status_t unlockImageIfLocked(/*out*/int* fenceFd);
+
media_status_t getPlanePixelStride(int planeIdx, /*out*/int32_t* pixelStride) const;
media_status_t getPlaneRowStride(int planeIdx, /*out*/int32_t* rowStride) const;
media_status_t getPlaneData(int planeIdx,/*out*/uint8_t** data, /*out*/int* dataLength) const;
@@ -69,7 +73,9 @@
// When reader is close, AImage will only accept close API call
wp<AImageReader> mReader;
const int32_t mFormat;
- CpuConsumer::LockedBuffer* mBuffer;
+ const uint64_t mUsage; // AHARDWAREBUFFER_USAGE0* flags.
+ BufferItem* mBuffer;
+ std::unique_ptr<CpuConsumer::LockedBuffer> mLockedBuffer;
const int64_t mTimestamp;
const int32_t mWidth;
const int32_t mHeight;
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index ab3829e..e580dae 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -24,7 +24,9 @@
#include <cutils/atomic.h>
#include <utils/Log.h>
+#include <android_media_Utils.h>
#include <android_runtime/android_view_Surface.h>
+#include <android_runtime/android_hardware_HardwareBuffer.h>
using namespace android;
@@ -36,6 +38,7 @@
}
}
+const int32_t AImageReader::kDefaultUsage = AHARDWAREBUFFER_USAGE0_CPU_READ_OFTEN;
const char* AImageReader::kCallbackFpKey = "Callback";
const char* AImageReader::kContextKey = "Context";
@@ -151,10 +154,18 @@
}
}
-AImageReader::AImageReader(int32_t width, int32_t height, int32_t format, int32_t maxImages) :
- mWidth(width), mHeight(height), mFormat(format), mMaxImages(maxImages),
- mNumPlanes(getNumPlanesForFormat(format)),
- mFrameListener(new FrameListener(this)) {}
+AImageReader::AImageReader(int32_t width,
+ int32_t height,
+ int32_t format,
+ uint64_t usage,
+ int32_t maxImages)
+ : mWidth(width),
+ mHeight(height),
+ mFormat(format),
+ mUsage(usage),
+ mMaxImages(maxImages),
+ mNumPlanes(getNumPlanesForFormat(format)),
+ mFrameListener(new FrameListener(this)) {}
media_status_t
AImageReader::init() {
@@ -162,42 +173,44 @@
mHalFormat = android_view_Surface_mapPublicFormatToHalFormat(publicFormat);
mHalDataSpace = android_view_Surface_mapPublicFormatToHalDataspace(publicFormat);
+ uint64_t producerUsage;
+ uint64_t consumerUsage;
+ android_hardware_HardwareBuffer_convertToGrallocUsageBits(
+ &producerUsage, &consumerUsage, mUsage, 0);
+
sp<IGraphicBufferProducer> gbProducer;
sp<IGraphicBufferConsumer> gbConsumer;
BufferQueue::createBufferQueue(&gbProducer, &gbConsumer);
- sp<CpuConsumer> cpuConsumer;
- String8 consumerName = String8::format("ImageReader-%dx%df%xm%d-%d-%d",
- mWidth, mHeight, mFormat, mMaxImages, getpid(),
- createProcessUniqueId());
+ String8 consumerName = String8::format(
+ "ImageReader-%dx%df%xu%" PRIu64 "m%d-%d-%d", mWidth, mHeight, mFormat, mUsage,
+ mMaxImages, getpid(), createProcessUniqueId());
- cpuConsumer = new CpuConsumer(gbConsumer, mMaxImages, /*controlledByApp*/true);
- if (cpuConsumer == nullptr) {
- ALOGE("Failed to allocate CpuConsumer");
+ mBufferItemConsumer =
+ new BufferItemConsumer(gbConsumer, consumerUsage, mMaxImages, /*controlledByApp*/ true);
+ if (mBufferItemConsumer == nullptr) {
+ ALOGE("Failed to allocate BufferItemConsumer");
return AMEDIA_ERROR_UNKNOWN;
}
- mCpuConsumer = cpuConsumer;
- mCpuConsumer->setName(consumerName);
mProducer = gbProducer;
-
- sp<ConsumerBase> consumer = cpuConsumer;
- consumer->setFrameAvailableListener(mFrameListener);
+ mBufferItemConsumer->setName(consumerName);
+ mBufferItemConsumer->setFrameAvailableListener(mFrameListener);
status_t res;
- res = cpuConsumer->setDefaultBufferSize(mWidth, mHeight);
+ res = mBufferItemConsumer->setDefaultBufferSize(mWidth, mHeight);
if (res != OK) {
- ALOGE("Failed to set CpuConsumer buffer size");
+ ALOGE("Failed to set BufferItemConsumer buffer size");
return AMEDIA_ERROR_UNKNOWN;
}
- res = cpuConsumer->setDefaultBufferFormat(mHalFormat);
+ res = mBufferItemConsumer->setDefaultBufferFormat(mHalFormat);
if (res != OK) {
- ALOGE("Failed to set CpuConsumer buffer format");
+ ALOGE("Failed to set BufferItemConsumer buffer format");
return AMEDIA_ERROR_UNKNOWN;
}
- res = cpuConsumer->setDefaultBufferDataSpace(mHalDataSpace);
+ res = mBufferItemConsumer->setDefaultBufferDataSpace(mHalDataSpace);
if (res != OK) {
- ALOGE("Failed to set CpuConsumer buffer dataSpace");
+ ALOGE("Failed to set BufferItemConsumer buffer dataSpace");
return AMEDIA_ERROR_UNKNOWN;
}
@@ -209,7 +222,7 @@
mWindow = static_cast<ANativeWindow*>(mSurface.get());
for (int i = 0; i < mMaxImages; i++) {
- CpuConsumer::LockedBuffer* buffer = new CpuConsumer::LockedBuffer;
+ BufferItem* buffer = new BufferItem;
mBuffers.push_back(buffer);
}
@@ -248,133 +261,136 @@
image->close();
}
- // Delete LockedBuffers
+ // Delete BufferItems
for (auto it = mBuffers.begin();
it != mBuffers.end(); it++) {
delete *it;
}
- if (mCpuConsumer != nullptr) {
- mCpuConsumer->abandon();
- mCpuConsumer->setFrameAvailableListener(nullptr);
+ if (mBufferItemConsumer != nullptr) {
+ mBufferItemConsumer->abandon();
+ mBufferItemConsumer->setFrameAvailableListener(nullptr);
}
}
media_status_t
-AImageReader::acquireCpuConsumerImageLocked(/*out*/AImage** image) {
+AImageReader::acquireImageLocked(/*out*/AImage** image) {
*image = nullptr;
- CpuConsumer::LockedBuffer* buffer = getLockedBufferLocked();
+ BufferItem* buffer = getBufferItemLocked();
if (buffer == nullptr) {
ALOGW("Unable to acquire a lockedBuffer, very likely client tries to lock more than"
" maxImages buffers");
return AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED;
}
- status_t res = mCpuConsumer->lockNextBuffer(buffer);
+ status_t res = mBufferItemConsumer->acquireBuffer(buffer, 0);
if (res != NO_ERROR) {
- returnLockedBufferLocked(buffer);
- if (res != BAD_VALUE /*no buffers*/) {
- if (res == NOT_ENOUGH_DATA) {
+ returnBufferItemLocked(buffer);
+ if (res != BufferQueue::NO_BUFFER_AVAILABLE) {
+ if (res == INVALID_OPERATION) {
return AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED;
} else {
- ALOGE("%s Fail to lockNextBuffer with error: %d ",
- __FUNCTION__, res);
+ ALOGE("%s: Acquire image failed with some unknown error: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
return AMEDIA_ERROR_UNKNOWN;
}
}
return AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE;
}
- if (buffer->flexFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP) {
- ALOGE("NV21 format is not supported by AImageReader");
- return AMEDIA_ERROR_UNSUPPORTED;
- }
+ const int bufferWidth = getBufferWidth(buffer);
+ const int bufferHeight = getBufferHeight(buffer);
+ const int bufferFmt = buffer->mGraphicBuffer->getPixelFormat();
- // Check if the left-top corner of the crop rect is origin, we currently assume this point is
- // zero, will revist this once this assumption turns out problematic.
- Point lt = buffer->crop.leftTop();
- if (lt.x != 0 || lt.y != 0) {
- ALOGE("crop left top corner [%d, %d] need to be at origin", lt.x, lt.y);
- return AMEDIA_ERROR_UNKNOWN;
- }
+ const int readerWidth = mWidth;
+ const int readerHeight = mHeight;
+ const int readerFmt = mHalFormat;
- // Check if the producer buffer configurations match what ImageReader configured.
- int outputWidth = getBufferWidth(buffer);
- int outputHeight = getBufferHeight(buffer);
-
- int readerFmt = mHalFormat;
- int readerWidth = mWidth;
- int readerHeight = mHeight;
-
- if ((buffer->format != HAL_PIXEL_FORMAT_BLOB) && (readerFmt != HAL_PIXEL_FORMAT_BLOB) &&
- (readerWidth != outputWidth || readerHeight != outputHeight)) {
- ALOGW("%s: Producer buffer size: %dx%d, doesn't match AImageReader configured size: %dx%d",
- __FUNCTION__, outputWidth, outputHeight, readerWidth, readerHeight);
- }
-
- int bufFmt = buffer->format;
- if (readerFmt == HAL_PIXEL_FORMAT_YCbCr_420_888) {
- bufFmt = buffer->flexFormat;
- }
-
- if (readerFmt != bufFmt) {
- if (readerFmt == HAL_PIXEL_FORMAT_YCbCr_420_888 && (bufFmt ==
- HAL_PIXEL_FORMAT_YCrCb_420_SP || bufFmt == HAL_PIXEL_FORMAT_YV12)) {
- // Special casing for when producer switches to a format compatible with flexible YUV
- // (HAL_PIXEL_FORMAT_YCbCr_420_888).
- mHalFormat = bufFmt;
- ALOGD("%s: Overriding buffer format YUV_420_888 to %x.", __FUNCTION__, bufFmt);
- } else {
- // Return the buffer to the queue.
- mCpuConsumer->unlockBuffer(*buffer);
- returnLockedBufferLocked(buffer);
-
- ALOGE("Producer output buffer format: 0x%x, ImageReader configured format: 0x%x",
- buffer->format, readerFmt);
-
+ // Check if the producer buffer configurations match what AImageReader configured. Add some
+ // extra checks for non-opaque formats.
+ if (!isFormatOpaque(readerFmt)) {
+ // Check if the left-top corner of the crop rect is origin, we currently assume this point
+ // is zero, will revisit this once this assumption turns out problematic.
+ Point lt = buffer->mCrop.leftTop();
+ if (lt.x != 0 || lt.y != 0) {
+ ALOGE("Crop left top corner [%d, %d] not at origin", lt.x, lt.y);
return AMEDIA_ERROR_UNKNOWN;
}
+
+ // Check if the producer buffer configurations match what ImageReader configured.
+ if ((bufferFmt != HAL_PIXEL_FORMAT_BLOB) && (readerFmt != HAL_PIXEL_FORMAT_BLOB) &&
+ (readerWidth != bufferWidth || readerHeight != bufferHeight)) {
+ ALOGW("%s: Buffer size: %dx%d, doesn't match AImageReader configured size: %dx%d",
+ __FUNCTION__, bufferWidth, bufferHeight, readerWidth, readerHeight);
+ }
+
+ if (readerFmt != bufferFmt) {
+ if (readerFmt == HAL_PIXEL_FORMAT_YCbCr_420_888 && isPossiblyYUV(bufferFmt)) {
+ // Special casing for when producer switches to a format compatible with flexible
+ // YUV.
+ mHalFormat = bufferFmt;
+ ALOGD("%s: Overriding buffer format YUV_420_888 to 0x%x.", __FUNCTION__, bufferFmt);
+ } else {
+ // Return the buffer to the queue. No need to provide fence, as this buffer wasn't
+ // used anywhere yet.
+ mBufferItemConsumer->releaseBuffer(*buffer);
+ returnBufferItemLocked(buffer);
+
+ ALOGE("%s: Output buffer format: 0x%x, ImageReader configured format: 0x%x",
+ __FUNCTION__, bufferFmt, readerFmt);
+
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+ }
}
if (mHalFormat == HAL_PIXEL_FORMAT_BLOB) {
- *image = new AImage(this, mFormat, buffer, buffer->timestamp,
+ *image = new AImage(this, mFormat, mUsage, buffer, buffer->mTimestamp,
readerWidth, readerHeight, mNumPlanes);
} else {
- *image = new AImage(this, mFormat, buffer, buffer->timestamp,
- outputWidth, outputHeight, mNumPlanes);
+ *image = new AImage(this, mFormat, mUsage, buffer, buffer->mTimestamp,
+ bufferWidth, bufferHeight, mNumPlanes);
}
mAcquiredImages.push_back(*image);
return AMEDIA_OK;
}
-CpuConsumer::LockedBuffer*
-AImageReader::getLockedBufferLocked() {
+BufferItem*
+AImageReader::getBufferItemLocked() {
if (mBuffers.empty()) {
return nullptr;
}
- // Return a LockedBuffer pointer and remove it from the list
+ // Return a BufferItem pointer and remove it from the list
auto it = mBuffers.begin();
- CpuConsumer::LockedBuffer* buffer = *it;
+ BufferItem* buffer = *it;
mBuffers.erase(it);
return buffer;
}
void
-AImageReader::returnLockedBufferLocked(CpuConsumer::LockedBuffer* buffer) {
+AImageReader::returnBufferItemLocked(BufferItem* buffer) {
mBuffers.push_back(buffer);
}
void
AImageReader::releaseImageLocked(AImage* image) {
- CpuConsumer::LockedBuffer* buffer = image->mBuffer;
+ BufferItem* buffer = image->mBuffer;
if (buffer == nullptr) {
// This should not happen, but is not fatal
ALOGW("AImage %p has no buffer!", image);
return;
}
- mCpuConsumer->unlockBuffer(*buffer);
- returnLockedBufferLocked(buffer);
+ int fenceFd = -1;
+ media_status_t ret = image->unlockImageIfLocked(&fenceFd);
+ if (ret < 0) {
+ ALOGW("%s: AImage %p is cannot be unlocked.", __FUNCTION__, image);
+ return;
+ }
+
+ sp<Fence> releaseFence = fenceFd > 0 ? new Fence(fenceFd) : Fence::NO_FENCE;
+ mBufferItemConsumer->releaseBuffer(*buffer, releaseFence);
+ returnBufferItemLocked(buffer);
image->mBuffer = nullptr;
bool found = false;
@@ -395,29 +411,31 @@
}
int
-AImageReader::getBufferWidth(CpuConsumer::LockedBuffer* buffer) {
- if (buffer == nullptr) return -1;
+AImageReader::getBufferWidth(BufferItem* buffer) {
+ if (buffer == NULL) return -1;
- if (!buffer->crop.isEmpty()) {
- return buffer->crop.getWidth();
+ if (!buffer->mCrop.isEmpty()) {
+ return buffer->mCrop.getWidth();
}
- return buffer->width;
+
+ return buffer->mGraphicBuffer->getWidth();
}
int
-AImageReader::getBufferHeight(CpuConsumer::LockedBuffer* buffer) {
- if (buffer == nullptr) return -1;
+AImageReader::getBufferHeight(BufferItem* buffer) {
+ if (buffer == NULL) return -1;
- if (!buffer->crop.isEmpty()) {
- return buffer->crop.getHeight();
+ if (!buffer->mCrop.isEmpty()) {
+ return buffer->mCrop.getHeight();
}
- return buffer->height;
+
+ return buffer->mGraphicBuffer->getHeight();
}
media_status_t
AImageReader::acquireNextImage(/*out*/AImage** image) {
Mutex::Autolock _l(mLock);
- return acquireCpuConsumerImageLocked(image);
+ return acquireImageLocked(image);
}
media_status_t
@@ -429,12 +447,12 @@
*image = nullptr;
AImage* prevImage = nullptr;
AImage* nextImage = nullptr;
- media_status_t ret = acquireCpuConsumerImageLocked(&prevImage);
+ media_status_t ret = acquireImageLocked(&prevImage);
if (prevImage == nullptr) {
return ret;
}
for (;;) {
- ret = acquireCpuConsumerImageLocked(&nextImage);
+ ret = acquireImageLocked(&nextImage);
if (nextImage == nullptr) {
*image = prevImage;
return AMEDIA_OK;
@@ -464,6 +482,12 @@
return AMEDIA_ERROR_INVALID_PARAMETER;
}
+ if (maxImages > BufferQueueDefs::NUM_BUFFER_SLOTS) {
+ ALOGE("%s: max outstanding image count (%d) cannot be larget than %d.",
+ __FUNCTION__, maxImages, BufferQueueDefs::NUM_BUFFER_SLOTS);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
if (!AImageReader::isSupportedFormat(format)) {
ALOGE("%s: format %d is not supported by AImageReader",
__FUNCTION__, format);
@@ -475,8 +499,10 @@
return AMEDIA_ERROR_INVALID_PARAMETER;
}
- //*reader = new AImageReader(width, height, format, maxImages);
- AImageReader* tmpReader = new AImageReader(width, height, format, maxImages);
+ // Set consumer usage to AHARDWAREBUFFER_USAGE0_CPU_READ_OFTEN by default so that
+ // AImageReader_new behaves as if it's backed by CpuConsumer.
+ AImageReader* tmpReader = new AImageReader(
+ width, height, format, AImageReader::kDefaultUsage, maxImages);
if (tmpReader == nullptr) {
ALOGE("%s: AImageReader allocation failed", __FUNCTION__);
return AMEDIA_ERROR_UNKNOWN;
@@ -565,7 +591,7 @@
media_status_t AImageReader_acquireNextImage(AImageReader* reader, /*out*/AImage** image) {
ALOGV("%s", __FUNCTION__);
if (reader == nullptr || image == nullptr) {
- ALOGE("%s: invalid argument. reader %p, maxImages %p",
+ ALOGE("%s: invalid argument. reader %p, image %p",
__FUNCTION__, reader, image);
return AMEDIA_ERROR_INVALID_PARAMETER;
}
@@ -576,7 +602,7 @@
media_status_t AImageReader_acquireLatestImage(AImageReader* reader, /*out*/AImage** image) {
ALOGV("%s", __FUNCTION__);
if (reader == nullptr || image == nullptr) {
- ALOGE("%s: invalid argument. reader %p, maxImages %p",
+ ALOGE("%s: invalid argument. reader %p, image %p",
__FUNCTION__, reader, image);
return AMEDIA_ERROR_INVALID_PARAMETER;
}
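For context, the AImageReader_new path above relies on AImageReader::kDefaultUsage, whose definition is not part of the hunks shown here. Judging only from the accompanying comment, it is presumably along these lines:
// Assumed definition, inferred from the comment in AImageReader_new; the actual value
// lives outside the hunks shown in this diff.
const int32_t AImageReader::kDefaultUsage = AHARDWAREBUFFER_USAGE0_CPU_READ_OFTEN;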
diff --git a/media/ndk/NdkImageReaderPriv.h b/media/ndk/NdkImageReaderPriv.h
index 8b540fa..8becb1d 100644
--- a/media/ndk/NdkImageReaderPriv.h
+++ b/media/ndk/NdkImageReaderPriv.h
@@ -25,7 +25,8 @@
#include <utils/Mutex.h>
#include <utils/StrongPointer.h>
-#include <gui/CpuConsumer.h>
+#include <gui/BufferItem.h>
+#include <gui/BufferItemConsumer.h>
#include <gui/Surface.h>
#include <media/stagefright/foundation/ALooper.h>
@@ -48,11 +49,16 @@
struct AImageReader : public RefBase {
public:
+ static const int32_t kDefaultUsage;
static bool isSupportedFormat(int32_t format);
static int getNumPlanesForFormat(int32_t format);
- AImageReader(int32_t width, int32_t height, int32_t format, int32_t maxImages);
+ AImageReader(int32_t width,
+ int32_t height,
+ int32_t format,
+ uint64_t usage,
+ int32_t maxImages);
~AImageReader();
// Initialize AImageReader, uninitialized or failed to initialize AImageReader
@@ -68,22 +74,24 @@
int32_t getWidth() const { return mWidth; };
int32_t getHeight() const { return mHeight; };
int32_t getFormat() const { return mFormat; };
+ uint64_t getUsage() const { return mUsage; };
int32_t getMaxImages() const { return mMaxImages; };
-
private:
friend struct AImage; // for grabbing reader lock
- media_status_t acquireCpuConsumerImageLocked(/*out*/AImage** image);
- CpuConsumer::LockedBuffer* getLockedBufferLocked();
- void returnLockedBufferLocked(CpuConsumer::LockedBuffer* buffer);
+ BufferItem* getBufferItemLocked();
+ void returnBufferItemLocked(BufferItem* buffer);
+
+ // Called by AImageReader_acquireXXX to acquire a buffer and set up an AImage.
+ media_status_t acquireImageLocked(/*out*/AImage** image);
// Called by AImage to close image
void releaseImageLocked(AImage* image);
- static int getBufferWidth(CpuConsumer::LockedBuffer* buffer);
- static int getBufferHeight(CpuConsumer::LockedBuffer* buffer);
+ static int getBufferWidth(BufferItem* buffer);
+ static int getBufferHeight(BufferItem* buffer);
media_status_t setImageListenerLocked(AImageReader_ImageListener* listener);
@@ -102,12 +110,15 @@
};
sp<CallbackHandler> mHandler;
sp<ALooper> mCbLooper; // Looper thread where callbacks actually happen on
+ List<BufferItem*> mBuffers;
- List<CpuConsumer::LockedBuffer*> mBuffers;
const int32_t mWidth;
const int32_t mHeight;
const int32_t mFormat;
+ const uint64_t mUsage;
const int32_t mMaxImages;
+
+ // TODO(jwcai) Seems completely unused in AImageReader class.
const int32_t mNumPlanes;
struct FrameListener : public ConsumerBase::FrameAvailableListener {
@@ -130,7 +141,7 @@
sp<IGraphicBufferProducer> mProducer;
sp<Surface> mSurface;
- sp<CpuConsumer> mCpuConsumer;
+ sp<BufferItemConsumer> mBufferItemConsumer;
sp<ANativeWindow> mWindow;
List<AImage*> mAcquiredImages;
diff --git a/media/ndk/NdkMediaMuxer.cpp b/media/ndk/NdkMediaMuxer.cpp
index b1b0362..bbf33cd 100644
--- a/media/ndk/NdkMediaMuxer.cpp
+++ b/media/ndk/NdkMediaMuxer.cpp
@@ -81,7 +81,8 @@
ssize_t AMediaMuxer_addTrack(AMediaMuxer *muxer, const AMediaFormat *format) {
sp<AMessage> msg;
AMediaFormat_getFormat(format, &msg);
- return translate_error(muxer->mImpl->addTrack(msg));
+ ssize_t ret = muxer->mImpl->addTrack(msg);
+ return (ret >= 0) ? ret : translate_error(ret);
}
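The change above matters because a non-negative return from addTrack() is a track index rather than a status code, so passing it through translate_error() would corrupt valid indices. A caller-side illustration, with assumed identifiers (muxer, format, sampleData, bufferInfo are not part of this patch):
// Illustration only: the non-negative index is what later write calls expect.
ssize_t trackIndex = AMediaMuxer_addTrack(muxer, format);
if (trackIndex >= 0) {
    AMediaMuxer_start(muxer);
    AMediaMuxer_writeSampleData(muxer, trackIndex, sampleData, &bufferInfo);
}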
EXPORT
diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h
index 301c5b1..8ef31d1 100644
--- a/services/audioflinger/FastMixerDumpState.h
+++ b/services/audioflinger/FastMixerDumpState.h
@@ -35,7 +35,7 @@
// This packed representation is used to keep the information atomic.
union FastTrackUnderruns {
FastTrackUnderruns() { mAtomic = 0;
- COMPILE_TIME_ASSERT_FUNCTION_SCOPE(sizeof(FastTrackUnderruns) == sizeof(uint32_t)); }
+ static_assert(sizeof(FastTrackUnderruns) == sizeof(uint32_t), "FastTrackUnderrun"); }
FastTrackUnderruns(const FastTrackUnderruns& copyFrom) : mAtomic(copyFrom.mAtomic) { }
FastTrackUnderruns& operator=(const FastTrackUnderruns& rhs)
{ if (this != &rhs) mAtomic = rhs.mAtomic; return *this; }
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 895818c..4ab7604 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -418,6 +418,25 @@
(mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
}
+
+ // if SCO headset is connected and we are told to use it, play ringtone over
+ // speaker and BT SCO
+ if (((availableOutputDevicesType & AUDIO_DEVICE_OUT_ALL_SCO) != 0) &&
+ (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO)) {
+ uint32_t device2 = AUDIO_DEVICE_NONE;
+ device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
+ if (device2 == AUDIO_DEVICE_NONE) {
+ device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
+ }
+ if (device2 == AUDIO_DEVICE_NONE) {
+ device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
+ }
+
+ if (device2 != AUDIO_DEVICE_NONE) {
+ device |= device2;
+ break;
+ }
+ }
// The second device used for sonification is the same as the device used by media strategy
// FALL THROUGH
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 702c92d..4318a11 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -423,20 +423,18 @@
// Remove cached shim parameters
state->setShimParams(CameraParameters());
- // Remove the client from the list of active clients
+ // Remove the client from the list of active clients, if there is one
clientToDisconnect = removeClientLocked(id);
+ }
+ // Disconnect client
+ if (clientToDisconnect.get() != nullptr) {
+ ALOGI("%s: Client for camera ID %s evicted due to device status change from HAL",
+ __FUNCTION__, id.string());
// Notify the client of disconnection
clientToDisconnect->notifyError(
hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
CaptureResultExtras{});
- }
-
- ALOGI("%s: Client for camera ID %s evicted due to device status change from HAL",
- __FUNCTION__, id.string());
-
- // Disconnect client
- if (clientToDisconnect.get() != nullptr) {
// Ensure not in binder RPC so client disconnect PID checks work correctly
LOG_ALWAYS_FATAL_IF(getCallingPid() != getpid(),
"onDeviceStatusChanged must be called from the camera service process!");
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index f6ad7d7..f8b2908 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -21,6 +21,7 @@
#include "CameraProviderManager.h"
#include <chrono>
+#include <inttypes.h>
#include <hidl/ServiceManagement.h>
namespace android {
@@ -65,7 +66,7 @@
}
// See if there's a passthrough HAL, but let's not complain if there's not
- addProvider(kLegacyProviderName, /*expected*/ false);
+ addProviderLocked(kLegacyProviderName, /*expected*/ false);
return OK;
}
@@ -194,14 +195,19 @@
for (auto& provider : mProviders) {
hardware::hidl_vec<VendorTagSection> vts;
Status status;
- provider->mInterface->getVendorTags(
+ hardware::Return<void> ret;
+ ret = provider->mInterface->getVendorTags(
[&](auto s, const auto& vendorTagSecs) {
status = s;
if (s == Status::OK) {
vts = vendorTagSecs;
}
});
-
+ if (!ret.isOk()) {
+ ALOGE("%s: Transaction error getting vendor tags from provider '%s': %s",
+ __FUNCTION__, provider->mProviderName.c_str(), ret.description().c_str());
+ return DEAD_OBJECT;
+ }
if (status != Status::OK) {
return mapToStatusT(status);
}
@@ -239,13 +245,19 @@
auto *deviceInfo3 = static_cast<ProviderInfo::DeviceInfo3*>(deviceInfo);
Status status;
- deviceInfo3->mInterface->open(callback, [&status, &session]
+ hardware::Return<void> ret;
+ ret = deviceInfo3->mInterface->open(callback, [&status, &session]
(Status s, const sp<device::V3_2::ICameraDeviceSession>& cameraSession) {
status = s;
if (status == Status::OK) {
*session = cameraSession;
}
});
+ if (!ret.isOk()) {
+ ALOGE("%s: Transaction error opening a session for camera device %s: %s",
+ __FUNCTION__, id.c_str(), ret.description().c_str());
+ return DEAD_OBJECT;
+ }
return mapToStatusT(status);
}
@@ -262,7 +274,12 @@
auto *deviceInfo1 = static_cast<ProviderInfo::DeviceInfo1*>(deviceInfo);
- Status status = deviceInfo1->mInterface->open(callback);
+ hardware::Return<Status> status = deviceInfo1->mInterface->open(callback);
+ if (!status.isOk()) {
+ ALOGE("%s: Transaction error opening a session for camera device %s: %s",
+ __FUNCTION__, id.c_str(), status.description().c_str());
+ return DEAD_OBJECT;
+ }
if (status == Status::OK) {
*session = deviceInfo1->mInterface;
}
@@ -276,7 +293,7 @@
bool /*preexisting*/) {
std::lock_guard<std::mutex> lock(mInterfaceMutex);
- addProvider(name);
+ addProviderLocked(name);
return hardware::Return<void>();
}
@@ -304,7 +321,7 @@
}
-status_t CameraProviderManager::addProvider(const std::string& newProvider, bool expected) {
+status_t CameraProviderManager::addProviderLocked(const std::string& newProvider, bool expected) {
for (const auto& providerInfo : mProviders) {
if (providerInfo->mProviderName == newProvider) {
ALOGW("%s: Camera provider HAL with name '%s' already registered", __FUNCTION__,
@@ -312,13 +329,14 @@
return ALREADY_EXISTS;
}
}
- sp<provider::V2_4::ICameraProvider> interface =
- mServiceProxy->getService(newProvider);
+
+ sp<provider::V2_4::ICameraProvider> interface;
+ interface = mServiceProxy->getService(newProvider);
if (interface == nullptr) {
- ALOGW("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
- newProvider.c_str());
if (expected) {
+ ALOGE("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
+ newProvider.c_str());
return BAD_VALUE;
} else {
return OK;
@@ -338,15 +356,39 @@
}
status_t CameraProviderManager::removeProvider(const std::string& provider) {
+ std::unique_lock<std::mutex> lock(mInterfaceMutex);
+ std::vector<String8> removedDeviceIds;
+ status_t res = NAME_NOT_FOUND;
for (auto it = mProviders.begin(); it != mProviders.end(); it++) {
if ((*it)->mProviderName == provider) {
+ removedDeviceIds.reserve((*it)->mDevices.size());
+ for (auto& deviceInfo : (*it)->mDevices) {
+ removedDeviceIds.push_back(String8(deviceInfo->mId.c_str()));
+ }
mProviders.erase(it);
- return OK;
+ res = OK;
+ break;
}
}
- ALOGW("%s: Camera provider HAL with name '%s' is not registered", __FUNCTION__,
- provider.c_str());
- return NAME_NOT_FOUND;
+ if (res != OK) {
+ ALOGW("%s: Camera provider HAL with name '%s' is not registered", __FUNCTION__,
+ provider.c_str());
+ } else {
+ // Inform camera service of loss of presence for all the devices from this provider,
+ // without lock held for reentrancy
+ sp<StatusListener> listener = getStatusListener();
+ if (listener != nullptr) {
+ lock.unlock();
+ for (auto& id : removedDeviceIds) {
+ listener->onDeviceStatusChanged(id, CameraDeviceStatus::NOT_PRESENT);
+ }
+ }
+ }
+ return res;
+}
+
+sp<CameraProviderManager::StatusListener> CameraProviderManager::getStatusListener() const {
+ return mListener.promote();
}
/**** Methods for ProviderInfo ****/
@@ -370,17 +412,31 @@
}
ALOGI("Connecting to new camera provider: %s, isRemote? %d",
mProviderName.c_str(), mInterface->isRemote());
- Status status = mInterface->setCallback(this);
+ hardware::Return<Status> status = mInterface->setCallback(this);
+ if (!status.isOk()) {
+ ALOGE("%s: Transaction error setting up callbacks with camera provider '%s': %s",
+ __FUNCTION__, mProviderName.c_str(), status.description().c_str());
+ return DEAD_OBJECT;
+ }
if (status != Status::OK) {
ALOGE("%s: Unable to register callbacks with camera provider '%s'",
__FUNCTION__, mProviderName.c_str());
return mapToStatusT(status);
}
- // TODO: Register for hw binder death notifications as well
+
+ hardware::Return<bool> linked = mInterface->linkToDeath(this, /*cookie*/ mId);
+ if (!linked.isOk()) {
+ ALOGE("%s: Transaction error in linking to camera provider '%s' death: %s",
+ __FUNCTION__, mProviderName.c_str(), linked.description().c_str());
+ return DEAD_OBJECT;
+ } else if (!linked) {
+ ALOGW("%s: Unable to link to provider '%s' death notifications",
+ __FUNCTION__, mProviderName.c_str());
+ }
// Get initial list of camera devices, if any
std::vector<std::string> devices;
- mInterface->getCameraIdList([&status, &devices](
+ hardware::Return<void> ret = mInterface->getCameraIdList([&status, &devices](
Status idStatus,
const hardware::hidl_vec<hardware::hidl_string>& cameraDeviceNames) {
status = idStatus;
@@ -389,18 +445,29 @@
devices.push_back(cameraDeviceNames[i]);
}
} });
-
+ if (!ret.isOk()) {
+ ALOGE("%s: Transaction error in getting camera ID list from provider '%s': %s",
+ __FUNCTION__, mProviderName.c_str(), ret.description().c_str());
+ return DEAD_OBJECT;
+ }
if (status != Status::OK) {
ALOGE("%s: Unable to query for camera devices from provider '%s'",
__FUNCTION__, mProviderName.c_str());
return mapToStatusT(status);
}
+ sp<StatusListener> listener = mManager->getStatusListener();
for (auto& device : devices) {
- status_t res = addDevice(device);
+ std::string id;
+ status_t res = addDevice(device,
+ hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT, &id);
if (res != OK) {
ALOGE("%s: Unable to enumerate camera device '%s': %s (%d)",
__FUNCTION__, device.c_str(), strerror(-res), res);
+ continue;
+ }
+ if (listener != nullptr) {
+ listener->onDeviceStatusChanged(String8(id.c_str()), CameraDeviceStatus::PRESENT);
}
}
@@ -462,8 +529,9 @@
}
status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16>&) const {
- dprintf(fd, "== Camera Provider HAL %s (v2.4) static info: %zu devices: ==\n",
- mProviderName.c_str(), mDevices.size());
+ dprintf(fd, "== Camera Provider HAL %s (v2.4, %s) static info: %zu devices: ==\n",
+ mProviderName.c_str(), mInterface->isRemote() ? "remote" : "passthrough",
+ mDevices.size());
for (auto& device : mDevices) {
dprintf(fd, "== Camera HAL device %s (v%d.%d) static information: ==\n", device->mName.c_str(),
@@ -512,7 +580,7 @@
sp<StatusListener> listener;
std::string id;
{
- std::lock_guard<std::mutex> lock(mManager->mStatusListenerMutex);
+ std::lock_guard<std::mutex> lock(mLock);
bool known = false;
for (auto& deviceInfo : mDevices) {
if (deviceInfo->mName == cameraDeviceName) {
@@ -534,7 +602,7 @@
}
addDevice(cameraDeviceName, newStatus, &id);
}
- listener = mManager->mListener.promote();
+ listener = mManager->getStatusListener();
}
// Call without lock held to allow reentrancy into provider manager
if (listener != nullptr) {
@@ -565,7 +633,7 @@
mProviderName.c_str(), cameraDeviceName.c_str(), newStatus);
return hardware::Void();
}
- listener = mManager->mListener.promote();
+ listener = mManager->getStatusListener();
}
// Call without lock held to allow reentrancy into provider manager
if (listener != nullptr) {
@@ -574,6 +642,16 @@
return hardware::Void();
}
+void CameraProviderManager::ProviderInfo::serviceDied(uint64_t cookie,
+ const wp<hidl::base::V1_0::IBase>& who) {
+ (void) who;
+ ALOGI("Camera provider '%s' has died; removing it", mProviderName.c_str());
+ if (cookie != mId) {
+ ALOGW("%s: Unexpected serviceDied cookie %" PRIu64 ", expected %" PRIu32,
+ __FUNCTION__, cookie, mId);
+ }
+ mManager->removeProvider(mProviderName);
+}
template<class DeviceInfoT>
std::unique_ptr<CameraProviderManager::ProviderInfo::DeviceInfo>
@@ -615,11 +693,17 @@
<device::V1_0::ICameraDevice>(const std::string &name) const {
Status status;
sp<device::V1_0::ICameraDevice> cameraInterface;
- mInterface->getCameraDeviceInterface_V1_x(name, [&status, &cameraInterface](
+ hardware::Return<void> ret;
+ ret = mInterface->getCameraDeviceInterface_V1_x(name, [&status, &cameraInterface](
Status s, sp<device::V1_0::ICameraDevice> interface) {
status = s;
cameraInterface = interface;
});
+ if (!ret.isOk()) {
+ ALOGE("%s: Transaction error trying to obtain interface for camera device %s: %s",
+ __FUNCTION__, name.c_str(), ret.description().c_str());
+ return nullptr;
+ }
if (status != Status::OK) {
ALOGE("%s: Unable to obtain interface for camera device %s: %s", __FUNCTION__,
name.c_str(), statusToString(status));
@@ -634,11 +718,17 @@
<device::V3_2::ICameraDevice>(const std::string &name) const {
Status status;
sp<device::V3_2::ICameraDevice> cameraInterface;
- mInterface->getCameraDeviceInterface_V3_x(name, [&status, &cameraInterface](
+ hardware::Return<void> ret;
+ ret = mInterface->getCameraDeviceInterface_V3_x(name, [&status, &cameraInterface](
Status s, sp<device::V3_2::ICameraDevice> interface) {
status = s;
cameraInterface = interface;
});
+ if (!ret.isOk()) {
+ ALOGE("%s: Transaction error trying to obtain interface for camera device %s: %s",
+ __FUNCTION__, name.c_str(), ret.description().c_str());
+ return nullptr;
+ }
if (status != Status::OK) {
ALOGE("%s: Unable to obtain interface for camera device %s: %s", __FUNCTION__,
name.c_str(), statusToString(status));
@@ -665,25 +755,37 @@
mInterface(interface) {
// Get default parameters and initialize flash unit availability
// Requires powering on the camera device
- Status status = mInterface->open(nullptr);
- if (status != Status::OK) {
- ALOGE("%s: Unable to open camera device %s to check for a flash unit: %s (%d)", __FUNCTION__,
- mId.c_str(), CameraProviderManager::statusToString(status), status);
+ hardware::Return<Status> status = mInterface->open(nullptr);
+ if (!status.isOk()) {
+ ALOGE("%s: Transaction error opening camera device %s to check for a flash unit: %s",
+ __FUNCTION__, mId.c_str(), status.description().c_str());
return;
}
- mInterface->getParameters([this](const hardware::hidl_string& parms) {
+ if (status != Status::OK) {
+ ALOGE("%s: Unable to open camera device %s to check for a flash unit: %s", __FUNCTION__,
+ mId.c_str(), CameraProviderManager::statusToString(status));
+ return;
+ }
+ hardware::Return<void> ret;
+ ret = mInterface->getParameters([this](const hardware::hidl_string& parms) {
mDefaultParameters.unflatten(String8(parms.c_str()));
});
-
+ if (!ret.isOk()) {
+ ALOGE("%s: Transaction error reading camera device %s params to check for a flash unit: %s",
+ __FUNCTION__, mId.c_str(), ret.description().c_str());
+ return;
+ }
const char *flashMode =
mDefaultParameters.get(CameraParameters::KEY_SUPPORTED_FLASH_MODES);
if (flashMode && strstr(flashMode, CameraParameters::FLASH_MODE_TORCH)) {
mHasFlashUnit = true;
- } else {
- mHasFlashUnit = false;
}
- mInterface->close();
+ ret = mInterface->close();
+ if (!ret.isOk()) {
+ ALOGE("%s: Transaction error closing camera device %s after check for a flash unit: %s",
+ __FUNCTION__, mId.c_str(), ret.description().c_str());
+ }
}
CameraProviderManager::ProviderInfo::DeviceInfo1::~DeviceInfo1() {}
@@ -698,10 +800,16 @@
Status status;
device::V1_0::CameraInfo cInfo;
- mInterface->getCameraInfo([&status, &cInfo](Status s, device::V1_0::CameraInfo camInfo) {
+ hardware::Return<void> ret;
+ ret = mInterface->getCameraInfo([&status, &cInfo](Status s, device::V1_0::CameraInfo camInfo) {
status = s;
cInfo = camInfo;
});
+ if (!ret.isOk()) {
+ ALOGE("%s: Transaction error reading camera info from device %s: %s",
+ __FUNCTION__, mId.c_str(), ret.description().c_str());
+ return DEAD_OBJECT;
+ }
if (status != Status::OK) {
return mapToStatusT(status);
}
@@ -716,7 +824,8 @@
info->facing = hardware::CAMERA_FACING_FRONT;
break;
default:
- ALOGW("%s: Unknown camera facing: %d", __FUNCTION__, cInfo.facing);
+ ALOGW("%s: Device %s: Unknown camera facing: %d",
+ __FUNCTION__, mId.c_str(), cInfo.facing);
info->facing = hardware::CAMERA_FACING_BACK;
}
info->orientation = cInfo.orientation;
@@ -733,7 +842,8 @@
mInterface(interface) {
// Get camera characteristics and initialize flash unit availability
Status status;
- mInterface->getCameraCharacteristics([&status, this](Status s,
+ hardware::Return<void> ret;
+ ret = mInterface->getCameraCharacteristics([&status, this](Status s,
device::V3_2::CameraMetadata metadata) {
status = s;
if (s == Status::OK) {
@@ -742,6 +852,12 @@
mCameraCharacteristics = buffer;
}
});
+ if (!ret.isOk()) {
+ ALOGE("%s: Transaction error getting camera characteristics for device %s"
+ " to check for a flash unit: %s", __FUNCTION__, mId.c_str(),
+ ret.description().c_str());
+ return;
+ }
if (status != Status::OK) {
ALOGE("%s: Unable to get camera characteristics for device %s: %s (%d)",
__FUNCTION__, mId.c_str(), CameraProviderManager::statusToString(status), status);
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 5ae16cd..b1da831 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -226,7 +226,10 @@
static HardwareServiceInteractionProxy sHardwareServiceInteractionProxy;
- struct ProviderInfo : virtual public hardware::camera::provider::V2_4::ICameraProviderCallback {
+ struct ProviderInfo :
+ virtual public hardware::camera::provider::V2_4::ICameraProviderCallback,
+ virtual public hardware::hidl_death_recipient
+ {
const std::string mProviderName;
const sp<hardware::camera::provider::V2_4::ICameraProvider> mInterface;
@@ -254,6 +257,9 @@
const hardware::hidl_string& cameraDeviceName,
hardware::camera::common::V1_0::TorchModeStatus newStatus) override;
+ // hidl_death_recipient interface - this locks the parent mInterfaceMutex
+ virtual void serviceDied(uint64_t cookie, const wp<hidl::base::V1_0::IBase>& who) override;
+
// Basic device information, common to all camera devices
struct DeviceInfo {
const std::string mName; // Full instance name
@@ -327,6 +333,8 @@
std::string mType;
uint32_t mId;
+ std::mutex mLock;
+
CameraProviderManager *mManager;
// Templated method to instantiate the right kind of DeviceInfo and call the
@@ -357,8 +365,10 @@
hardware::hidl_version minVersion = hardware::hidl_version{0,0},
hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
- status_t addProvider(const std::string& newProvider, bool expected = true);
+ status_t addProviderLocked(const std::string& newProvider, bool expected = true);
+
status_t removeProvider(const std::string& provider);
+ sp<StatusListener> getStatusListener() const;
bool isValidDeviceLocked(const std::string &id, uint16_t majorVersion) const;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 60c716f..71e52af 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2452,13 +2452,14 @@
status_t Camera3Device::registerInFlight(uint32_t frameNumber,
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
- const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
+ const AeTriggerCancelOverride_t &aeTriggerCancelOverride,
+ bool hasAppCallback) {
ATRACE_CALL();
Mutex::Autolock l(mInFlightLock);
ssize_t res;
res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras, hasInput,
- aeTriggerCancelOverride));
+ aeTriggerCancelOverride, hasAppCallback));
if (res < 0) return res;
if (mInFlightMap.size() == 1) {
@@ -2512,8 +2513,9 @@
(request.haveResultMetadata && shutterTimestamp != 0))) {
ATRACE_ASYNC_END("frame capture", frameNumber);
- // Sanity check - if sensor timestamp matches shutter timestamp
- if (request.requestStatus == OK &&
+ // Sanity check - the sensor timestamp should match the shutter timestamp, but only
+ // for requests that have an app callback.
+ if (request.hasCallback && request.requestStatus == OK &&
sensorTimestamp != shutterTimestamp) {
SET_ERR("sensor timestamp (%" PRId64
") for frame %d doesn't match shutter timestamp (%" PRId64 ")",
@@ -2702,10 +2704,10 @@
InFlightRequest &request = mInFlightMap.editValueAt(idx);
ALOGVV("%s: got InFlightRequest requestId = %" PRId32
", frameNumber = %" PRId64 ", burstId = %" PRId32
- ", partialResultCount = %d",
+ ", partialResultCount = %d, hasCallback = %d",
__FUNCTION__, request.resultExtras.requestId,
request.resultExtras.frameNumber, request.resultExtras.burstId,
- result->partial_result);
+ result->partial_result, request.hasCallback);
// Always update the partial count to the latest one if it's not 0
// (buffers only). When framework aggregates adjacent partial results
// into one, the latest partial count will be used.
@@ -2743,7 +2745,7 @@
}
}
- if (isPartialResult) {
+ if (isPartialResult && request.hasCallback) {
// Send partial capture result
sendPartialCaptureResult(result->result, request.resultExtras, frameNumber,
request.aeTriggerCancelOverride);
@@ -2807,7 +2809,7 @@
if (shutterTimestamp == 0) {
request.pendingMetadata = result->result;
request.collectedPartialResult = collectedPartialResult;
- } else {
+ } else if (request.hasCallback) {
CameraMetadata metadata;
metadata = result->result;
sendCaptureResult(metadata, request.resultExtras,
@@ -2973,20 +2975,20 @@
}
}
- ALOGVV("Camera %s: %s: Shutter fired for frame %d (id %d) at %" PRId64,
+ r.shutterTimestamp = msg.timestamp;
+ if (r.hasCallback) {
+ ALOGVV("Camera %s: %s: Shutter fired for frame %d (id %d) at %" PRId64,
mId.string(), __FUNCTION__,
msg.frame_number, r.resultExtras.requestId, msg.timestamp);
- // Call listener, if any
- if (listener != NULL) {
- listener->notifyShutter(r.resultExtras, msg.timestamp);
+ // Call listener, if any
+ if (listener != NULL) {
+ listener->notifyShutter(r.resultExtras, msg.timestamp);
+ }
+ // send pending result and buffers
+ sendCaptureResult(r.pendingMetadata, r.resultExtras,
+ r.collectedPartialResult, msg.frame_number,
+ r.hasInputBuffer, r.aeTriggerCancelOverride);
}
-
- r.shutterTimestamp = msg.timestamp;
-
- // send pending result and buffers
- sendCaptureResult(r.pendingMetadata, r.resultExtras,
- r.collectedPartialResult, msg.frame_number,
- r.hasInputBuffer, r.aeTriggerCancelOverride);
returnOutputBuffers(r.pendingOutputBuffers.array(),
r.pendingOutputBuffers.size(), r.shutterTimestamp);
r.pendingOutputBuffers.clear();
@@ -3887,7 +3889,8 @@
status_t Camera3Device::RequestThread::prepareHalRequests() {
ATRACE_CALL();
- for (auto& nextRequest : mNextRequests) {
+ for (size_t i = 0; i < mNextRequests.size(); i++) {
+ auto& nextRequest = mNextRequests.editItemAt(i);
sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
camera3_capture_request_t* halRequest = &nextRequest.halRequest;
Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;
@@ -3964,8 +3967,8 @@
outputBuffers->insertAt(camera3_stream_buffer_t(), 0,
captureRequest->mOutputStreams.size());
halRequest->output_buffers = outputBuffers->array();
- for (size_t i = 0; i < captureRequest->mOutputStreams.size(); i++) {
- sp<Camera3OutputStreamInterface> outputStream = captureRequest->mOutputStreams.editItemAt(i);
+ for (size_t j = 0; j < captureRequest->mOutputStreams.size(); j++) {
+ sp<Camera3OutputStreamInterface> outputStream = captureRequest->mOutputStreams.editItemAt(j);
// Prepare video buffers for high speed recording on the first video request.
if (mPrepareVideoStream && outputStream->isVideoStream()) {
@@ -3983,8 +3986,8 @@
}
}
- res = outputStream->getBuffer(&outputBuffers->editItemAt(i),
- captureRequest->mOutputSurfaces[i]);
+ res = outputStream->getBuffer(&outputBuffers->editItemAt(j),
+ captureRequest->mOutputSurfaces[j]);
if (res != OK) {
// Can't get output buffer from gralloc queue - this could be due to
// abandoned queue or other consumer misbehavior, so not a fatal
@@ -4006,10 +4009,19 @@
CLOGE("RequestThread: Parent is gone");
return INVALID_OPERATION;
}
+
+ // If this request list is for constrained high speed recording (not
+ // preview), and the current request is not the last one in the batch,
+ // do not send callback to the app.
+ bool hasCallback = true;
+ if (mNextRequests[0].captureRequest->mBatchSize > 1 && i != mNextRequests.size()-1) {
+ hasCallback = false;
+ }
res = parent->registerInFlight(halRequest->frame_number,
totalNumBuffers, captureRequest->mResultExtras,
/*hasInput*/halRequest->input_buffer != NULL,
- captureRequest->mAeTriggerCancelOverride);
+ captureRequest->mAeTriggerCancelOverride,
+ hasCallback);
ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
", burstId = %" PRId32 ".",
__FUNCTION__,
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 998cc0b..c10b1b4 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -859,6 +859,12 @@
// CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
AeTriggerCancelOverride_t aeTriggerCancelOverride;
+ // Whether this inflight request's shutter and result callbacks should be delivered
+ // to the app. This is true if the request is the last one in a constrained high
+ // speed recording request list, and also true if the request list is not for
+ // constrained high speed recording at all.
+ bool hasCallback;
+
// Default constructor needed by KeyedVector
InFlightRequest() :
shutterTimestamp(0),
@@ -867,11 +873,12 @@
haveResultMetadata(false),
numBuffersLeft(0),
hasInputBuffer(false),
- aeTriggerCancelOverride({false, 0, false, 0}){
+ aeTriggerCancelOverride({false, 0, false, 0}),
+ hasCallback(true) {
}
InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
- AeTriggerCancelOverride aeTriggerCancelOverride) :
+ AeTriggerCancelOverride aeTriggerCancelOverride, bool hasAppCallback) :
shutterTimestamp(0),
sensorTimestamp(0),
requestStatus(OK),
@@ -879,7 +886,8 @@
numBuffersLeft(numBuffers),
resultExtras(extras),
hasInputBuffer(hasInput),
- aeTriggerCancelOverride(aeTriggerCancelOverride){
+ aeTriggerCancelOverride(aeTriggerCancelOverride),
+ hasCallback(hasAppCallback) {
}
};
@@ -892,7 +900,7 @@
status_t registerInFlight(uint32_t frameNumber,
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
- const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+ const AeTriggerCancelOverride_t &aeTriggerCancelOverride, bool callback);
/**
* Override result metadata for cancelling AE precapture trigger applied in
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 8814cf2..5cb7f92 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -34,8 +34,8 @@
libcutils \
libhwbinder \
libhidltransport \
+ libstagefright_omx \
android.hardware.media.omx@1.0 \
- android.hardware.media.omx@1.0-impl \
android.hidl.memory@1.0
LOCAL_C_INCLUDES := \
$(TOP)/frameworks/av/media/libstagefright \
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index ef305b4..38717b5 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -33,7 +33,7 @@
#include <android/hardware/media/omx/1.0/IOmx.h>
#include <hidl/HidlTransportSupport.h>
-#include <omx/hal/1.0/impl/Omx.h>
+#include <omx/1.0/Omx.h>
using namespace android;