Snap for 8948084 from 65a5c11d7c70e523b9f40b5e0b8ccbd4d48d3568 to mainline-appsearch-release
Change-Id: Ia9fe8be47be97abd53315c74607f86f52496a47c
diff --git a/shared/OpenglCodecCommon/GLClientState.cpp b/shared/OpenglCodecCommon/GLClientState.cpp
index 852f36a..a6dbbd6 100644
--- a/shared/OpenglCodecCommon/GLClientState.cpp
+++ b/shared/OpenglCodecCommon/GLClientState.cpp
@@ -1203,7 +1203,7 @@
pack ? 0 : m_pixelStore.unpack_skip_images);
}
-size_t GLClientState::pboNeededDataSize(GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, int pack) const
+size_t GLClientState::pboNeededDataSize(GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, int pack, int ignoreTrailing) const
{
if (width <= 0 || height <= 0 || depth <= 0) return 0;
@@ -1231,7 +1231,8 @@
pack ? 0 : m_pixelStore.unpack_image_height,
pack ? m_pixelStore.pack_skip_pixels : m_pixelStore.unpack_skip_pixels,
pack ? m_pixelStore.pack_skip_rows : m_pixelStore.unpack_skip_rows,
- pack ? 0 : m_pixelStore.unpack_skip_images);
+ pack ? 0 : m_pixelStore.unpack_skip_images,
+ ignoreTrailing);
}
@@ -2101,7 +2102,6 @@
}
void GLClientState::removeRenderbuffers(GLsizei n, const GLuint* renderbuffers) {
- std::vector<std::shared_ptr<RboProps>> to_remove;
bool unbindCurrent = false;
{
RenderbufferInfo::ScopedView view(mRboState.rboData);
@@ -2111,18 +2111,12 @@
if (!rboPtr) {
continue;
}
- to_remove.push_back(rboPtr);
+ unbindCurrent |=
+ (mRboState.boundRenderbuffer == rboPtr);
setFboCompletenessDirtyForRbo(rboPtr);
+ view.remove(renderbuffers[i]);
}
}
-
- for (size_t i = 0; i < to_remove.size(); i++) {
- if (mRboState.boundRenderbuffer == to_remove[i]) {
- unbindCurrent = true;
- break;
- }
- view.remove(to_remove[i]->id);
- }
}
if (unbindCurrent) {
diff --git a/shared/OpenglCodecCommon/GLClientState.h b/shared/OpenglCodecCommon/GLClientState.h
index 0639962..308b737 100644
--- a/shared/OpenglCodecCommon/GLClientState.h
+++ b/shared/OpenglCodecCommon/GLClientState.h
@@ -332,7 +332,7 @@
void setLastEncodedBufferBind(GLenum target, GLuint id);
size_t pixelDataSize(GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, int pack) const;
- size_t pboNeededDataSize(GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, int pack) const;
+ size_t pboNeededDataSize(GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, int pack, int ignoreTrailing = 0) const;
size_t clearBufferNumElts(GLenum buffer) const;
void getPackingOffsets2D(GLsizei width, GLsizei height, GLenum format, GLenum type, int* bpp, int* startOffset, int* pixelRowSize, int* totalRowSize, int* skipRows) const;
void getUnpackingOffsets2D(GLsizei width, GLsizei height, GLenum format, GLenum type, int* bpp, int* startOffset, int* pixelRowSize, int* totalRowSize, int* skipRows) const;
diff --git a/shared/OpenglCodecCommon/GLESTextureUtils.cpp b/shared/OpenglCodecCommon/GLESTextureUtils.cpp
index 87d6312..0fa0e23 100644
--- a/shared/OpenglCodecCommon/GLESTextureUtils.cpp
+++ b/shared/OpenglCodecCommon/GLESTextureUtils.cpp
@@ -1,5 +1,7 @@
#include "GLESTextureUtils.h"
+#include <algorithm>
+
#include "glUtils.h"
#include "etc.h"
#include "astc-codec.h"
@@ -254,7 +256,8 @@
int unpackSkipRows,
int unpackSkipImages,
int* start,
- int* end) {
+ int* end,
+ int ignoreTrailing) {
GLsizei inputWidth = (unpackRowLength == 0) ? width : unpackRowLength;
GLsizei inputPitch = computePitch(inputWidth, format, type, unpackAlignment);
@@ -263,7 +266,17 @@
ALOGV("%s: input idim %d %d %d w p h %d %d %d:", __FUNCTION__, width, height, depth, inputWidth, inputPitch, inputHeight);
int startVal = computePackingOffset(format, type, inputWidth, inputHeight, unpackAlignment, unpackSkipPixels, unpackSkipRows, unpackSkipImages);
- int endVal = startVal + inputPitch * inputHeight * depth;
+ int endVal;
+ if (ignoreTrailing) {
+ // The last row needs to have just enough data per spec, and could
+ // ignore alignment.
+ // b/223402256
+ endVal = startVal + inputPitch * inputHeight * (depth - 1);
+ endVal += inputPitch * (std::min(height, inputHeight) - 1);
+ endVal += computePitch(std::min(width, inputWidth), format, type, 1);
+ } else {
+ endVal = startVal + inputPitch * inputHeight * depth;
+ }
if (start) *start = startVal;
if (end) *end = endVal;
@@ -293,7 +306,8 @@
unpackSkipRows,
unpackSkipImages,
&start,
- &end);
+ &end,
+ 0);
return end;
}
@@ -305,7 +319,8 @@
int unpackImageHeight,
int unpackSkipPixels,
int unpackSkipRows,
- int unpackSkipImages) {
+ int unpackSkipImages,
+ int ignoreTrailing) {
int start, end;
computeTextureStartEnd(
@@ -318,7 +333,8 @@
unpackSkipRows,
unpackSkipImages,
&start,
- &end);
+ &end,
+ ignoreTrailing);
return end - start;
}
diff --git a/shared/OpenglCodecCommon/GLESTextureUtils.h b/shared/OpenglCodecCommon/GLESTextureUtils.h
index 0b636ac..45af27e 100644
--- a/shared/OpenglCodecCommon/GLESTextureUtils.h
+++ b/shared/OpenglCodecCommon/GLESTextureUtils.h
@@ -5,6 +5,11 @@
namespace GLESTextureUtils {
+// By spec, the buffer is only required to provide just enough data. The
+// last row does not have to fill unpackRowLength. But our decoder is
+// written to always read full row. So we add "ignoreTrailing" here. When
+// ignoreTrailing == 1 we compute the real size as defined by spec. When
+// ignoreTrailing == 0 we compute the size used by decoder/encoder.
void computeTextureStartEnd(
GLsizei width, GLsizei height, GLsizei depth,
GLenum format, GLenum type,
@@ -15,7 +20,8 @@
int unpackSkipRows,
int unpackSkipImages,
int* start,
- int* end);
+ int* end,
+ int ignoreTrailing);
int computeTotalImageSize(
GLsizei width, GLsizei height, GLsizei depth,
@@ -35,7 +41,8 @@
int unpackImageHeight,
int unpackSkipPixels,
int unpackSkipRows,
- int unpackSkipImages);
+ int unpackSkipImages,
+ int ignoreTrailing);
// Writes out |height| offsets for glReadPixels to read back
// data in separate rows of pixels. Returns:
diff --git a/system/GLESv2_enc/GL2Encoder.cpp b/system/GLESv2_enc/GL2Encoder.cpp
index 63b4ff4..65048cf 100755
--- a/system/GLESv2_enc/GL2Encoder.cpp
+++ b/system/GLESv2_enc/GL2Encoder.cpp
@@ -2721,10 +2721,9 @@
}
// If unpack buffer is nonzero, verify buffer data fits and is evenly divisible by the type.
-
SET_ERROR_IF(ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) &&
ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER) &&
- (state->pboNeededDataSize(width, height, 1, format, type, 0) + (uintptr_t)pixels >
+ (state->pboNeededDataSize(width, height, 1, format, type, 0, 1) + (uintptr_t)pixels >
ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER)->m_size),
GL_INVALID_OPERATION);
SET_ERROR_IF(ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) &&
diff --git a/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.cpp b/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.cpp
index 90b5653..cbc7069 100644
--- a/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.cpp
+++ b/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.cpp
@@ -42,6 +42,8 @@
#include "C2GoldfishAvcDec.h"
+#include <mutex>
+
#define DEBUG 0
#if DEBUG
#define DDD(...) ALOGD(__VA_ARGS__)
@@ -64,6 +66,35 @@
So total maximum output delay is 34 */
constexpr uint32_t kMaxOutputDelay = 34;
constexpr uint32_t kMinInputBytes = 4;
+
+static std::mutex s_decoder_count_mutex;
+static int s_decoder_count = 0;
+
+int allocateDecoderId() {
+ DDD("calling %s", __func__);
+ std::lock_guard<std::mutex> lock(s_decoder_count_mutex);
+ if (s_decoder_count >= 32 || s_decoder_count < 0) {
+ ALOGE("calling %s failed", __func__);
+ return -1;
+ }
+ ++ s_decoder_count;
+ DDD("calling %s success total decoder %d", __func__, s_decoder_count);
+ return s_decoder_count;
+}
+
+bool deAllocateDecoderId() {
+ DDD("calling %s", __func__);
+ std::lock_guard<std::mutex> lock(s_decoder_count_mutex);
+ if (s_decoder_count < 1) {
+ ALOGE("calling %s failed ", __func__);
+ return false;
+ }
+ -- s_decoder_count;
+ DDD("calling %s success total decoder %d", __func__, s_decoder_count);
+ return true;
+}
+
+
} // namespace
class C2GoldfishAvcDec::IntfImpl : public SimpleInterface<void>::BaseParams {
@@ -307,6 +338,8 @@
if (me.v.matrix > C2Color::MATRIX_OTHER) {
me.set().matrix = C2Color::MATRIX_OTHER;
}
+ DDD("default primaries %d default range %d", me.set().primaries,
+ me.set().range);
return C2R::Ok();
}
@@ -326,6 +359,8 @@
if (me.v.matrix > C2Color::MATRIX_OTHER) {
me.set().matrix = C2Color::MATRIX_OTHER;
}
+ DDD("coded primaries %d coded range %d", me.set().primaries,
+ me.set().range);
return C2R::Ok();
}
@@ -336,6 +371,7 @@
(void)mayBlock;
// take default values for all unspecified fields, and coded values for
// specified ones
+ DDD("before change primaries %d range %d", me.v.primaries, me.v.range);
me.set().range =
coded.v.range == RANGE_UNSPECIFIED ? def.v.range : coded.v.range;
me.set().primaries = coded.v.primaries == PRIMARIES_UNSPECIFIED
@@ -346,6 +382,8 @@
: coded.v.transfer;
me.set().matrix = coded.v.matrix == MATRIX_UNSPECIFIED ? def.v.matrix
: coded.v.matrix;
+
+ DDD("after change primaries %d range %d", me.v.primaries, me.v.range);
return C2R::Ok();
}
@@ -357,7 +395,13 @@
int height() const { return mSize->height; }
- private:
+ int primaries() const { return mColorAspects->primaries; }
+
+ int range() const { return mColorAspects->range; }
+
+ int transfer() const { return mColorAspects->transfer; }
+
+ private:
std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
std::shared_ptr<C2StreamMaxPictureSizeTuning::output> mMaxSize;
@@ -393,6 +437,9 @@
C2GoldfishAvcDec::~C2GoldfishAvcDec() { onRelease(); }
c2_status_t C2GoldfishAvcDec::onInit() {
+ ALOGD("calling onInit");
+ mId = allocateDecoderId();
+ if (mId <= 0) return C2_NO_MEMORY;
status_t err = initDecoder();
return err == OK ? C2_OK : C2_CORRUPTED;
}
@@ -407,6 +454,11 @@
void C2GoldfishAvcDec::onReset() { (void)onStop(); }
void C2GoldfishAvcDec::onRelease() {
+ DDD("calling onRelease");
+ if (mId > 0) {
+ deAllocateDecoderId();
+ mId = -1;
+ }
deleteContext();
if (mOutBlock) {
mOutBlock.reset();
@@ -457,6 +509,30 @@
return C2_OK;
}
+void C2GoldfishAvcDec::sendMetadata() {
+ // compare and send if changed
+ MetaDataColorAspects currentMetaData = {1, 0, 0, 0};
+ currentMetaData.primaries = mIntf->primaries();
+ currentMetaData.range = mIntf->range();
+ currentMetaData.transfer = mIntf->transfer();
+
+ DDD("metadata primaries %d range %d transfer %d",
+ (int)(currentMetaData.primaries),
+ (int)(currentMetaData.range),
+ (int)(currentMetaData.transfer)
+ );
+
+ if (mSentMetadata.primaries == currentMetaData.primaries &&
+ mSentMetadata.range == currentMetaData.range &&
+ mSentMetadata.transfer == currentMetaData.transfer) {
+ DDD("metadata is the same, no need to update");
+ return;
+ }
+ std::swap(mSentMetadata, currentMetaData);
+
+ mContext->sendMetadata(&(mSentMetadata));
+}
+
status_t C2GoldfishAvcDec::createDecoder() {
DDD("creating avc context now w %d h %d", mWidth, mHeight);
@@ -476,7 +552,6 @@
}
status_t C2GoldfishAvcDec::initDecoder() {
- // if (OK != createDecoder()) return UNKNOWN_ERROR;
mStride = ALIGN2(mWidth);
mSignalledError = false;
resetPlugin();
@@ -682,7 +757,6 @@
}
void C2GoldfishAvcDec::getVuiParams(h264_image_t &img) {
-
VuiColorAspects vuiColorAspects;
vuiColorAspects.primaries = img.color_primaries;
vuiColorAspects.transfer = img.color_trc;
@@ -931,6 +1005,8 @@
} // end of whChanged
} // end of isSpsFrame
+ sendMetadata();
+
uint32_t delay;
GETTIME(&mTimeStart, nullptr);
TIME_DIFF(mTimeEnd, mTimeStart, delay);
diff --git a/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.h b/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.h
index afa27f5..d90b11a 100644
--- a/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.h
+++ b/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.h
@@ -142,6 +142,10 @@
}
} mBitstreamColorAspects;
+ MetaDataColorAspects mSentMetadata = {1, 0, 0, 0};
+
+ void sendMetadata();
+
// profile
struct timeval mTimeStart;
struct timeval mTimeEnd;
@@ -155,6 +159,7 @@
std::unique_ptr<GoldfishH264Helper> mH264Helper;
+ int mId = -1;
C2_DO_NOT_COPY(C2GoldfishAvcDec);
};
diff --git a/system/codecs/c2/decoders/avcdec/MediaH264Decoder.cpp b/system/codecs/c2/decoders/avcdec/MediaH264Decoder.cpp
index 7909aa9..6560772 100644
--- a/system/codecs/c2/decoders/avcdec/MediaH264Decoder.cpp
+++ b/system/codecs/c2/decoders/avcdec/MediaH264Decoder.cpp
@@ -49,7 +49,7 @@
}
mSlot = slot;
mAddressOffSet = static_cast<unsigned int>(mSlot) * (1 << 20);
- DDD("got memory lot %d addrr %x", mSlot, mAddressOffSet);
+ DDD("got memory lot %d addr %u", mSlot, mAddressOffSet);
mHasAddressSpaceMemory = true;
}
transport->writeParam(mVersion, 0, mAddressOffSet);
@@ -62,7 +62,7 @@
MediaOperation::InitContext, mAddressOffSet);
auto *retptr = transport->getReturnAddr(mAddressOffSet);
mHostHandle = *(uint64_t *)(retptr);
- DDD("initH264Context: got handle to host %lld", mHostHandle);
+ DDD("initH264Context: got handle to host %llu", (unsigned long long)mHostHandle);
}
void MediaH264Decoder::resetH264Context(unsigned int width, unsigned int height,
@@ -87,7 +87,7 @@
void MediaH264Decoder::destroyH264Context() {
- DDD("return memory lot %d addrr %x", (int)(mAddressOffSet >> 23),
+ DDD("return memory lot %d addr %u", (int)(mAddressOffSet >> 23),
mAddressOffSet);
auto transport = GoldfishMediaTransport::getInstance();
transport->writeParam((uint64_t)mHostHandle, 0, mAddressOffSet);
@@ -99,7 +99,7 @@
h264_result_t MediaH264Decoder::decodeFrame(uint8_t *img, size_t szBytes,
uint64_t pts) {
- DDD("decode frame: use handle to host %lld", mHostHandle);
+ DDD("decode frame: use handle to host %llu", (unsigned long long)mHostHandle);
h264_result_t res = {0, 0};
if (!mHasAddressSpaceMemory) {
ALOGE("%s no address space memory", __func__);
@@ -126,12 +126,28 @@
return res;
}
+void MediaH264Decoder::sendMetadata(MetaDataColorAspects *ptr) {
+ DDD("send metadata to host %p", ptr);
+ if (!mHasAddressSpaceMemory) {
+ ALOGE("%s no address space memory", __func__);
+ return;
+ }
+ MetaDataColorAspects& meta = *ptr;
+ auto transport = GoldfishMediaTransport::getInstance();
+ transport->writeParam((uint64_t)mHostHandle, 0, mAddressOffSet);
+ transport->writeParam(meta.type, 1, mAddressOffSet);
+ transport->writeParam(meta.primaries, 2, mAddressOffSet);
+ transport->writeParam(meta.range, 3, mAddressOffSet);
+ transport->writeParam(meta.transfer, 4, mAddressOffSet);
+ transport->sendOperation(MediaCodecType::H264Codec, MediaOperation::SendMetadata, mAddressOffSet);
+}
+
void MediaH264Decoder::flush() {
if (!mHasAddressSpaceMemory) {
ALOGE("%s no address space memory", __func__);
return;
}
- DDD("flush: use handle to host %lld", mHostHandle);
+ DDD("flush: use handle to host %llu", (unsigned long long)mHostHandle);
auto transport = GoldfishMediaTransport::getInstance();
transport->writeParam((uint64_t)mHostHandle, 0, mAddressOffSet);
transport->sendOperation(MediaCodecType::H264Codec, MediaOperation::Flush,
@@ -139,7 +155,7 @@
}
h264_image_t MediaH264Decoder::getImage() {
- DDD("getImage: use handle to host %lld", mHostHandle);
+ DDD("getImage: use handle to host %llu", (unsigned long long)mHostHandle);
h264_image_t res{};
if (!mHasAddressSpaceMemory) {
ALOGE("%s no address space memory", __func__);
@@ -174,7 +190,7 @@
h264_image_t
MediaH264Decoder::renderOnHostAndReturnImageMetadata(int hostColorBufferId) {
- DDD("%s: use handle to host %lld", __func__, mHostHandle);
+ DDD("%s: use handle to host %llu", __func__, (unsigned long long)mHostHandle);
h264_image_t res{};
if (hostColorBufferId < 0) {
ALOGE("%s negative color buffer id %d", __func__, hostColorBufferId);
diff --git a/system/codecs/c2/decoders/avcdec/MediaH264Decoder.h b/system/codecs/c2/decoders/avcdec/MediaH264Decoder.h
index 1c1b262..e184cbd 100644
--- a/system/codecs/c2/decoders/avcdec/MediaH264Decoder.h
+++ b/system/codecs/c2/decoders/avcdec/MediaH264Decoder.h
@@ -17,6 +17,8 @@
#ifndef GOLDFISH_MEDIA_H264_DEC_H_
#define GOLDFISH_MEDIA_H264_DEC_H_
+#include "goldfish_media_utils.h"
+
struct h264_init_result_t {
uint64_t host_handle;
int ret;
@@ -89,5 +91,14 @@
// ask host to render to hostColorBufferId, return only image metadata back
// to guest
h264_image_t renderOnHostAndReturnImageMetadata(int hostColorBufferId);
+
+ // send metadata about the bitstream to host, such as color aspects that
+ // are set by the framework, e.g., color primaries (601, 709 etc), range
+ // (full range or limited range), transfer etc. given metadata could be
+ // of all kinds of types, the convention is that the first field serves as
+ // metadata type id. host will check the type id to decide what to do with
+ // it; unrecognized typeid will be discarded by host side.
+
+ void sendMetadata(MetaDataColorAspects *ptr);
};
#endif
diff --git a/system/codecs/c2/decoders/base/include/goldfish_media_utils.h b/system/codecs/c2/decoders/base/include/goldfish_media_utils.h
index efa8859..a45cda9 100644
--- a/system/codecs/c2/decoders/base/include/goldfish_media_utils.h
+++ b/system/codecs/c2/decoders/base/include/goldfish_media_utils.h
@@ -26,6 +26,13 @@
Max = 4,
};
+struct MetaDataColorAspects {
+ uint64_t type = 1;
+ uint64_t primaries;
+ uint64_t range;
+ uint64_t transfer;
+};
+
enum class MediaOperation : __u8 {
InitContext = 0,
DestroyContext = 1,
@@ -33,7 +40,8 @@
GetImage = 3,
Flush = 4,
Reset = 5,
- Max = 6,
+ SendMetadata = 6,
+ Max = 7,
};
// This class will abstract away the knowledge required to send media codec data
diff --git a/system/codecs/c2/decoders/hevcdec/C2GoldfishHevcDec.cpp b/system/codecs/c2/decoders/hevcdec/C2GoldfishHevcDec.cpp
index 7008bd5..990f4c2 100644
--- a/system/codecs/c2/decoders/hevcdec/C2GoldfishHevcDec.cpp
+++ b/system/codecs/c2/decoders/hevcdec/C2GoldfishHevcDec.cpp
@@ -347,6 +347,13 @@
int height() const { return mSize->height; }
+ int primaries() const { return mColorAspects->primaries; }
+
+ int range() const { return mColorAspects->range; }
+
+ int transfer() const { return mColorAspects->transfer; }
+
+
private:
std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
@@ -404,10 +411,11 @@
}
void C2GoldfishHevcDec::decodeHeaderAfterFlush() {
- if (mContext && !mCsd0.empty() && !mCsd1.empty()) {
+ DDD("calling %s", __func__);
+ if (mContext && !mCsd0.empty()) {
mContext->decodeFrame(&(mCsd0[0]), mCsd0.size(), 0);
- mContext->decodeFrame(&(mCsd1[0]), mCsd1.size(), 0);
- DDD("resending csd0 and csd1");
+ DDD("resending csd0");
+ DDD("calling %s success", __func__);
}
}
@@ -447,6 +455,30 @@
return C2_OK;
}
+void C2GoldfishHevcDec::sendMetadata() {
+ // compare and send if changed
+ MetaDataColorAspects currentMetaData = {1, 0, 0, 0};
+ currentMetaData.primaries = mIntf->primaries();
+ currentMetaData.range = mIntf->range();
+ currentMetaData.transfer = mIntf->transfer();
+
+ DDD("metadata primaries %d range %d transfer %d",
+ (int)(currentMetaData.primaries),
+ (int)(currentMetaData.range),
+ (int)(currentMetaData.transfer)
+ );
+
+ if (mSentMetadata.primaries == currentMetaData.primaries &&
+ mSentMetadata.range == currentMetaData.range &&
+ mSentMetadata.transfer == currentMetaData.transfer) {
+ DDD("metadata is the same, no need to update");
+ return;
+ }
+ std::swap(mSentMetadata, currentMetaData);
+
+ mContext->sendMetadata(&(mSentMetadata));
+}
+
status_t C2GoldfishHevcDec::createDecoder() {
DDD("creating hevc context now w %d h %d", mWidth, mHeight);
@@ -874,9 +906,6 @@
if (mCsd0.empty()) {
mCsd0.assign(mInPBuffer, mInPBuffer + mInPBufferSize);
DDD("assign to csd0 with %d bytpes", mInPBufferSize);
- } else if (mCsd1.empty()) {
- mCsd1.assign(mInPBuffer, mInPBuffer + mInPBufferSize);
- DDD("assign to csd1 with %d bytpes", mInPBufferSize);
}
// this is not really a valid pts from config
removePts(mPts);
@@ -922,6 +951,8 @@
} // end of whChanged
} // end of isVpsFrame
+ sendMetadata();
+
uint32_t delay;
GETTIME(&mTimeStart, nullptr);
TIME_DIFF(mTimeEnd, mTimeStart, delay);
diff --git a/system/codecs/c2/decoders/hevcdec/C2GoldfishHevcDec.h b/system/codecs/c2/decoders/hevcdec/C2GoldfishHevcDec.h
index fe080cf..bc3d65b 100644
--- a/system/codecs/c2/decoders/hevcdec/C2GoldfishHevcDec.h
+++ b/system/codecs/c2/decoders/hevcdec/C2GoldfishHevcDec.h
@@ -142,6 +142,10 @@
}
} mBitstreamColorAspects;
+ MetaDataColorAspects mSentMetadata = {1, 0, 0, 0};
+
+ void sendMetadata();
+
// profile
struct timeval mTimeStart;
struct timeval mTimeEnd;
diff --git a/system/codecs/c2/decoders/hevcdec/MediaHevcDecoder.cpp b/system/codecs/c2/decoders/hevcdec/MediaHevcDecoder.cpp
index bb2fbfa..f1bc356 100644
--- a/system/codecs/c2/decoders/hevcdec/MediaHevcDecoder.cpp
+++ b/system/codecs/c2/decoders/hevcdec/MediaHevcDecoder.cpp
@@ -126,6 +126,22 @@
return res;
}
+void MediaHevcDecoder::sendMetadata(MetaDataColorAspects *ptr) {
+ DDD("send metadata to host %p", ptr);
+ if (!mHasAddressSpaceMemory) {
+ ALOGE("%s no address space memory", __func__);
+ return;
+ }
+ MetaDataColorAspects& meta = *ptr;
+ auto transport = GoldfishMediaTransport::getInstance();
+ transport->writeParam((uint64_t)mHostHandle, 0, mAddressOffSet);
+ transport->writeParam(meta.type, 1, mAddressOffSet);
+ transport->writeParam(meta.primaries, 2, mAddressOffSet);
+ transport->writeParam(meta.range, 3, mAddressOffSet);
+ transport->writeParam(meta.transfer, 4, mAddressOffSet);
+ transport->sendOperation(MediaCodecType::HevcCodec, MediaOperation::SendMetadata, mAddressOffSet);
+}
+
void MediaHevcDecoder::flush() {
if (!mHasAddressSpaceMemory) {
ALOGE("%s no address space memory", __func__);
diff --git a/system/codecs/c2/decoders/hevcdec/MediaHevcDecoder.h b/system/codecs/c2/decoders/hevcdec/MediaHevcDecoder.h
index 8dfb0cf..878950e 100644
--- a/system/codecs/c2/decoders/hevcdec/MediaHevcDecoder.h
+++ b/system/codecs/c2/decoders/hevcdec/MediaHevcDecoder.h
@@ -17,6 +17,8 @@
#ifndef GOLDFISH_MEDIA_Hevc_DEC_H_
#define GOLDFISH_MEDIA_Hevc_DEC_H_
+#include "goldfish_media_utils.h"
+
struct hevc_init_result_t {
uint64_t host_handle;
int ret;
@@ -89,5 +91,8 @@
// ask host to render to hostColorBufferId, return only image metadata back
// to guest
hevc_image_t renderOnHostAndReturnImageMetadata(int hostColorBufferId);
+
+ void sendMetadata(MetaDataColorAspects *ptr);
+
};
#endif
diff --git a/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.cpp b/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.cpp
index 99f0469..be6428e 100644
--- a/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.cpp
+++ b/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.cpp
@@ -324,6 +324,12 @@
int height() const { return mSize->height; }
+ int primaries() const { return mDefaultColorAspects->primaries; }
+
+ int range() const { return mDefaultColorAspects->range; }
+
+ int transfer() const { return mDefaultColorAspects->transfer; }
+
static C2R Hdr10PlusInfoInputSetter(bool mayBlock,
C2P<C2StreamHdr10PlusInfo::input> &me) {
(void)mayBlock;
@@ -416,6 +422,30 @@
void C2GoldfishVpxDec::onRelease() { destroyDecoder(); }
+void C2GoldfishVpxDec::sendMetadata() {
+ // compare and send if changed
+ MetaDataColorAspects currentMetaData = {1, 0, 0, 0};
+ currentMetaData.primaries = mIntf->primaries();
+ currentMetaData.range = mIntf->range();
+ currentMetaData.transfer = mIntf->transfer();
+
+ DDD("metadata primaries %d range %d transfer %d",
+ (int)(currentMetaData.primaries),
+ (int)(currentMetaData.range),
+ (int)(currentMetaData.transfer)
+ );
+
+ if (mSentMetadata.primaries == currentMetaData.primaries &&
+ mSentMetadata.range == currentMetaData.range &&
+ mSentMetadata.transfer == currentMetaData.transfer) {
+ DDD("metadata is the same, no need to update");
+ return;
+ }
+ std::swap(mSentMetadata, currentMetaData);
+
+ vpx_codec_send_metadata(mCtx, &(mSentMetadata));
+}
+
c2_status_t C2GoldfishVpxDec::onFlush_sm() {
if (mFrameParallelMode) {
// Flush decoder by passing nullptr data ptr and 0 size.
@@ -609,6 +639,8 @@
}
}
+ sendMetadata();
+
if (inSize) {
uint8_t *bitstream = const_cast<uint8_t *>(rView.data() + inOffset);
vpx_codec_err_t err = vpx_codec_decode(
diff --git a/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.h b/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.h
index 4b356da..738d9fc 100644
--- a/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.h
+++ b/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.h
@@ -16,6 +16,7 @@
#pragma once
+#include "goldfish_media_utils.h"
#include "goldfish_vpx_defs.h"
#include <SimpleC2Component.h>
@@ -95,6 +96,9 @@
const std::shared_ptr<C2BlockPool> &pool,
const std::unique_ptr<C2Work> &work);
+ MetaDataColorAspects mSentMetadata = {1, 0, 0, 0};
+ void sendMetadata();
+
C2_DO_NOT_COPY(C2GoldfishVpxDec);
};
diff --git a/system/codecs/c2/decoders/vpxdec/goldfish_vpx_defs.h b/system/codecs/c2/decoders/vpxdec/goldfish_vpx_defs.h
index bbcc805..1be05c9 100644
--- a/system/codecs/c2/decoders/vpxdec/goldfish_vpx_defs.h
+++ b/system/codecs/c2/decoders/vpxdec/goldfish_vpx_defs.h
@@ -61,4 +61,6 @@
int vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data,
unsigned int data_sz, void *user_priv, long deadline);
+void vpx_codec_send_metadata(vpx_codec_ctx_t *ctx, void *ptr);
+
#endif // MY_VPX_DEFS_H_
diff --git a/system/codecs/c2/decoders/vpxdec/goldfish_vpx_impl.cpp b/system/codecs/c2/decoders/vpxdec/goldfish_vpx_impl.cpp
index d008efe..e1fa879 100644
--- a/system/codecs/c2/decoders/vpxdec/goldfish_vpx_impl.cpp
+++ b/system/codecs/c2/decoders/vpxdec/goldfish_vpx_impl.cpp
@@ -142,6 +142,17 @@
return &(ctx->myImg);
}
+void vpx_codec_send_metadata(vpx_codec_ctx_t *ctx, void *ptr) {
+ MetaDataColorAspects& meta = *(MetaDataColorAspects*)ptr;
+ auto transport = GoldfishMediaTransport::getInstance();
+ transport->writeParam(ctx->id, 0, ctx->address_offset);
+ transport->writeParam(meta.type, 1, ctx->address_offset);
+ transport->writeParam(meta.primaries, 2, ctx->address_offset);
+ transport->writeParam(meta.range, 3, ctx->address_offset);
+ transport->writeParam(meta.transfer, 4, ctx->address_offset);
+ sendVpxOperation(ctx, MediaOperation::SendMetadata);
+}
+
int vpx_codec_flush(vpx_codec_ctx_t *ctx) {
DDD("%s %d", __func__, __LINE__);
if (!ctx) {
diff --git a/system/hwc2/DisplayFinder.cpp b/system/hwc2/DisplayFinder.cpp
index a55a49d..2016c4b 100644
--- a/system/hwc2/DisplayFinder.cpp
+++ b/system/hwc2/DisplayFinder.cpp
@@ -110,7 +110,7 @@
rcEnc->rcGetFBDisplayConfigsParam(rcEnc, configId, FB_HEIGHT), //
rcEnc->rcGetFBDisplayConfigsParam(rcEnc, configId, FB_XDPI), //
rcEnc->rcGetFBDisplayConfigsParam(rcEnc, configId, FB_YDPI), //
- getVsyncForDisplay(drmPresenter, configId) //
+ getVsyncForDisplay(drmPresenter, display.displayId) //
));
}
} else {
diff --git a/system/hwc2/HostComposer.cpp b/system/hwc2/HostComposer.cpp
index 5bf324e..472e782 100644
--- a/system/hwc2/HostComposer.cpp
+++ b/system/hwc2/HostComposer.cpp
@@ -570,7 +570,7 @@
display->clearReleaseFencesAndIdsLocked();
if (numLayer == 0) {
- ALOGW(
+ ALOGV(
"%s display has no layers to compose, flushing client target buffer.",
__FUNCTION__);
diff --git a/system/hwc3/HostFrameComposer.cpp b/system/hwc3/HostFrameComposer.cpp
index 7b090c2..f976e05 100644
--- a/system/hwc3/HostFrameComposer.cpp
+++ b/system/hwc3/HostFrameComposer.cpp
@@ -578,7 +578,7 @@
displayId, static_cast<int>(layers.size()));
if (numLayer == 0) {
- ALOGW(
+ ALOGV(
"%s display has no layers to compose, flushing client target buffer.",
__FUNCTION__);
diff --git a/system/vulkan_enc/AndroidHardwareBuffer.cpp b/system/vulkan_enc/AndroidHardwareBuffer.cpp
index 40360a2..c49693d 100644
--- a/system/vulkan_enc/AndroidHardwareBuffer.cpp
+++ b/system/vulkan_enc/AndroidHardwareBuffer.cpp
@@ -75,8 +75,46 @@
if (!(desc.usage & (gpu_usage))) {
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
}
-
- ahbFormatProps->format = vk_format_from_android(desc.format);
+ switch(desc.format) {
+ case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM:
+ ahbFormatProps->format = VK_FORMAT_R8G8B8A8_UNORM;
+ break;
+ case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM:
+ ahbFormatProps->format = VK_FORMAT_R8G8B8A8_UNORM;
+ break;
+ case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM:
+ ahbFormatProps->format = VK_FORMAT_R8G8B8_UNORM;
+ break;
+ case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM:
+ ahbFormatProps->format = VK_FORMAT_R5G6B5_UNORM_PACK16;
+ break;
+ case AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT:
+ ahbFormatProps->format = VK_FORMAT_R16G16B16A16_SFLOAT;
+ break;
+ case AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM:
+ ahbFormatProps->format = VK_FORMAT_A2B10G10R10_UNORM_PACK32;
+ break;
+ case AHARDWAREBUFFER_FORMAT_D16_UNORM:
+ ahbFormatProps->format = VK_FORMAT_D16_UNORM;
+ break;
+ case AHARDWAREBUFFER_FORMAT_D24_UNORM:
+ ahbFormatProps->format = VK_FORMAT_X8_D24_UNORM_PACK32;
+ break;
+ case AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT:
+ ahbFormatProps->format = VK_FORMAT_D24_UNORM_S8_UINT;
+ break;
+ case AHARDWAREBUFFER_FORMAT_D32_FLOAT:
+ ahbFormatProps->format = VK_FORMAT_D32_SFLOAT;
+ break;
+ case AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT:
+ ahbFormatProps->format = VK_FORMAT_D32_SFLOAT_S8_UINT;
+ break;
+ case AHARDWAREBUFFER_FORMAT_S8_UINT:
+ ahbFormatProps->format = VK_FORMAT_S8_UINT;
+ break;
+ default:
+ ahbFormatProps->format = VK_FORMAT_UNDEFINED;
+ }
ahbFormatProps->externalFormat = desc.format;
// The formatFeatures member must include
diff --git a/system/vulkan_enc/ResourceTracker.cpp b/system/vulkan_enc/ResourceTracker.cpp
index a4bf2e7..6cc7010 100644
--- a/system/vulkan_enc/ResourceTracker.cpp
+++ b/system/vulkan_enc/ResourceTracker.cpp
@@ -362,6 +362,10 @@
VkDeviceSize currentBackingSize = 0;
bool baseRequirementsKnown = false;
VkMemoryRequirements baseRequirements;
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+ bool hasExternalFormat = false;
+ unsigned androidFormat = 0;
+#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
bool isSysmemBackedMemory = false;
#endif
@@ -5055,6 +5059,11 @@
info.createInfo = *pCreateInfo;
info.createInfo.pNext = nullptr;
+ if (extFormatAndroidPtr && extFormatAndroidPtr->externalFormat) {
+ info.hasExternalFormat = true;
+ info.androidFormat = extFormatAndroidPtr->externalFormat;
+ }
+
if (supportsCreateResourcesWithRequirements()) {
info.baseRequirementsKnown = true;
}
@@ -5192,7 +5201,8 @@
vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
if (samplerYcbcrConversionInfo) {
if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
- localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
+ localVkSamplerYcbcrConversionInfo =
+ vk_make_orphan_copy(*samplerYcbcrConversionInfo);
vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
}
}
@@ -7575,14 +7585,24 @@
(void)input_result;
VkImageViewCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
+ vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
- const VkExternalFormatANDROID* extFormatAndroidPtr =
- vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
- if (extFormatAndroidPtr) {
- if (extFormatAndroidPtr->externalFormat) {
- localCreateInfo.format =
- vk_format_from_android(extFormatAndroidPtr->externalFormat);
+ if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
+ AutoLock<RecursiveLock> lock(mLock);
+
+ auto it = info_VkImage.find(pCreateInfo->image);
+ if (it != info_VkImage.end() && it->second.hasExternalFormat) {
+ localCreateInfo.format = vk_format_from_android(it->second.androidFormat);
+ }
+ }
+ VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
+ const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
+ vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
+ if (samplerYcbcrConversionInfo) {
+ if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
+ localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
+ vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
}
}
#endif
@@ -7796,6 +7816,63 @@
return VK_SUCCESS;
}
+ VkResult on_vkCreateGraphicsPipelines(
+ void* context,
+ VkResult input_result,
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ uint32_t createInfoCount,
+ const VkGraphicsPipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipelines) {
+ (void)input_result;
+ VkEncoder* enc = (VkEncoder*)context;
+ std::vector<VkGraphicsPipelineCreateInfo> localCreateInfos(
+ pCreateInfos, pCreateInfos + createInfoCount);
+ for (VkGraphicsPipelineCreateInfo& graphicsPipelineCreateInfo : localCreateInfos) {
+ // dEQP-VK.api.pipeline.pipeline_invalid_pointers_unused_structs#graphics
+ bool requireViewportState = false;
+ // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750
+ requireViewportState |= graphicsPipelineCreateInfo.pRasterizationState != nullptr &&
+ graphicsPipelineCreateInfo.pRasterizationState->rasterizerDiscardEnable
+ == VK_FALSE;
+ // VUID-VkGraphicsPipelineCreateInfo-pViewportState-04892
+#ifdef VK_EXT_extended_dynamic_state2
+ if (!requireViewportState && graphicsPipelineCreateInfo.pDynamicState) {
+ for (uint32_t i = 0; i <
+ graphicsPipelineCreateInfo.pDynamicState->dynamicStateCount; i++) {
+ if (VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT ==
+ graphicsPipelineCreateInfo.pDynamicState->pDynamicStates[i]) {
+ requireViewportState = true;
+ break;
+ }
+ }
+ }
+#endif // VK_EXT_extended_dynamic_state2
+ if (!requireViewportState) {
+ graphicsPipelineCreateInfo.pViewportState = nullptr;
+ }
+
+ // It has the same requirement as for pViewportState.
+ bool shouldIncludeFragmentShaderState = requireViewportState;
+
+ // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00751
+ if (!shouldIncludeFragmentShaderState) {
+ graphicsPipelineCreateInfo.pMultisampleState = nullptr;
+ }
+
+ // VUID-VkGraphicsPipelineCreateInfo-renderPass-06043
+ // VUID-VkGraphicsPipelineCreateInfo-renderPass-06044
+ if (graphicsPipelineCreateInfo.renderPass == VK_NULL_HANDLE
+ || !shouldIncludeFragmentShaderState) {
+ graphicsPipelineCreateInfo.pDepthStencilState = nullptr;
+ graphicsPipelineCreateInfo.pColorBlendState = nullptr;
+ }
+ }
+ return enc->vkCreateGraphicsPipelines(device, pipelineCache, localCreateInfos.size(),
+ localCreateInfos.data(), pAllocator, pPipelines, true /* do lock */);
+ }
+
uint32_t getApiVersionFromInstance(VkInstance instance) const {
AutoLock<RecursiveLock> lock(mLock);
uint32_t api = kDefaultApiVersion;
@@ -9026,6 +9103,18 @@
return mImpl->on_vkQueueSignalReleaseImageANDROID(context, input_result, queue, waitSemaphoreCount, pWaitSemaphores, image, pNativeFenceFd);
}
+VkResult ResourceTracker::on_vkCreateGraphicsPipelines(
+ void* context,
+ VkResult input_result,
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ uint32_t createInfoCount,
+ const VkGraphicsPipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipelines) {
+ return mImpl->on_vkCreateGraphicsPipelines(context, input_result, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
+}
+
void ResourceTracker::deviceMemoryTransform_tohost(
VkDeviceMemory* memory, uint32_t memoryCount,
VkDeviceSize* offset, uint32_t offsetCount,
diff --git a/system/vulkan_enc/ResourceTracker.h b/system/vulkan_enc/ResourceTracker.h
index e25e75d..b50635d 100644
--- a/system/vulkan_enc/ResourceTracker.h
+++ b/system/vulkan_enc/ResourceTracker.h
@@ -627,6 +627,16 @@
VkImage image,
int* pNativeFenceFd);
+ VkResult on_vkCreateGraphicsPipelines(
+ void* context,
+ VkResult input_result,
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ uint32_t createInfoCount,
+ const VkGraphicsPipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipelines);
+
uint8_t* getMappedPointer(VkDeviceMemory memory);
VkDeviceSize getMappedSize(VkDeviceMemory memory);
VkDeviceSize getNonCoherentExtendedSize(VkDevice device, VkDeviceSize basicSize) const;
diff --git a/system/vulkan_enc/func_table.cpp b/system/vulkan_enc/func_table.cpp
index 4e17d42..54b232e 100644
--- a/system/vulkan_enc/func_table.cpp
+++ b/system/vulkan_enc/func_table.cpp
@@ -769,7 +769,8 @@
AEMU_SCOPED_TRACE("vkCreateGraphicsPipelines");
auto vkEnc = ResourceTracker::getThreadLocalEncoder();
VkResult vkCreateGraphicsPipelines_VkResult_return = (VkResult)0;
- vkCreateGraphicsPipelines_VkResult_return = vkEnc->vkCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, true /* do lock */);
+ auto resources = ResourceTracker::get();
+ vkCreateGraphicsPipelines_VkResult_return = resources->on_vkCreateGraphicsPipelines(vkEnc, VK_SUCCESS, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
return vkCreateGraphicsPipelines_VkResult_return;
}
static VkResult entry_vkCreateComputePipelines(