Merge "Do not allocate CPU buffers if not requested"
diff --git a/shared/gralloc_cb/include/gralloc_cb_bp.h b/shared/gralloc_cb/include/gralloc_cb_bp.h
index 77e3706..7283e9d 100644
--- a/shared/gralloc_cb/include/gralloc_cb_bp.h
+++ b/shared/gralloc_cb/include/gralloc_cb_bp.h
@@ -29,17 +29,13 @@
((sizeof(*this)-sizeof(native_handle_t)-nfd*sizeof(int32_t))/sizeof(int32_t))
struct cb_handle_t : public native_handle_t {
- cb_handle_t(int32_t p_bufferFd,
- QEMU_PIPE_HANDLE p_hostHandleRefCountFd,
- uint32_t p_magic,
+ cb_handle_t(uint32_t p_magic,
uint32_t p_hostHandle,
int32_t p_format,
uint32_t p_stride,
uint32_t p_bufSize,
uint64_t p_mmapedOffset)
- : bufferFd(p_bufferFd),
- hostHandleRefCountFd(p_hostHandleRefCountFd),
- magic(p_magic),
+ : magic(p_magic),
hostHandle(p_hostHandle),
format(p_format),
bufferSize(p_bufSize),
@@ -47,8 +43,6 @@
mmapedOffsetLo(static_cast<uint32_t>(p_mmapedOffset)),
mmapedOffsetHi(static_cast<uint32_t>(p_mmapedOffset >> 32)) {
version = sizeof(native_handle);
- numFds = ((bufferFd >= 0) ? 1 : 0) + (qemu_pipe_valid(hostHandleRefCountFd) ? 1 : 0);
- numInts = 0; // has to be overwritten in children classes
}
uint64_t getMmapedOffset() const {
@@ -78,9 +72,7 @@
return from(const_cast<void*>(p));
}
- // fds
- int32_t bufferFd; // underlying buffer file handle
- QEMU_PIPE_HANDLE hostHandleRefCountFd; // guest side refcounter to hostHandle
+ int32_t fds[2];
// ints
uint32_t magic; // magic number in order to validate a pointer
diff --git a/system/gralloc/gralloc_old.cpp b/system/gralloc/gralloc_old.cpp
index dd054d5..080b34c 100644
--- a/system/gralloc/gralloc_old.cpp
+++ b/system/gralloc/gralloc_old.cpp
@@ -82,14 +82,14 @@
using android::base::guest::getCurrentThreadId;
const uint32_t CB_HANDLE_MAGIC_OLD = CB_HANDLE_MAGIC_BASE | 0x1;
+const int kBufferFdIndex = 0;
+const int kHostHandleRefCountIndex = 1;
struct cb_handle_old_t : public cb_handle_t {
cb_handle_old_t(int p_fd, int p_ashmemSize, int p_usage,
int p_width, int p_height,
int p_format, int p_glFormat, int p_glType)
- : cb_handle_t(p_fd,
- QEMU_PIPE_INVALID_HANDLE,
- CB_HANDLE_MAGIC_OLD,
+ : cb_handle_t(CB_HANDLE_MAGIC_OLD,
0,
p_format,
p_width,
@@ -108,18 +108,20 @@
lockedTop(0),
lockedWidth(0),
lockedHeight(0) {
+ fds[kBufferFdIndex] = p_fd;
+ numFds = 1;
numInts = CB_HANDLE_NUM_INTS(numFds);
}
bool hasRefcountPipe() const {
- return qemu_pipe_valid(hostHandleRefCountFd);
+ return qemu_pipe_valid(fds[kHostHandleRefCountIndex]);
}
void setRefcountPipeFd(QEMU_PIPE_HANDLE fd) {
if (qemu_pipe_valid(fd)) {
numFds++;
}
- hostHandleRefCountFd = fd;
+ fds[kHostHandleRefCountIndex] = fd;
numInts = CB_HANDLE_NUM_INTS(numFds);
}
@@ -452,12 +454,13 @@
static int map_buffer(cb_handle_old_t *cb, void **vaddr)
{
- if (cb->bufferFd < 0) {
+ const int bufferFd = cb->fds[kBufferFdIndex];
+ if (bufferFd < 0) {
return -EINVAL;
}
void *addr = mmap(0, cb->bufferSize, PROT_READ | PROT_WRITE,
- MAP_SHARED, cb->bufferFd, 0);
+ MAP_SHARED, bufferFd, 0);
if (addr == MAP_FAILED) {
ALOGE("%s: failed to map ashmem region!", __FUNCTION__);
return -errno;
@@ -991,17 +994,18 @@
//
// detach and unmap ashmem area if present
//
- if (cb->bufferFd > 0) {
+ const int bufferFd = cb->fds[kBufferFdIndex];
+ if (bufferFd > 0) {
if (cb->bufferSize > 0 && cb->getBufferPtr()) {
D("%s: unmapped %p", __FUNCTION__, cb->getBufferPtr());
munmap(cb->getBufferPtr(), cb->bufferSize);
put_gralloc_region(rcEnc, cb->bufferSize);
}
- close(cb->bufferFd);
+ close(bufferFd);
}
- if(qemu_pipe_valid(cb->hostHandleRefCountFd)) {
- qemu_pipe_close(cb->hostHandleRefCountFd);
+ if(qemu_pipe_valid(cb->fds[kHostHandleRefCountIndex])) {
+ qemu_pipe_close(cb->fds[kHostHandleRefCountIndex]);
}
D("%s: done", __FUNCTION__);
// remove it from the allocated list
diff --git a/system/hals/allocator3.cpp b/system/hals/allocator3.cpp
index 4d235e8..8a05abd 100644
--- a/system/hals/allocator3.cpp
+++ b/system/hals/allocator3.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <android-base/unique_fd.h>
#include <android/hardware/graphics/allocator/3.0/IAllocator.h>
#include <android/hardware/graphics/mapper/3.0/IMapper.h>
#include <hidl/LegacySupport.h>
@@ -210,22 +211,27 @@
RETURN_ERROR(Error3::UNSUPPORTED);
}
- const size_t align1 = align - 1;
const uint32_t width = descriptor.width;
const uint32_t height = descriptor.height;
- uint32_t stride;
size_t bufferSize;
+ uint32_t stride;
- if (yuv_format) {
- const size_t yStride = (width * bpp + align1) & ~align1;
- const size_t uvStride = (yStride / 2 + align1) & ~align1;
- const size_t uvHeight = height / 2;
- bufferSize = yStride * height + 2 * (uvHeight * uvStride);
- stride = yStride / bpp;
+ if (usage & (BufferUsage::CPU_READ_MASK | BufferUsage::CPU_WRITE_MASK)) {
+ const size_t align1 = align - 1;
+ if (yuv_format) {
+ const size_t yStride = (width * bpp + align1) & ~align1;
+ const size_t uvStride = (yStride / 2 + align1) & ~align1;
+ const size_t uvHeight = height / 2;
+ bufferSize = yStride * height + 2 * (uvHeight * uvStride);
+ stride = yStride / bpp;
+ } else {
+ const size_t bpr = (width * bpp + align1) & ~align1;
+ bufferSize = bpr * height;
+ stride = bpr / bpp;
+ }
} else {
- const size_t bpr = (width * bpp + align1) & ~align1;
- bufferSize = bpr * height;
- stride = bpr / bpp;
+ bufferSize = 0;
+ stride = 0;
}
*pStride = stride;
@@ -315,22 +321,27 @@
ExtendedRCEncoderContext *const rcEnc = conn.getRcEncoder();
CRASH_IF(!rcEnc, "conn.getRcEncoder() failed");
- GoldfishAddressSpaceHostMemoryAllocator host_memory_allocator(
- rcEnc->featureInfo_const()->hasSharedSlotsHostMemoryAllocator);
- if (!host_memory_allocator.is_opened()) {
- RETURN_ERROR(Error3::NO_RESOURCES);
- }
-
+ android::base::unique_fd cpuAllocatorFd;
 GoldfishAddressSpaceBlock bufferBits;
- if (host_memory_allocator.hostMalloc(&bufferBits, bufferSize)) {
- RETURN_ERROR(Error3::NO_RESOURCES);
+ if (bufferSize > 0) {
+ GoldfishAddressSpaceHostMemoryAllocator host_memory_allocator(
+ rcEnc->featureInfo_const()->hasSharedSlotsHostMemoryAllocator);
+ if (!host_memory_allocator.is_opened()) {
+ RETURN_ERROR(Error3::NO_RESOURCES);
+ }
+
+ if (host_memory_allocator.hostMalloc(&bufferBits, bufferSize)) {
+ RETURN_ERROR(Error3::NO_RESOURCES);
+ }
+
+ cpuAllocatorFd.reset(host_memory_allocator.release());
 }
 uint32_t hostHandle = 0;
- QEMU_PIPE_HANDLE hostHandleRefCountFd = QEMU_PIPE_INVALID_HANDLE;
+ android::base::unique_fd hostHandleRefCountFd;
 if (needGpuBuffer(usage)) {
- hostHandleRefCountFd = qemu_pipe_open("refcount");
- if (!qemu_pipe_valid(hostHandleRefCountFd)) {
+ hostHandleRefCountFd.reset(qemu_pipe_open("refcount"));
+ if (!hostHandleRefCountFd.ok()) {
 RETURN_ERROR(Error3::NO_RESOURCES);
 }
@@ -343,23 +354,21 @@
 allocFormat, static_cast<int>(emulatorFrameworkFormat));
 if (!hostHandle) {
- qemu_pipe_close(hostHandleRefCountFd);
 RETURN_ERROR(Error3::NO_RESOURCES);
 }
- if (qemu_pipe_write(hostHandleRefCountFd,
+ if (qemu_pipe_write(hostHandleRefCountFd.get(),
 &hostHandle,
 sizeof(hostHandle)) != sizeof(hostHandle)) {
 rcEnc->rcCloseColorBuffer(rcEnc, hostHandle);
- qemu_pipe_close(hostHandleRefCountFd);
 RETURN_ERROR(Error3::NO_RESOURCES);
 }
 }
 std::unique_ptr<cb_handle_30_t> handle =
 std::make_unique<cb_handle_30_t>(
- host_memory_allocator.release(),
- hostHandleRefCountFd,
+ cpuAllocatorFd.release(),
+ hostHandleRefCountFd.release(),
hostHandle,
usage,
width,
@@ -380,14 +389,14 @@
}
void freeCb(std::unique_ptr<cb_handle_30_t> cb) {
- // no need to undo .hostMalloc: the kernel will take care of it once the
- // last bufferFd (duped) is closed.
-
- if (qemu_pipe_valid(cb->hostHandleRefCountFd)) {
- qemu_pipe_close(cb->hostHandleRefCountFd);
+ if (cb->hostHandleRefcountFdIndex >= 0) {
+ ::close(cb->fds[cb->hostHandleRefcountFdIndex]);
}
- GoldfishAddressSpaceBlock::memoryUnmap(cb->getBufferPtr(), cb->mmapedSize);
- GoldfishAddressSpaceHostMemoryAllocator::closeHandle(cb->bufferFd);
+
+ if (cb->bufferFdIndex >= 0) {
+ GoldfishAddressSpaceBlock::memoryUnmap(cb->getBufferPtr(), cb->mmapedSize);
+ GoldfishAddressSpaceHostMemoryAllocator::closeHandle(cb->fds[cb->bufferFdIndex]);
+ }
}
HostConnectionSession getHostConnectionSession() const {
diff --git a/system/hals/cb_handle_30.h b/system/hals/cb_handle_30.h
index 161e0ed..ca305c9 100644
--- a/system/hals/cb_handle_30.h
+++ b/system/hals/cb_handle_30.h
@@ -24,7 +24,7 @@
struct cb_handle_30_t : public cb_handle_t {
cb_handle_30_t(int p_bufferFd,
- QEMU_PIPE_HANDLE p_hostHandleRefCountFd,
+ int p_hostHandleRefCountFd,
uint32_t p_hostHandle,
uint32_t p_usage,
uint32_t p_width,
@@ -38,9 +38,7 @@
uint64_t p_mmapedOffset,
uint32_t p_bytesPerPixel,
uint32_t p_stride)
- : cb_handle_t(p_bufferFd,
- p_hostHandleRefCountFd,
- CB_HANDLE_MAGIC_30,
+ : cb_handle_t(CB_HANDLE_MAGIC_30,
p_hostHandle,
p_format,
p_stride,
@@ -53,13 +51,26 @@
glType(p_glType),
bytesPerPixel(p_bytesPerPixel),
mmapedSize(p_mmapedSize),
- locked(0),
- lockedUsage(0),
- lockedLeft(0),
- lockedTop(0),
- lockedWidth(0),
- lockedHeight(0) {
- numInts = CB_HANDLE_NUM_INTS(numFds);
+ lockedUsage(0) {
+ fds[0] = -1;
+ fds[1] = -1;
+ int n = 0;
+ if (p_bufferFd >= 0) {
+ bufferFdIndex = n++;
+ fds[bufferFdIndex] = p_bufferFd;
+ } else {
+ bufferFdIndex = -1;
+ }
+
+ if (p_hostHandleRefCountFd >= 0) {
+ hostHandleRefcountFdIndex = n++;
+ fds[hostHandleRefcountFdIndex] = p_hostHandleRefCountFd;
+ } else {
+ hostHandleRefcountFdIndex = -1;
+ }
+
+ numFds = n;
+ numInts = CB_HANDLE_NUM_INTS(n);
setBufferPtr(p_bufPtr);
}
@@ -99,8 +110,10 @@
uint32_t mmapedSize; // real allocation side
uint32_t bufferPtrLo;
uint32_t bufferPtrHi;
- uint32_t locked;
- uint32_t lockedUsage;
+ uint8_t lockedUsage;
+ int8_t bufferFdIndex;
+ int8_t hostHandleRefcountFdIndex;
+ int8_t unused;
uint32_t lockedLeft; // region of buffer locked for s/w write
uint32_t lockedTop;
uint32_t lockedWidth;
diff --git a/system/hals/mapper3.cpp b/system/hals/mapper3.cpp
index e0af035..9848809 100644
--- a/system/hals/mapper3.cpp
+++ b/system/hals/mapper3.cpp
@@ -242,11 +242,12 @@
}
if (cb->mmapedSize > 0) {
+ LOG_ALWAYS_FATAL_IF(cb->bufferFdIndex < 0);
void* ptr;
const int res = GoldfishAddressSpaceBlock::memoryMap(
cb->getBufferPtr(),
cb->mmapedSize,
- cb->bufferFd,
+ cb->fds[cb->bufferFdIndex],
cb->getMmapedOffset(),
&ptr);
if (res) {
@@ -268,7 +269,7 @@
}
Error3 lockImpl(void* raw,
- uint64_t cpuUsage,
+ const uint64_t uncheckedUsage,
const Rect& accessRegion,
const hidl_handle& acquireFence,
void** pptr,
@@ -281,7 +282,12 @@
if (!cb) {
RETURN_ERROR(Error3::BAD_BUFFER);
}
- if ((cb->usage & cpuUsage) == 0) {
+ if (cb->lockedUsage) {
+ RETURN_ERROR(Error3::BAD_VALUE);
+ }
+ const uint8_t checkedUsage = uncheckedUsage & cb->usage &
+ (BufferUsage::CPU_READ_MASK | BufferUsage::CPU_WRITE_MASK);
+ if (checkedUsage == 0) {
RETURN_ERROR(Error3::BAD_VALUE);
}
if (!cb->bufferSize) {
@@ -296,7 +302,7 @@
}
if (cb->hostHandle) {
- const Error3 e = lockHostImpl(*cb, cpuUsage, accessRegion, bufferBits);
+ const Error3 e = lockHostImpl(*cb, checkedUsage, accessRegion, bufferBits);
if (e != Error3::NONE) {
return e;
}
@@ -309,7 +315,7 @@
}
Error3 lockYCbCrImpl(void* raw,
- uint64_t cpuUsage,
+ const uint64_t uncheckedUsage,
const Rect& accessRegion,
const hidl_handle& acquireFence,
YCbCrLayout3* pYcbcr) {
@@ -320,7 +326,12 @@
if (!cb) {
RETURN_ERROR(Error3::BAD_BUFFER);
}
- if ((cb->usage & cpuUsage) == 0) {
+ if (cb->lockedUsage) {
+ RETURN_ERROR(Error3::BAD_VALUE);
+ }
+ const uint8_t checkedUsage = uncheckedUsage & cb->usage &
+ (BufferUsage::CPU_READ_MASK | BufferUsage::CPU_WRITE_MASK);
+ if (checkedUsage == 0) {
RETURN_ERROR(Error3::BAD_VALUE);
}
if (!cb->bufferSize) {
@@ -379,7 +390,7 @@
}
if (cb->hostHandle) {
- const Error3 e = lockHostImpl(*cb, cpuUsage, accessRegion, bufferBits);
+ const Error3 e = lockHostImpl(*cb, checkedUsage, accessRegion, bufferBits);
if (e != Error3::NONE) {
return e;
}
@@ -396,14 +407,12 @@
}
Error3 lockHostImpl(cb_handle_30_t& cb,
- const uint32_t usage,
+ const uint8_t checkedUsage,
const Rect& accessRegion,
char* const bufferBits) {
- const bool usageSwRead = usage & BufferUsage::CPU_READ_MASK;
- const bool usageSwWrite = usage & BufferUsage::CPU_WRITE_MASK;
-
const HostConnectionSession conn = getHostConnectionSession();
ExtendedRCEncoderContext *const rcEnc = conn.getRcEncoder();
+ const bool usageSwRead = (checkedUsage & BufferUsage::CPU_READ_MASK) != 0;
const int res = rcEnc->rcColorBufferCacheFlush(
rcEnc, cb.hostHandle, 0, usageSwRead);
@@ -411,8 +420,7 @@
RETURN_ERROR(Error3::NO_RESOURCES);
}
- const bool cbReadable = cb.usage & BufferUsage::CPU_READ_MASK;
- if (usageSwRead && cbReadable) {
+ if (usageSwRead) {
if (gralloc_is_yuv_format(cb.format)) {
if (rcEnc->hasYUVCache()) {
uint32_t bufferSize;
@@ -484,7 +492,7 @@
}
}
- if (usageSwWrite) {
+ if (checkedUsage & BufferUsage::CPU_WRITE_MASK) {
cb.lockedLeft = accessRegion.left;
cb.lockedTop = accessRegion.top;
cb.lockedWidth = accessRegion.width;
@@ -495,9 +503,7 @@
cb.lockedWidth = cb.width;
cb.lockedHeight = cb.height;
}
-
- cb.locked = 1;
- cb.lockedUsage = usage;
+ cb.lockedUsage = checkedUsage;
RETURN(Error3::NONE);
}
@@ -511,6 +517,9 @@
if (!cb) {
RETURN_ERROR(Error3::BAD_BUFFER);
}
+ if (cb->lockedUsage == 0) {
+ RETURN_ERROR(Error3::BAD_VALUE);
+ }
if (!cb->bufferSize) {
RETURN_ERROR(Error3::BAD_BUFFER);
}
@@ -528,16 +537,12 @@
void unlockHostImpl(cb_handle_30_t& cb, char* const bufferBits) {
AEMU_SCOPED_TRACE("unlockHostImpl body");
- const int bpp = glUtilsPixelBitSize(cb.glFormat, cb.glType) >> 3;
- const uint32_t lockedUsage = cb.lockedUsage;
- const uint32_t rgbSize = cb.width * cb.height * bpp;
- const char* bitsToSend;
- uint32_t sizeToSend;
+ if (cb.lockedUsage & BufferUsage::CPU_WRITE_MASK) {
+ const int bpp = glUtilsPixelBitSize(cb.glFormat, cb.glType) >> 3;
+ const uint32_t rgbSize = cb.width * cb.height * bpp;
+ const char* bitsToSend;
+ uint32_t sizeToSend;
- const uint32_t usageSwWrite = (uint32_t)BufferUsage::CPU_WRITE_MASK;
- uint32_t readOnly = (!(lockedUsage & usageSwWrite));
-
- if (!readOnly) {
if (gralloc_is_yuv_format(cb.format)) {
bitsToSend = bufferBits;
switch (static_cast<PixelFormat>(cb.format)) {
@@ -579,7 +584,6 @@
cb.lockedTop = 0;
cb.lockedWidth = 0;
cb.lockedHeight = 0;
- cb.locked = 0;
cb.lockedUsage = 0;
}