Merge 'aosp/upstream-master' into 'aosp/master' am: 01b2926cf7
Change-Id: Ib55889bc29919cf571406f1e1563fa0c34c7bbf7
diff --git a/OWNERS b/OWNERS
index 37276b3..d9d5bf3 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1 +1,11 @@
-adelva@google.com
\ No newline at end of file
+dbehr@chromium.org
+dcastagna@chromium.org
+ddavenport@chromium.org
+gurchetansingh@chromium.org
+hoegsberg@chromium.org
+ihf@chromium.org
+lepton@chromium.org
+marcheu@chromium.org
+stevensd@chromium.org
+tfiga@chromium.org
+tutankhamen@chromium.org
diff --git a/OWNERS.android b/OWNERS.android
new file mode 100644
index 0000000..be55e00
--- /dev/null
+++ b/OWNERS.android
@@ -0,0 +1,2 @@
+adelva@google.com
+natsu@google.com
diff --git a/amdgpu.c b/amdgpu.c
index 5f2a93b..795d137 100644
--- a/amdgpu.c
+++ b/amdgpu.c
@@ -18,13 +18,9 @@
#include "helpers.h"
#include "util.h"
-#ifdef __ANDROID__
-#define DRI_PATH "/vendor/lib/dri/radeonsi_dri.so"
-#else
// clang-format off
#define DRI_PATH STRINGIZE(DRI_DRIVER_DIR/radeonsi_dri.so)
// clang-format on
-#endif
#define TILE_TYPE_LINEAR 0
/* DRI backend decides tiling in this case. */
@@ -39,8 +35,8 @@
DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888 };
-const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8,
- DRM_FORMAT_NV21, DRM_FORMAT_NV12,
+const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8,
+ DRM_FORMAT_NV21, DRM_FORMAT_NV12,
DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_YVU420 };
static int amdgpu_init(struct driver *drv)
@@ -142,43 +138,13 @@
drv->priv = NULL;
}
-static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
+static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
{
int ret;
uint32_t plane, stride;
- struct combination *combo;
union drm_amdgpu_gem_create gem_create;
- combo = drv_get_combination(bo->drv, format, use_flags);
- if (!combo)
- return -EINVAL;
-
- if (combo->metadata.tiling == TILE_TYPE_DRI) {
- bool needs_alignment = false;
-#ifdef __ANDROID__
- /*
- * Currently, the gralloc API doesn't differentiate between allocation time and map
- * time strides. A workaround for amdgpu DRI buffers is to always to align to 256 at
- * allocation time.
- *
- * See b/115946221,b/117942643
- */
- if (use_flags & (BO_USE_SW_MASK))
- needs_alignment = true;
-#endif
- // See b/122049612
- if (use_flags & (BO_USE_SCANOUT))
- needs_alignment = true;
-
- if (needs_alignment) {
- uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(format, 0);
- width = ALIGN(width, 256 / bytes_per_pixel);
- }
-
- return dri_bo_create(bo, width, height, format, use_flags);
- }
-
stride = drv_stride_from_format(format, width, 0);
stride = ALIGN(stride, 256);
@@ -210,14 +176,72 @@
return 0;
}
-static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
+static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
{
struct combination *combo;
- combo = drv_get_combination(bo->drv, data->format, data->use_flags);
+
+ combo = drv_get_combination(bo->drv, format, use_flags);
if (!combo)
return -EINVAL;
- if (combo->metadata.tiling == TILE_TYPE_DRI)
+ if (combo->metadata.tiling == TILE_TYPE_DRI) {
+ bool needs_alignment = false;
+#ifdef __ANDROID__
+ /*
+ * Currently, the gralloc API doesn't differentiate between allocation time and map
+ * time strides. A workaround for amdgpu DRI buffers is to always to align to 256 at
+ * allocation time.
+ *
+ * See b/115946221,b/117942643
+ */
+ if (use_flags & (BO_USE_SW_MASK))
+ needs_alignment = true;
+#endif
+ // See b/122049612
+ if (use_flags & (BO_USE_SCANOUT))
+ needs_alignment = true;
+
+ if (needs_alignment) {
+ uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(format, 0);
+ width = ALIGN(width, 256 / bytes_per_pixel);
+ }
+
+ return dri_bo_create(bo, width, height, format, use_flags);
+ }
+
+ return amdgpu_create_bo_linear(bo, width, height, format, use_flags);
+}
+
+static int amdgpu_create_bo_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
+ uint32_t format, const uint64_t *modifiers,
+ uint32_t count)
+{
+ bool only_use_linear = true;
+
+ for (uint32_t i = 0; i < count; ++i)
+ if (modifiers[i] != DRM_FORMAT_MOD_LINEAR)
+ only_use_linear = false;
+
+ if (only_use_linear)
+ return amdgpu_create_bo_linear(bo, width, height, format, BO_USE_SCANOUT);
+
+ return dri_bo_create_with_modifiers(bo, width, height, format, modifiers, count);
+}
+
+static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
+{
+ bool dri_tiling = data->format_modifiers[0] != DRM_FORMAT_MOD_LINEAR;
+ if (data->format_modifiers[0] == DRM_FORMAT_MOD_INVALID) {
+ struct combination *combo;
+ combo = drv_get_combination(bo->drv, data->format, data->use_flags);
+ if (!combo)
+ return -EINVAL;
+
+ dri_tiling = combo->metadata.tiling == TILE_TYPE_DRI;
+ }
+
+ if (dri_tiling)
return dri_bo_import(bo, data);
else
return drv_prime_bo_import(bo, data);
@@ -309,12 +333,14 @@
.init = amdgpu_init,
.close = amdgpu_close,
.bo_create = amdgpu_create_bo,
+ .bo_create_with_modifiers = amdgpu_create_bo_with_modifiers,
.bo_destroy = amdgpu_destroy_bo,
.bo_import = amdgpu_import_bo,
.bo_map = amdgpu_map_bo,
.bo_unmap = amdgpu_unmap_bo,
.bo_invalidate = amdgpu_bo_invalidate,
.resolve_format = amdgpu_resolve_format,
+ .num_planes_from_modifier = dri_num_planes_from_modifier,
};
#endif
diff --git a/cros_gralloc/cros_gralloc_buffer.cc b/cros_gralloc/cros_gralloc_buffer.cc
index 01d4038..1066edc 100644
--- a/cros_gralloc/cros_gralloc_buffer.cc
+++ b/cros_gralloc/cros_gralloc_buffer.cc
@@ -108,3 +108,9 @@
return 0;
}
+
+int32_t cros_gralloc_buffer::resource_info(uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES])
+{
+ return drv_resource_info(bo_, strides, offsets);
+}
diff --git a/cros_gralloc/cros_gralloc_buffer.h b/cros_gralloc/cros_gralloc_buffer.h
index e6aec91..ebd72ec 100644
--- a/cros_gralloc/cros_gralloc_buffer.h
+++ b/cros_gralloc/cros_gralloc_buffer.h
@@ -26,6 +26,7 @@
int32_t lock(const struct rectangle *rect, uint32_t map_flags,
uint8_t *addr[DRV_MAX_PLANES]);
int32_t unlock();
+ int32_t resource_info(uint32_t strides[DRV_MAX_PLANES], uint32_t offsets[DRV_MAX_PLANES]);
private:
cros_gralloc_buffer(cros_gralloc_buffer const &);
diff --git a/cros_gralloc/cros_gralloc_driver.cc b/cros_gralloc/cros_gralloc_driver.cc
index 89897e0..62b43d4 100644
--- a/cros_gralloc/cros_gralloc_driver.cc
+++ b/cros_gralloc/cros_gralloc_driver.cc
@@ -333,6 +333,26 @@
return 0;
}
+int32_t cros_gralloc_driver::resource_info(buffer_handle_t handle, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES])
+{
+ std::lock_guard<std::mutex> lock(mutex_);
+
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ auto buffer = get_buffer(hnd);
+ if (!buffer) {
+ drv_log("Invalid Reference.\n");
+ return -EINVAL;
+ }
+
+ return buffer->resource_info(strides, offsets);
+}
+
cros_gralloc_buffer *cros_gralloc_driver::get_buffer(cros_gralloc_handle_t hnd)
{
/* Assumes driver mutex is held. */
diff --git a/cros_gralloc/cros_gralloc_driver.h b/cros_gralloc/cros_gralloc_driver.h
index 45782c9..f051277 100644
--- a/cros_gralloc/cros_gralloc_driver.h
+++ b/cros_gralloc/cros_gralloc_driver.h
@@ -31,6 +31,8 @@
int32_t unlock(buffer_handle_t handle, int32_t *release_fence);
int32_t get_backing_store(buffer_handle_t handle, uint64_t *out_store);
+ int32_t resource_info(buffer_handle_t handle, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES]);
private:
cros_gralloc_driver(cros_gralloc_driver const &);
diff --git a/cros_gralloc/gralloc0/gralloc0.cc b/cros_gralloc/gralloc0/gralloc0.cc
index 004b76b..a70498a 100644
--- a/cros_gralloc/gralloc0/gralloc0.cc
+++ b/cros_gralloc/gralloc0/gralloc0.cc
@@ -4,6 +4,7 @@
* found in the LICENSE file.
*/
+#include "../../util.h"
#include "../cros_gralloc_driver.h"
#include <cassert>
@@ -261,6 +262,8 @@
uint64_t *out_store;
buffer_handle_t handle;
uint32_t *out_width, *out_height, *out_stride;
+ uint32_t strides[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
+ uint32_t offsets[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
auto mod = (struct gralloc0_module const *)module;
switch (op) {
@@ -286,7 +289,17 @@
switch (op) {
case GRALLOC_DRM_GET_STRIDE:
out_stride = va_arg(args, uint32_t *);
- *out_stride = hnd->pixel_stride;
+ ret = mod->driver->resource_info(handle, strides, offsets);
+ if (ret)
+ break;
+
+ if (strides[0] != hnd->strides[0]) {
+ uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(hnd->format, 0);
+ *out_stride = DIV_ROUND_UP(strides[0], bytes_per_pixel);
+ } else {
+ *out_stride = hnd->pixel_stride;
+ }
+
break;
case GRALLOC_DRM_GET_FORMAT:
out_format = va_arg(args, int32_t *);
@@ -364,6 +377,8 @@
{
int32_t ret;
uint32_t map_flags;
+ uint32_t strides[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
+ uint32_t offsets[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
uint8_t *addr[DRV_MAX_PLANES] = { nullptr, nullptr, nullptr, nullptr };
auto mod = (struct gralloc0_module const *)module;
struct rectangle rect = { .x = static_cast<uint32_t>(l),
@@ -393,13 +408,23 @@
if (ret)
return ret;
+ if (!map_flags) {
+ ret = mod->driver->resource_info(handle, strides, offsets);
+ if (ret)
+ return ret;
+
+ for (uint32_t plane = 0; plane < DRV_MAX_PLANES; plane++)
+ addr[plane] =
+ reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(offsets[plane]));
+ }
+
switch (hnd->format) {
case DRM_FORMAT_NV12:
ycbcr->y = addr[0];
ycbcr->cb = addr[1];
ycbcr->cr = addr[1] + 1;
- ycbcr->ystride = hnd->strides[0];
- ycbcr->cstride = hnd->strides[1];
+ ycbcr->ystride = (!map_flags) ? strides[0] : hnd->strides[0];
+ ycbcr->cstride = (!map_flags) ? strides[1] : hnd->strides[1];
ycbcr->chroma_step = 2;
break;
case DRM_FORMAT_YVU420:
@@ -407,8 +432,8 @@
ycbcr->y = addr[0];
ycbcr->cb = addr[2];
ycbcr->cr = addr[1];
- ycbcr->ystride = hnd->strides[0];
- ycbcr->cstride = hnd->strides[1];
+ ycbcr->ystride = (!map_flags) ? strides[0] : hnd->strides[0];
+ ycbcr->cstride = (!map_flags) ? strides[1] : hnd->strides[1];
ycbcr->chroma_step = 1;
break;
default:
diff --git a/cros_gralloc/gralloc0/tests/gralloctest.c b/cros_gralloc/gralloc0/tests/gralloctest.c
index 9160e62..8dfcd0b 100644
--- a/cros_gralloc/gralloc0/tests/gralloctest.c
+++ b/cros_gralloc/gralloc0/tests/gralloctest.c
@@ -95,7 +95,7 @@
// clang-format on
struct grallocinfo {
- buffer_handle_t handle; /* handle to the buffer */
+ buffer_handle_t handle; /* handle to the buffer */
int w; /* width of buffer */
int h; /* height of buffer */
int format; /* format of the buffer */
diff --git a/dri.c b/dri.c
index 950d616..97dc567 100644
--- a/dri.c
+++ b/dri.c
@@ -65,29 +65,6 @@
}
/*
- * The DRI GEM namespace may be different from the minigbm's driver GEM namespace. We have
- * to import into minigbm.
- */
-static int import_into_minigbm(struct dri_driver *dri, struct bo *bo)
-{
- uint32_t handle;
- int prime_fd, ret;
-
- if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_FD, &prime_fd))
- return -errno;
-
- ret = drmPrimeFDToHandle(bo->drv->fd, prime_fd, &handle);
- if (ret) {
- drv_log("drmPrimeFDToHandle failed with %s\n", strerror(errno));
- return ret;
- }
-
- bo->handles[0].u32 = handle;
- close(prime_fd);
- return 0;
-}
-
-/*
* Close Gem Handle
*/
static void close_gem_handle(uint32_t handle, int fd)
@@ -103,6 +80,118 @@
}
/*
+ * The DRI GEM namespace may be different from the minigbm's driver GEM namespace. We have
+ * to import into minigbm.
+ */
+static int import_into_minigbm(struct dri_driver *dri, struct bo *bo)
+{
+ uint32_t handle;
+ int ret, modifier_upper, modifier_lower, num_planes, i, j;
+ off_t dmabuf_sizes[DRV_MAX_PLANES];
+ __DRIimage *plane_image = NULL;
+
+ if (dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_MODIFIER_UPPER,
+ &modifier_upper) &&
+ dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_MODIFIER_LOWER,
+ &modifier_lower)) {
+ bo->meta.format_modifiers[0] =
+ ((uint64_t)modifier_upper << 32) | (uint32_t)modifier_lower;
+ } else {
+ bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_INVALID;
+ }
+
+ if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_NUM_PLANES,
+ &num_planes)) {
+ return -errno;
+ }
+
+ bo->meta.num_planes = num_planes;
+
+ for (i = 0; i < num_planes; ++i) {
+ int prime_fd, stride, offset;
+ plane_image = dri->image_extension->fromPlanar(bo->priv, i, NULL);
+ __DRIimage *image = plane_image ? plane_image : bo->priv;
+
+ if (i)
+ bo->meta.format_modifiers[i] = bo->meta.format_modifiers[0];
+
+ if (!dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE, &stride) ||
+ !dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET, &offset)) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ if (!dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_FD, &prime_fd)) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ dmabuf_sizes[i] = lseek(prime_fd, 0, SEEK_END);
+ if (dmabuf_sizes[i] == (off_t)-1) {
+ ret = -errno;
+ close(prime_fd);
+ goto cleanup;
+ }
+
+ lseek(prime_fd, 0, SEEK_SET);
+
+ ret = drmPrimeFDToHandle(bo->drv->fd, prime_fd, &handle);
+
+ close(prime_fd);
+
+ if (ret) {
+ drv_log("drmPrimeFDToHandle failed with %s\n", strerror(errno));
+ goto cleanup;
+ }
+
+ bo->handles[i].u32 = handle;
+
+ bo->meta.strides[i] = stride;
+ bo->meta.offsets[i] = offset;
+
+ if (plane_image)
+ dri->image_extension->destroyImage(plane_image);
+ }
+
+ for (i = 0; i < num_planes; ++i) {
+ off_t next_plane = dmabuf_sizes[i];
+ for (j = 0; j < num_planes; ++j) {
+ if (bo->meta.offsets[j] < next_plane &&
+ bo->meta.offsets[j] > bo->meta.offsets[i] &&
+ bo->handles[j].u32 == bo->handles[i].u32)
+ next_plane = bo->meta.offsets[j];
+ }
+
+ bo->meta.sizes[i] = next_plane - bo->meta.offsets[i];
+
+ /* This is kind of misleading if different planes use
+ different dmabufs. */
+ bo->meta.total_size += bo->meta.sizes[i];
+ }
+
+ return 0;
+
+cleanup:
+ if (plane_image)
+ dri->image_extension->destroyImage(plane_image);
+ while (--i >= 0) {
+ for (j = 0; j <= i; ++j)
+ if (bo->handles[j].u32 == bo->handles[i].u32)
+ break;
+
+ /* Multiple equivalent handles) */
+ if (i == j)
+ break;
+
+ /* This kind of goes horribly wrong when we already imported
+ * the same handles earlier, as we should really reference
+ * count handles. */
+ close_gem_handle(bo->handles[i].u32, bo->drv->fd);
+ }
+ return ret;
+}
+
+/*
* The caller is responsible for setting drv->priv to a structure that derives from dri_driver.
*/
int dri_init(struct driver *drv, const char *dri_so_path, const char *driver_suffix)
@@ -190,11 +279,9 @@
uint64_t use_flags)
{
unsigned int dri_use;
- int ret, dri_format, stride, offset;
- int modifier_upper, modifier_lower;
+ int ret, dri_format;
struct dri_driver *dri = bo->drv->priv;
- assert(bo->meta.num_planes == 1);
dri_format = drm_format_to_dri_format(format);
/* Gallium drivers require shared to get the handle and stride. */
@@ -217,34 +304,38 @@
if (ret)
goto free_image;
- if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_STRIDE, &stride)) {
- ret = -errno;
- goto close_handle;
- }
-
- if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_OFFSET, &offset)) {
- ret = -errno;
- goto close_handle;
- }
-
- if (dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_MODIFIER_UPPER,
- &modifier_upper) &&
- dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_MODIFIER_LOWER,
- &modifier_lower)) {
- bo->meta.format_modifiers[0] =
- ((uint64_t)modifier_upper << 32) | (uint32_t)modifier_lower;
- } else {
- bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_INVALID;
- }
-
- bo->meta.strides[0] = stride;
- bo->meta.sizes[0] = stride * height;
- bo->meta.offsets[0] = offset;
- bo->meta.total_size = offset + bo->meta.sizes[0];
return 0;
-close_handle:
- close_gem_handle(bo->handles[0].u32, bo->drv->fd);
+free_image:
+ dri->image_extension->destroyImage(bo->priv);
+ return ret;
+}
+
+int dri_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ const uint64_t *modifiers, uint32_t modifier_count)
+{
+ int ret, dri_format;
+ struct dri_driver *dri = bo->drv->priv;
+
+ if (!dri->image_extension->createImageWithModifiers) {
+ return -ENOENT;
+ }
+
+ dri_format = drm_format_to_dri_format(format);
+
+ bo->priv = dri->image_extension->createImageWithModifiers(
+ dri->device, width, height, dri_format, modifiers, modifier_count, NULL);
+ if (!bo->priv) {
+ ret = -errno;
+ return ret;
+ }
+
+ ret = import_into_minigbm(dri, bo);
+ if (ret)
+ goto free_image;
+
+ return 0;
+
free_image:
dri->image_extension->destroyImage(bo->priv);
return ret;
@@ -255,17 +346,41 @@
int ret;
struct dri_driver *dri = bo->drv->priv;
- assert(bo->meta.num_planes == 1);
+ if (data->format_modifiers[0] != DRM_FORMAT_MOD_INVALID) {
+ unsigned error;
- // clang-format off
- bo->priv = dri->image_extension->createImageFromFds(dri->device, data->width, data->height,
- data->format, data->fds,
- bo->meta.num_planes,
- (int *)data->strides,
- (int *)data->offsets, NULL);
- // clang-format on
- if (!bo->priv)
- return -errno;
+ if (!dri->image_extension->createImageFromDmaBufs2)
+ return -ENOSYS;
+
+ // clang-format off
+ bo->priv = dri->image_extension->createImageFromDmaBufs2(dri->device, data->width, data->height,
+ data->format,
+ data->format_modifiers[0],
+ data->fds,
+ bo->meta.num_planes,
+ (int *)data->strides,
+ (int *)data->offsets,
+ __DRI_YUV_COLOR_SPACE_UNDEFINED,
+ __DRI_YUV_RANGE_UNDEFINED,
+ __DRI_YUV_CHROMA_SITING_UNDEFINED,
+ __DRI_YUV_CHROMA_SITING_UNDEFINED,
+ &error, NULL);
+ // clang-format on
+
+ /* Could translate the DRI error, but the Mesa GBM also returns ENOSYS. */
+ if (!bo->priv)
+ return -ENOSYS;
+ } else {
+ // clang-format off
+ bo->priv = dri->image_extension->createImageFromFds(dri->device, data->width, data->height,
+ data->format, data->fds,
+ bo->meta.num_planes,
+ (int *)data->strides,
+ (int *)data->offsets, NULL);
+ // clang-format on
+ if (!bo->priv)
+ return -errno;
+ }
ret = import_into_minigbm(dri, bo);
if (ret) {
@@ -329,4 +444,22 @@
return 0;
}
+size_t dri_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier)
+{
+ struct dri_driver *dri = drv->priv;
+ if (!dri->image_extension->queryDmaBufFormatModifierAttribs) {
+ /* We do not do any modifier checks here. The create will fail
+ * later if the modifier is not supported. */
+ return drv_num_planes_from_format(format);
+ }
+
+ uint64_t planes;
+ GLboolean ret = dri->image_extension->queryDmaBufFormatModifierAttribs(
+ dri->device, format, modifier, __DRI_IMAGE_ATTRIB_NUM_PLANES, &planes);
+ if (!ret)
+ return 0;
+
+ return planes;
+}
+
#endif
diff --git a/dri.h b/dri.h
index 9c72b1e..6218e82 100644
--- a/dri.h
+++ b/dri.h
@@ -30,9 +30,12 @@
void dri_close(struct driver *drv);
int dri_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags);
+int dri_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ const uint64_t *modifiers, uint32_t modifier_count);
int dri_bo_import(struct bo *bo, struct drv_import_fd_data *data);
int dri_bo_destroy(struct bo *bo);
void *dri_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags);
int dri_bo_unmap(struct bo *bo, struct vma *vma);
+size_t dri_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier);
#endif
diff --git a/drv.c b/drv.c
index e5e0be6..920cf4d 100644
--- a/drv.c
+++ b/drv.c
@@ -54,6 +54,9 @@
#ifdef DRV_ROCKCHIP
extern const struct backend backend_rockchip;
#endif
+#ifdef DRV_SYNAPTICS
+extern const struct backend backend_synaptics;
+#endif
#ifdef DRV_TEGRA
extern const struct backend backend_tegra;
#endif
@@ -104,6 +107,9 @@
#ifdef DRV_ROCKCHIP
&backend_rockchip,
#endif
+#ifdef DRV_SYNAPTICS
+ &backend_synaptics,
+#endif
#ifdef DRV_TEGRA
&backend_tegra,
#endif
@@ -111,7 +117,7 @@
#ifdef DRV_VC4
&backend_vc4,
#endif
- &backend_vgem, &backend_virtio_gpu,
+ &backend_vgem, &backend_virtio_gpu,
};
for (i = 0; i < ARRAY_SIZE(backend_list); i++) {
@@ -278,11 +284,8 @@
if (drv->backend->bo_compute_metadata) {
ret = drv->backend->bo_compute_metadata(bo, width, height, format, use_flags, NULL,
0);
- if (!is_test_alloc && ret == 0) {
+ if (!is_test_alloc && ret == 0)
ret = drv->backend->bo_create_from_metadata(bo);
- if (ret == 0)
- return bo;
- }
} else if (!is_test_alloc) {
ret = drv->backend->bo_create(bo, width, height, format, use_flags);
}
@@ -688,3 +691,17 @@
#endif
va_end(args);
}
+
+int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES])
+{
+ for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
+ strides[plane] = bo->meta.strides[plane];
+ offsets[plane] = bo->meta.offsets[plane];
+ }
+
+ if (bo->drv->backend->resource_info)
+ return bo->drv->backend->resource_info(bo, strides, offsets);
+
+ return 0;
+}
diff --git a/drv.h b/drv.h
index 937487b..2b86aad 100644
--- a/drv.h
+++ b/drv.h
@@ -175,8 +175,13 @@
size_t drv_num_planes_from_format(uint32_t format);
+size_t drv_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier);
+
uint32_t drv_num_buffers_per_bo(struct bo *bo);
+int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES]);
+
#define drv_log(format, ...) \
do { \
drv_log_prefix("minigbm", __FILE__, __LINE__, format, ##__VA_ARGS__); \
diff --git a/drv_priv.h b/drv_priv.h
index 31ab892..32c082d 100644
--- a/drv_priv.h
+++ b/drv_priv.h
@@ -79,6 +79,9 @@
int (*bo_invalidate)(struct bo *bo, struct mapping *mapping);
int (*bo_flush)(struct bo *bo, struct mapping *mapping);
uint32_t (*resolve_format)(struct driver *drv, uint32_t format, uint64_t use_flags);
+ size_t (*num_planes_from_modifier)(struct driver *drv, uint32_t format, uint64_t modifier);
+ int (*resource_info)(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES]);
};
// clang-format off
diff --git a/gbm.c b/gbm.c
index 4b62bbf..ab5b3f7 100644
--- a/gbm.c
+++ b/gbm.c
@@ -210,13 +210,17 @@
drv_data.format = fd_data->format;
drv_data.fds[0] = fd_data->fd;
drv_data.strides[0] = fd_data->stride;
+
+ for (i = 0; i < GBM_MAX_PLANES; ++i)
+ drv_data.format_modifiers[i] = DRM_FORMAT_MOD_INVALID;
break;
case GBM_BO_IMPORT_FD_MODIFIER:
gbm_format = fd_modifier_data->format;
drv_data.width = fd_modifier_data->width;
drv_data.height = fd_modifier_data->height;
drv_data.format = fd_modifier_data->format;
- num_planes = drv_num_planes_from_format(drv_data.format);
+ num_planes = drv_num_planes_from_modifier(gbm->drv, drv_data.format,
+ fd_modifier_data->modifier);
assert(num_planes);
num_fds = fd_modifier_data->num_fds;
diff --git a/helpers.c b/helpers.c
index 833a2d8..fed4af9 100644
--- a/helpers.c
+++ b/helpers.c
@@ -176,6 +176,20 @@
return layout ? layout->num_planes : 0;
}
+size_t drv_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier)
+{
+ size_t planes = drv_num_planes_from_format(format);
+
+ /* Disallow unsupported formats. */
+ if (!planes)
+ return 0;
+
+ if (drv->backend->num_planes_from_modifier && modifier != DRM_FORMAT_MOD_INVALID)
+ return drv->backend->num_planes_from_modifier(drv, format, modifier);
+
+ return planes;
+}
+
uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane)
{
const struct planar_layout *layout = layout_from_format(format);
@@ -299,14 +313,17 @@
aligned_height = height;
switch (format) {
case DRM_FORMAT_YVU420_ANDROID:
+ /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not
+ * be aligned. Update 'height' so that drv_bo_from_format below
+ * uses the non-aligned height. */
+ height = bo->meta.height;
+
/* Align width to 32 pixels, so chroma strides are 16 bytes as
* Android requires. */
aligned_width = ALIGN(width, 32);
- /* Adjust the height to include room for chroma planes.
- *
- * HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not
- * be aligned. */
- aligned_height = 3 * DIV_ROUND_UP(bo->meta.height, 2);
+
+ /* Adjust the height to include room for chroma planes. */
+ aligned_height = 3 * DIV_ROUND_UP(height, 2);
break;
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV12:
diff --git a/i915.c b/i915.c
index 05c8272..92fd5b1 100644
--- a/i915.c
+++ b/i915.c
@@ -116,7 +116,8 @@
metadata.priority = 3;
metadata.modifier = I915_FORMAT_MOD_Y_TILED;
- scanout_and_render = unset_flags(scanout_and_render, BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY);
+ scanout_and_render =
+ unset_flags(scanout_and_render, BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY);
/* Support y-tiled NV12 and P010 for libva */
#ifdef I915_SCANOUT_Y_TILED
drv_add_combination(drv, DRM_FORMAT_NV12, &metadata,
@@ -272,7 +273,6 @@
uint64_t use_flags, const uint64_t *modifiers, uint32_t count)
{
static const uint64_t modifier_order[] = {
- I915_FORMAT_MOD_Y_TILED_CCS,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
diff --git a/mediatek.c b/mediatek.c
index 36d8115..cdfc9ab 100644
--- a/mediatek.c
+++ b/mediatek.c
@@ -49,7 +49,7 @@
struct format_metadata metadata;
drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
+ &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
&LINEAR_METADATA, BO_USE_TEXTURE_MASK);
@@ -178,8 +178,8 @@
return MAP_FAILED;
}
- ret = drmPrimeHandleToFD(bo->drv->fd, gem_map.handle, DRM_CLOEXEC, &prime_fd);
- if (ret) {
+ prime_fd = drv_bo_get_plane_fd(bo, 0);
+ if (prime_fd < 0) {
drv_log("Failed to get a prime fd\n");
return MAP_FAILED;
}
diff --git a/meson.c b/meson.c
index 523bf71..f82c57a 100644
--- a/meson.c
+++ b/meson.c
@@ -10,12 +10,14 @@
#include "helpers.h"
#include "util.h"
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
+static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_BGR888, DRM_FORMAT_BGR565};
static int meson_init(struct driver *drv)
{
- drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
+ drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
return drv_modify_linear_combinations(drv);
}
diff --git a/msm.c b/msm.c
index 5771f6e..fac1fd0 100644
--- a/msm.c
+++ b/msm.c
@@ -109,7 +109,7 @@
DRM_FORMAT_R8 of height one is used for JPEG camera output, so don't
height align that. */
if (bo->meta.format == DRM_FORMAT_YVU420_ANDROID ||
- (bo->meta.format == DRM_FORMAT_R8 && height == 1)) {
+ (bo->meta.format == DRM_FORMAT_R8 && height == 1)) {
alignh = height;
} else {
alignh = ALIGN(height, DEFAULT_ALIGNMENT);
diff --git a/presubmit.sh b/presubmit.sh
index 1cfc59c..5e8a32a 100755
--- a/presubmit.sh
+++ b/presubmit.sh
@@ -4,5 +4,6 @@
# found in the LICENSE file.
find \
'(' -name '*.[ch]' -or -name '*.cc' ')' \
+ -not -name 'virtgpu_drm.h' \
-not -name 'gbm.h' -not -name 'virgl_hw.h' \
-exec clang-format -style=file -i {} +
diff --git a/synaptics.c b/synaptics.c
new file mode 100644
index 0000000..28cb518
--- /dev/null
+++ b/synaptics.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifdef DRV_SYNAPTICS
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888 };
+
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12,
+ DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
+
+static int synaptics_init(struct driver *drv)
+{
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
+
+ drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+ &LINEAR_METADATA, BO_USE_TEXTURE_MASK | BO_USE_HW_VIDEO_ENCODER);
+
+ return drv_modify_linear_combinations(drv);
+}
+
+const struct backend backend_synaptics = {
+ .name = "synaptics",
+ .init = synaptics_init,
+ .bo_create = drv_dumb_bo_create,
+ .bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
+ .bo_map = drv_dumb_bo_map,
+ .bo_unmap = drv_bo_munmap,
+};
+
+#endif
diff --git a/vc4.c b/vc4.c
index 7af16c2..06b3ed7 100644
--- a/vc4.c
+++ b/vc4.c
@@ -28,14 +28,24 @@
return drv_modify_linear_combinations(drv);
}
-static int vc4_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
+static int vc4_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t height,
+ uint32_t format, uint64_t modifier)
{
int ret;
size_t plane;
uint32_t stride;
struct drm_vc4_create_bo bo_create;
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
+ drv_log("DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED not supported yet\n");
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+
/*
* Since the ARM L1 cache line size is 64 bytes, align to that as a
* performance optimization.
@@ -59,6 +69,31 @@
return 0;
}
+static int vc4_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ struct combination *combo;
+
+ combo = drv_get_combination(bo->drv, format, use_flags);
+ if (!combo)
+ return -EINVAL;
+
+ return vc4_bo_create_for_modifier(bo, width, height, format, combo->metadata.modifier);
+}
+
+static int vc4_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
+ uint32_t format, const uint64_t *modifiers, uint32_t count)
+{
+ static const uint64_t modifier_order[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ };
+ uint64_t modifier;
+
+ modifier = drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order));
+
+ return vc4_bo_create_for_modifier(bo, width, height, format, modifier);
+}
+
static void *vc4_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
@@ -82,6 +117,7 @@
.name = "vc4",
.init = vc4_init,
.bo_create = vc4_bo_create,
+ .bo_create_with_modifiers = vc4_bo_create_with_modifiers,
.bo_import = drv_prime_bo_import,
.bo_destroy = drv_gem_bo_destroy,
.bo_map = vc4_bo_map,
diff --git a/virtgpu_drm.h b/virtgpu_drm.h
new file mode 100644
index 0000000..a92d764
--- /dev/null
+++ b/virtgpu_drm.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2013 Red Hat
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRTGPU_DRM_H
+#define VIRTGPU_DRM_H
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ *
+ * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
+ * compatibility Keep fields aligned to their size
+ */
+
+#define DRM_VIRTGPU_MAP 0x01
+#define DRM_VIRTGPU_EXECBUFFER 0x02
+#define DRM_VIRTGPU_GETPARAM 0x03
+#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
+#define DRM_VIRTGPU_RESOURCE_INFO 0x05
+#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
+#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
+#define DRM_VIRTGPU_WAIT 0x08
+#define DRM_VIRTGPU_GET_CAPS 0x09
+
+#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
+#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_FLAGS (\
+ VIRTGPU_EXECBUF_FENCE_FD_IN |\
+ VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ 0)
+
+struct drm_virtgpu_map {
+ __u64 offset; /* use for mmap system call */
+ __u32 handle;
+ __u32 pad;
+};
+
+struct drm_virtgpu_execbuffer {
+ __u32 flags;
+ __u32 size;
+ __u64 command; /* void* */
+ __u64 bo_handles;
+ __u32 num_bo_handles;
+ __s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
+};
+
+#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+
+struct drm_virtgpu_getparam {
+ __u64 param;
+ __u64 value;
+};
+
+/* NO_BO flags? NO resource flag? */
+/* resource flag for y_0_top */
+struct drm_virtgpu_resource_create {
+ __u32 target;
+ __u32 format;
+ __u32 bind;
+ __u32 width;
+ __u32 height;
+ __u32 depth;
+ __u32 array_size;
+ __u32 last_level;
+ __u32 nr_samples;
+ __u32 flags;
+ __u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
+ __u32 res_handle; /* returned by kernel */
+ __u32 size; /* validate transfer in the host */
+ __u32 stride; /* validate transfer in the host */
+};
+
+struct drm_virtgpu_resource_info {
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u32 size;
+ union {
+ __u32 stride;
+ __u32 strides[4]; /* strides[0] is accessible with stride. */
+ };
+ __u32 num_planes;
+ __u32 offsets[4];
+ __u64 format_modifier;
+};
+
+struct drm_virtgpu_3d_box {
+ __u32 x;
+ __u32 y;
+ __u32 z;
+ __u32 w;
+ __u32 h;
+ __u32 d;
+};
+
+struct drm_virtgpu_3d_transfer_to_host {
+ __u32 bo_handle;
+ struct drm_virtgpu_3d_box box;
+ __u32 level;
+ __u32 offset;
+};
+
+struct drm_virtgpu_3d_transfer_from_host {
+ __u32 bo_handle;
+ struct drm_virtgpu_3d_box box;
+ __u32 level;
+ __u32 offset;
+};
+
+#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
+struct drm_virtgpu_3d_wait {
+ __u32 handle; /* 0 is an invalid handle */
+ __u32 flags;
+};
+
+struct drm_virtgpu_get_caps {
+ __u32 cap_set_id;
+ __u32 cap_set_ver;
+ __u64 addr;
+ __u32 size;
+ __u32 pad;
+};
+
+#define DRM_IOCTL_VIRTGPU_MAP \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
+
+#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
+ struct drm_virtgpu_execbuffer)
+
+#define DRM_IOCTL_VIRTGPU_GETPARAM \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
+ struct drm_virtgpu_getparam)
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE, \
+ struct drm_virtgpu_resource_create)
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
+ struct drm_virtgpu_resource_info)
+
+#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \
+ struct drm_virtgpu_3d_transfer_from_host)
+
+#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \
+ struct drm_virtgpu_3d_transfer_to_host)
+
+#define DRM_IOCTL_VIRTGPU_WAIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \
+ struct drm_virtgpu_3d_wait)
+
+#define DRM_IOCTL_VIRTGPU_GET_CAPS \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
+ struct drm_virtgpu_get_caps)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/virtio_gpu.c b/virtio_gpu.c
index e059497..4dbcc4f 100644
--- a/virtio_gpu.c
+++ b/virtio_gpu.c
@@ -9,13 +9,13 @@
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
-#include <virtgpu_drm.h>
#include <xf86drm.h>
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
#include "virgl_hw.h"
+#include "virtgpu_drm.h"
#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
@@ -25,6 +25,27 @@
#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
+struct feature {
+ uint64_t feature;
+ const char *name;
+ uint32_t enabled;
+};
+
+enum feature_id {
+ feat_3d,
+ feat_capset_fix,
+ feat_max,
+};
+
+#define FEATURE(x) \
+ (struct feature) \
+ { \
+ x, #x, 0 \
+ }
+
+static struct feature features[] = { FEATURE(VIRTGPU_PARAM_3D_FEATURES),
+ FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX) };
+
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888 };
@@ -37,7 +58,6 @@
DRM_FORMAT_YVU420_ANDROID };
struct virtio_gpu_priv {
- int has_3d;
int caps_is_v2;
union virgl_caps caps;
};
@@ -89,7 +109,7 @@
{
struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
- if (priv->has_3d && priv->caps.max_version >= 1) {
+ if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
if ((use_flags & BO_USE_RENDERING) &&
!virtio_gpu_supports_format(&priv->caps.v1.render, drm_format)) {
drv_log("Skipping unsupported render format: %d\n", drm_format);
@@ -239,21 +259,11 @@
{
int ret;
struct drm_virtgpu_get_caps cap_args;
- struct drm_virtgpu_getparam param_args;
- uint32_t can_query_v2 = 0;
-
-	memset(&param_args, 0, sizeof(param_args));
- param_args.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
- param_args.value = (uint64_t)(uintptr_t)&can_query_v2;
-	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &param_args);
- if (ret) {
- drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));
- }
*caps_is_v2 = 0;
memset(&cap_args, 0, sizeof(cap_args));
cap_args.addr = (unsigned long long)caps;
- if (can_query_v2) {
+ if (features[feat_capset_fix].enabled) {
*caps_is_v2 = 1;
cap_args.cap_set_id = 2;
cap_args.size = sizeof(union virgl_caps);
@@ -284,22 +294,20 @@
{
int ret;
struct virtio_gpu_priv *priv;
- struct drm_virtgpu_getparam args;
priv = calloc(1, sizeof(*priv));
drv->priv = priv;
+ for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
+ struct drm_virtgpu_getparam params = { 0 };
- memset(&args, 0, sizeof(args));
- args.param = VIRTGPU_PARAM_3D_FEATURES;
- args.value = (uint64_t)(uintptr_t)&priv->has_3d;
- ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &args);
- if (ret) {
- drv_log("virtio 3D acceleration is not available\n");
- /* Be paranoid */
- priv->has_3d = 0;
+ params.param = features[i].feature;
+ params.value = (uint64_t)(uintptr_t)&features[i].enabled;
+		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
+ if (ret)
+			drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));
}
- if (priv->has_3d) {
+ if (features[feat_3d].enabled) {
virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);
/* This doesn't mean host can scanout everything, it just means host
@@ -337,7 +345,7 @@
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
BO_USE_HW_VIDEO_ENCODER);
drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
return drv_modify_linear_combinations(drv);
}
@@ -351,8 +359,7 @@
static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
- if (priv->has_3d)
+ if (features[feat_3d].enabled)
return virtio_virgl_bo_create(bo, width, height, format, use_flags);
else
return virtio_dumb_bo_create(bo, width, height, format, use_flags);
@@ -360,8 +367,7 @@
static int virtio_gpu_bo_destroy(struct bo *bo)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
- if (priv->has_3d)
+ if (features[feat_3d].enabled)
return drv_gem_bo_destroy(bo);
else
return drv_dumb_bo_destroy(bo);
@@ -369,8 +375,7 @@
static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
- if (priv->has_3d)
+ if (features[feat_3d].enabled)
return virtio_virgl_bo_map(bo, vma, plane, map_flags);
else
return drv_dumb_bo_map(bo, vma, plane, map_flags);
@@ -380,10 +385,9 @@
{
int ret;
struct drm_virtgpu_3d_transfer_from_host xfer;
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
struct drm_virtgpu_3d_wait waitcmd;
- if (!priv->has_3d)
+ if (!features[feat_3d].enabled)
return 0;
// Invalidate is only necessary if the host writes to the buffer.
@@ -434,10 +438,9 @@
{
int ret;
struct drm_virtgpu_3d_transfer_to_host xfer;
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
struct drm_virtgpu_3d_wait waitcmd;
- if (!priv->has_3d)
+ if (!features[feat_3d].enabled)
return 0;
if (!(mapping->vma->map_flags & BO_MAP_WRITE))
@@ -482,7 +485,6 @@
static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
- struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
switch (format) {
case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
@@ -496,7 +498,7 @@
* All of our host drivers prefer NV12 as their flexible media format.
* If that changes, this will need to be modified.
*/
- if (priv->has_3d)
+ if (features[feat_3d].enabled)
return DRM_FORMAT_NV12;
else
return DRM_FORMAT_YVU420;
@@ -505,6 +507,37 @@
}
}
+static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
+ uint32_t offsets[DRV_MAX_PLANES])
+{
+ int ret;
+ struct drm_virtgpu_resource_info res_info;
+
+ if (!features[feat_3d].enabled)
+ return 0;
+
+ memset(&res_info, 0, sizeof(res_info));
+ res_info.bo_handle = bo->handles[0].u32;
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &res_info);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed with %s\n", strerror(errno));
+ return ret;
+ }
+
+ for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
+ /*
+ * Currently, kernel v4.14 (Betty) doesn't have the extended resource info
+ * ioctl.
+ */
+ if (res_info.strides[plane]) {
+ strides[plane] = res_info.strides[plane];
+ offsets[plane] = res_info.offsets[plane];
+ }
+ }
+
+ return 0;
+}
+
const struct backend backend_virtio_gpu = {
.name = "virtio_gpu",
.init = virtio_gpu_init,
@@ -517,4 +550,5 @@
.bo_invalidate = virtio_gpu_bo_invalidate,
.bo_flush = virtio_gpu_bo_flush,
.resolve_format = virtio_gpu_resolve_format,
+ .resource_info = virtio_gpu_resource_info,
};