Merge remote-tracking branch 'aosp/upstream-main' into HEAD am: 81e3cdfbbc

Original change: https://android-review.googlesource.com/c/platform/external/libdrm/+/1858417

Change-Id: I2b286759ff6188e11a346593b6fa5ff40c783859
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index b99dc91..26eaf83 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -14,7 +14,7 @@
 # repository's registry will be used there as well.
 variables:
   UPSTREAM_REPO: mesa/drm
-  DEBIAN_TAG: "2020-11-15"
+  DEBIAN_TAG: "2021-02-11"
   DEBIAN_VERSION: buster-slim
   DEBIAN_IMAGE: "$CI_REGISTRY_IMAGE/debian/$DEBIAN_VERSION:$DEBIAN_TAG"
 
@@ -116,10 +116,9 @@
     - if: '$SCHEDULE == "arch-daily"'
       when: on_success
     - when: never
-  image: archlinux/base
+  image: archlinux/archlinux:base-devel
   before_script:
     - pacman -Syu --noconfirm --needed
-        base-devel
         cairo
         cunit
         libatomic_ops
diff --git a/.gitlab-ci/debian-install.sh b/.gitlab-ci/debian-install.sh
index 886e808..ab90136 100644
--- a/.gitlab-ci/debian-install.sh
+++ b/.gitlab-ci/debian-install.sh
@@ -63,4 +63,4 @@
 
 
 # Test that the oldest Meson version we claim to support is still supported
-pip3 install meson==0.43
+pip3 install meson==0.46
diff --git a/README.rst b/README.rst
index da995d0..7460803 100644
--- a/README.rst
+++ b/README.rst
@@ -13,6 +13,24 @@
 libdrm is a low-level library, typically used by graphics drivers such as
 the Mesa drivers, the X drivers, libva and similar projects.
 
+Syncing with the Linux kernel headers
+-------------------------------------
+
+The library should be regularly updated to match recent changes in the
+kernel's `include/uapi/drm/` headers.
+
+libdrm maintains human-readable names for format modifier tokens; the simpler
+ones are extracted automatically from the `drm_fourcc.h` header file with the
+help of a python script.  This is not always possible, as some vendors encode
+modifiers that need to be decoded/extracted programmatically.  For those, one
+can extend the existing vendor-specific decoding functions to cover the newly
+added tokens, or, if no such function exists, add one that performs the
+extraction.
+
+For the simpler format modifier tokens there is a script (gen_table_fourcc.py)
+that creates a static table by going over the `drm_fourcc.h` header file.  If
+the script can't handle a new (simpler) token format modifier, extend the
+script itself rather than editing the generated static table.
 
 Compiling
 ---------
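For reference, a minimal sketch of the kind of static table gen_table_fourcc.py
(added later in this patch) emits.  The specific entries shown are hypothetical
and the DRM_MODIFIER*() wrapper macros are assumed from the script's output
format; the real table is written to the header named on the script's command
line.

    /* Hypothetical excerpt of the generated table; entries follow the
     * patterns printed by gen_table_fourcc.py. */
    static const struct drmFormatModifierInfo drm_format_modifier_table[] = {
        { DRM_MODIFIER_INVALID(NONE, INVALID_MODIFIER) },
        { DRM_MODIFIER_LINEAR(NONE, LINEAR) },
        { DRM_MODIFIER_INTEL(X_TILED, X_TILED) },
        { DRM_MODIFIER(ARM, 16X16_BLOCK_U_INTERLEAVED, 16X16_BLOCK_U_INTERLEAVED) },
    };

    static const struct drmFormatModifierVendorInfo drm_format_modifier_vendor_table[] = {
        { DRM_FORMAT_MOD_VENDOR_INTEL, "INTEL" },
        { DRM_FORMAT_MOD_VENDOR_ARM, "ARM" },
    };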
diff --git a/amdgpu/amdgpu-symbols.txt b/amdgpu/amdgpu-symbols.txt
index e3bafaa..a2ed652 100644
--- a/amdgpu/amdgpu-symbols.txt
+++ b/amdgpu/amdgpu-symbols.txt
@@ -66,6 +66,7 @@
 amdgpu_query_hw_ip_info
 amdgpu_query_info
 amdgpu_query_sensor_info
+amdgpu_query_video_caps_info
 amdgpu_read_mm_registers
 amdgpu_va_range_alloc
 amdgpu_va_range_free
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index 188179c..b118dd4 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -1238,6 +1238,23 @@
 			     unsigned size, void *value);
 
 /**
+ * Query information about video capabilities
+ *
+ * The returned data is of size sizeof(struct drm_amdgpu_info_video_caps)
+ *
+ * \param   dev         - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param   cap_type    - \c [in] AMDGPU_INFO_VIDEO_CAPS_DECODE(ENCODE)
+ * \param   size        - \c [in] Size of the returned value.
+ * \param   value       - \c [out] Pointer to the return value.
+ *
+ * \return   0 on success\n
+ *          <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
+                                 unsigned size, void *value);
+
+/**
  * Read a set of consecutive memory-mapped registers.
  * Not all registers are allowed to be read by userspace.
  *
@@ -1263,6 +1280,7 @@
 */
 #define AMDGPU_VA_RANGE_32_BIT		0x1
 #define AMDGPU_VA_RANGE_HIGH		0x2
+#define AMDGPU_VA_RANGE_REPLAYABLE	0x4
 
 /**
  * Allocate virtual address range
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index 5bdb8fe..54b1fb9 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -39,14 +39,6 @@
 #include "amdgpu_internal.h"
 #include "util_math.h"
 
-static int amdgpu_close_kms_handle(int fd, uint32_t handle)
-{
-	struct drm_gem_close args = {};
-
-	args.handle = handle;
-	return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
-}
-
 static int amdgpu_bo_create(amdgpu_device_handle dev,
 			    uint64_t size,
 			    uint32_t handle,
@@ -101,7 +93,7 @@
 			     buf_handle);
 	pthread_mutex_unlock(&dev->bo_table_mutex);
 	if (r) {
-		amdgpu_close_kms_handle(dev->fd, args.out.handle);
+		drmCloseBufferHandle(dev->fd, args.out.handle);
 	}
 
 out:
@@ -216,7 +208,7 @@
 	bo->flink_name = flink.name;
 
 	if (bo->dev->flink_fd != bo->dev->fd)
-		amdgpu_close_kms_handle(bo->dev->flink_fd, handle);
+		drmCloseBufferHandle(bo->dev->flink_fd, handle);
 
 	pthread_mutex_lock(&bo->dev->bo_table_mutex);
 	r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
@@ -342,8 +334,8 @@
 			close(dma_fd);
 			if (r)
 				goto free_bo_handle;
-			r = amdgpu_close_kms_handle(dev->flink_fd,
-						    open_arg.handle);
+			r = drmCloseBufferHandle(dev->flink_fd,
+						 open_arg.handle);
 			if (r)
 				goto free_bo_handle;
 		}
@@ -381,12 +373,12 @@
 
 free_bo_handle:
 	if (flink_name && open_arg.handle)
-		amdgpu_close_kms_handle(dev->flink_fd, open_arg.handle);
+		drmCloseBufferHandle(dev->flink_fd, open_arg.handle);
 
 	if (bo)
 		amdgpu_bo_free(bo);
 	else
-		amdgpu_close_kms_handle(dev->fd, handle);
+		drmCloseBufferHandle(dev->fd, handle);
 unlock:
 	pthread_mutex_unlock(&dev->bo_table_mutex);
 	return r;
@@ -415,7 +407,7 @@
 			amdgpu_bo_cpu_unmap(bo);
 		}
 
-		amdgpu_close_kms_handle(dev->fd, bo->handle);
+		drmCloseBufferHandle(dev->fd, bo->handle);
 		pthread_mutex_destroy(&bo->cpu_access_mutex);
 		free(bo);
 	}
@@ -598,7 +590,7 @@
 	r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
 	pthread_mutex_unlock(&dev->bo_table_mutex);
 	if (r) {
-		amdgpu_close_kms_handle(dev->fd, args.handle);
+		drmCloseBufferHandle(dev->fd, args.handle);
 	}
 
 out:
diff --git a/amdgpu/amdgpu_gpu_info.c b/amdgpu/amdgpu_gpu_info.c
index 777087f..9f8695c 100644
--- a/amdgpu/amdgpu_gpu_info.c
+++ b/amdgpu/amdgpu_gpu_info.c
@@ -331,3 +331,18 @@
 	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
 			       sizeof(struct drm_amdgpu_info));
 }
+
+drm_public int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
+                                            unsigned size, void *value)
+{
+	struct drm_amdgpu_info request;
+
+	memset(&request, 0, sizeof(request));
+	request.return_pointer = (uintptr_t)value;
+	request.return_size = size;
+	request.query = AMDGPU_INFO_VIDEO_CAPS;
+	request.sensor_info.type = cap_type;
+
+	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+			       sizeof(struct drm_amdgpu_info));
+}
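A minimal usage sketch for the new query, assuming an already-initialized
device handle from amdgpu_device_initialize() and with error handling trimmed:

    #include <stdio.h>
    #include <amdgpu.h>
    #include <amdgpu_drm.h>

    static void print_hevc_decode_caps(amdgpu_device_handle dev)
    {
        struct drm_amdgpu_info_video_caps caps;
        const struct drm_amdgpu_info_video_codec_info *hevc;

        if (amdgpu_query_video_caps_info(dev, AMDGPU_INFO_VIDEO_CAPS_DECODE,
                                         sizeof(caps), &caps))
            return;

        hevc = &caps.codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC];
        if (hevc->valid)
            printf("HEVC decode: up to %ux%u\n",
                   hevc->max_width, hevc->max_height);
    }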
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index d25d421..077a9fc 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -69,65 +69,99 @@
 	pthread_mutex_destroy(&mgr->bo_va_mutex);
 }
 
-static drm_private uint64_t
+static drm_private int
+amdgpu_vamgr_subtract_hole(struct amdgpu_bo_va_hole *hole, uint64_t start_va,
+			   uint64_t end_va)
+{
+	if (start_va > hole->offset && end_va - hole->offset < hole->size) {
+		struct amdgpu_bo_va_hole *n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+		if (!n)
+			return -ENOMEM;
+
+		n->size = start_va - hole->offset;
+		n->offset = hole->offset;
+		list_add(&n->list, &hole->list);
+
+		hole->size -= (end_va - hole->offset);
+		hole->offset = end_va;
+	} else if (start_va > hole->offset) {
+		hole->size = start_va - hole->offset;
+	} else if (end_va - hole->offset < hole->size) {
+		hole->size -= (end_va - hole->offset);
+		hole->offset = end_va;
+	} else {
+		list_del(&hole->list);
+		free(hole);
+	}
+
+	return 0;
+}
+
+static drm_private int
 amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
-		     uint64_t alignment, uint64_t base_required)
+		     uint64_t alignment, uint64_t base_required,
+		     bool search_from_top, uint64_t *va_out)
 {
 	struct amdgpu_bo_va_hole *hole, *n;
-	uint64_t offset = 0, waste = 0;
+	uint64_t offset = 0;
+	int ret;
 
 
 	alignment = MAX2(alignment, mgr->va_alignment);
 	size = ALIGN(size, mgr->va_alignment);
 
 	if (base_required % alignment)
-		return AMDGPU_INVALID_VA_ADDRESS;
+		return -EINVAL;
 
 	pthread_mutex_lock(&mgr->bo_va_mutex);
-	LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
-		if (base_required) {
-			if (hole->offset > base_required ||
-			    (hole->offset + hole->size) < (base_required + size))
-				continue;
-			waste = base_required - hole->offset;
-			offset = base_required;
-		} else {
-			offset = hole->offset;
-			waste = offset % alignment;
-			waste = waste ? alignment - waste : 0;
-			offset += waste;
-			if (offset >= (hole->offset + hole->size)) {
-				continue;
+	if (!search_from_top) {
+		LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
+			if (base_required) {
+				if (hole->offset > base_required ||
+				   (hole->offset + hole->size) < (base_required + size))
+					continue;
+				offset = base_required;
+			} else {
+				uint64_t waste = hole->offset % alignment;
+				waste = waste ? alignment - waste : 0;
+				offset = hole->offset + waste;
+				if (offset >= (hole->offset + hole->size) ||
+				    size > (hole->offset + hole->size) - offset) {
+					continue;
+				}
 			}
-		}
-		if (!waste && hole->size == size) {
-			offset = hole->offset;
-			list_del(&hole->list);
-			free(hole);
+			ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
 			pthread_mutex_unlock(&mgr->bo_va_mutex);
-			return offset;
+			*va_out = offset;
+			return ret;
 		}
-		if ((hole->size - waste) > size) {
-			if (waste) {
-				n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-				n->size = waste;
-				n->offset = hole->offset;
-				list_add(&n->list, &hole->list);
+	} else {
+		LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
+			if (base_required) {
+				if (hole->offset > base_required ||
+				   (hole->offset + hole->size) < (base_required + size))
+					continue;
+				offset = base_required;
+			} else {
+				if (size > hole->size)
+					continue;
+
+				offset = hole->offset + hole->size - size;
+				offset -= offset % alignment;
+				if (offset < hole->offset) {
+					continue;
+				}
 			}
-			hole->size -= (size + waste);
-			hole->offset += size + waste;
+
+			ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
 			pthread_mutex_unlock(&mgr->bo_va_mutex);
-			return offset;
-		}
-		if ((hole->size - waste) == size) {
-			hole->size = waste;
-			pthread_mutex_unlock(&mgr->bo_va_mutex);
-			return offset;
+			*va_out = offset;
+			return ret;
 		}
 	}
 
 	pthread_mutex_unlock(&mgr->bo_va_mutex);
-	return AMDGPU_INVALID_VA_ADDRESS;
+	return -ENOMEM;
 }
 
 static drm_private void
@@ -196,6 +230,8 @@
 				     uint64_t flags)
 {
 	struct amdgpu_bo_va_mgr *vamgr;
+	bool search_from_top = !!(flags & AMDGPU_VA_RANGE_REPLAYABLE);
+	int ret;
 
 	/* Clear the flag when the high VA manager is not initialized */
 	if (flags & AMDGPU_VA_RANGE_HIGH && !dev->vamgr_high_32.va_max)
@@ -216,21 +252,22 @@
 	va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
 	size = ALIGN(size, vamgr->va_alignment);
 
-	*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
-					va_base_alignment, va_base_required);
+	ret = amdgpu_vamgr_find_va(vamgr, size,
+				   va_base_alignment, va_base_required,
+				   search_from_top, va_base_allocated);
 
-	if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
-	    (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
+	if (!(flags & AMDGPU_VA_RANGE_32_BIT) && ret) {
 		/* fallback to 32bit address */
 		if (flags & AMDGPU_VA_RANGE_HIGH)
 			vamgr = &dev->vamgr_high_32;
 		else
 			vamgr = &dev->vamgr_32;
-		*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
-					va_base_alignment, va_base_required);
+		ret = amdgpu_vamgr_find_va(vamgr, size,
+					   va_base_alignment, va_base_required,
+					   search_from_top, va_base_allocated);
 	}
 
-	if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
+	if (!ret) {
 		struct amdgpu_va* va;
 		va = calloc(1, sizeof(struct amdgpu_va));
 		if(!va){
@@ -243,11 +280,9 @@
 		va->range = va_range_type;
 		va->vamgr = vamgr;
 		*va_range_handle = va;
-	} else {
-		return -EINVAL;
 	}
 
-	return 0;
+	return ret;
 }
 
 drm_public int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
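The new AMDGPU_VA_RANGE_REPLAYABLE flag makes the allocator walk the hole list
from the top of the address space.  A sketch of requesting such a range; the
amdgpu_va_range_alloc() signature is taken from amdgpu.h and `dev` is assumed
to be an initialized device handle:

    uint64_t va = 0;
    amdgpu_va_handle va_handle;
    int r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
                                  0x10000 /* size */, 0 /* alignment */,
                                  0 /* no required base */, &va, &va_handle,
                                  AMDGPU_VA_RANGE_HIGH | AMDGPU_VA_RANGE_REPLAYABLE);
    if (!r)
        amdgpu_va_range_free(va_handle);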
diff --git a/amdgpu/meson.build b/amdgpu/meson.build
index d5c5f39..3301a10 100644
--- a/amdgpu/meson.build
+++ b/amdgpu/meson.build
@@ -21,7 +21,7 @@
 
 datadir_amdgpu = join_paths(get_option('prefix'), get_option('datadir'), 'libdrm')
 
-libdrm_amdgpu = shared_library(
+libdrm_amdgpu = library(
   'drm_amdgpu',
   [
     files(
diff --git a/core-symbols.txt b/core-symbols.txt
index 410054b..ed0d803 100644
--- a/core-symbols.txt
+++ b/core-symbols.txt
@@ -22,6 +22,7 @@
 drmAvailable
 drmCheckModesettingSupported
 drmClose
+drmCloseBufferHandle
 drmCloseOnce
 drmCommandNone
 drmCommandRead
@@ -196,3 +197,5 @@
 drmUnmapBufs
 drmUpdateDrawableInfo
 drmWaitVBlank
+drmGetFormatModifierName
+drmGetFormatModifierVendor
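The two format-modifier helpers exported here back the human-readable modifier
names mentioned in the README change.  A small sketch, assuming both return
heap-allocated strings owned by the caller (NULL when unknown):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <xf86drm.h>

    static void print_modifier(uint64_t modifier)
    {
        char *vendor = drmGetFormatModifierVendor(modifier);
        char *name = drmGetFormatModifierName(modifier);

        printf("%s:%s\n", vendor ? vendor : "?", name ? name : "?");
        free(vendor);
        free(name);
    }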
diff --git a/data/amdgpu.ids b/data/amdgpu.ids
index 93c1c76..ac5213b 100644
--- a/data/amdgpu.ids
+++ b/data/amdgpu.ids
@@ -65,6 +65,7 @@
 15D8,	92,	AMD Ryzen Embedded R1505G with Radeon Vega Gfx
 15D8,	CF,	AMD Ryzen Embedded R1305G with Radeon Vega Gfx
 15D8,	E4,	AMD Ryzen Embedded R1102G with Radeon Vega Gfx
+163F,	AE,	AMD Custom GPU 0405
 6600,	0,	AMD Radeon HD 8600/8700M
 6600,	81,	AMD Radeon (TM) R7 M370
 6601,	0,	AMD Radeon (TM) HD 8500M/8700M
@@ -135,7 +136,9 @@
 67C2,	01,	AMD Radeon (TM) Pro V7350x2
 67C2,	02,	AMD Radeon (TM) Pro V7300X
 67C4,	00,	AMD Radeon (TM) Pro WX 7100 Graphics
+67C4,	80,	AMD Radeon (TM) E9560/E9565 Graphics
 67C7,	00,	AMD Radeon (TM) Pro WX 5100 Graphics
+67C7,	80,	AMD Radeon (TM) E9390 Graphics
 67C0,	00,	AMD Radeon (TM) Pro WX 7100 Graphics
 67D0,	01,	AMD Radeon (TM) Pro V7350x2
 67D0,	02,	AMD Radeon (TM) Pro V7300X
@@ -271,9 +274,19 @@
 7340,	CF,	Radeon RX 5300
 7341,	00,	AMD Radeon Pro W5500
 7347,	00,	AMD Radeon Pro W5500M
+73A3,	00,	AMD Radeon PRO W6800
+73AF,	C0,	AMD Radeon RX 6900 XT
 73BF,	C0,	AMD Radeon RX 6900 XT
 73BF,	C1,	AMD Radeon RX 6800 XT
 73BF,	C3,	AMD Radeon RX 6800
+73DF,	C1,	AMD Radeon RX 6700 XT
+73DF,	C3,	AMD Radeon RX 6800M
+73DF,	C5,	AMD Radeon RX 6700 XT
+73DF,	CF,	AMD Radeon RX 6700M
+73E1,	00,	AMD Radeon PRO W6600M
+73E3,	00,	AMD Radeon PRO W6600
+73FF,	C1,	AMD Radeon RX 6600 XT
+73FF,	C3,	AMD Radeon RX 6600M
 9874,	C4,	AMD Radeon R7 Graphics
 9874,	C5,	AMD Radeon R6 Graphics
 9874,	C6,	AMD Radeon R6 Graphics
diff --git a/etnaviv/etnaviv_bo.c b/etnaviv/etnaviv_bo.c
index 43ce6b4..27123e6 100644
--- a/etnaviv/etnaviv_bo.c
+++ b/etnaviv/etnaviv_bo.c
@@ -48,12 +48,8 @@
 		drmHashDelete(bo->dev->name_table, bo->name);
 
 	if (bo->handle) {
-		struct drm_gem_close req = {
-			.handle = bo->handle,
-		};
-
 		drmHashDelete(bo->dev->handle_table, bo->handle);
-		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+		drmCloseBufferHandle(bo->dev->fd, bo->handle);
 	}
 
 	free(bo);
@@ -82,12 +78,7 @@
 	struct etna_bo *bo = calloc(sizeof(*bo), 1);
 
 	if (!bo) {
-		struct drm_gem_close req = {
-			.handle = handle,
-		};
-
-		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
-
+		drmCloseBufferHandle(dev->fd, handle);
 		return NULL;
 	}
 
diff --git a/etnaviv/meson.build b/etnaviv/meson.build
index 6040cf6..8b82ed0 100644
--- a/etnaviv/meson.build
+++ b/etnaviv/meson.build
@@ -19,7 +19,7 @@
 # SOFTWARE.
 
 
-libdrm_etnaviv = shared_library(
+libdrm_etnaviv = library(
   'drm_etnaviv',
   [
     files(
diff --git a/exynos/exynos_drm.c b/exynos/exynos_drm.c
index b008ad7..3e322a1 100644
--- a/exynos/exynos_drm.c
+++ b/exynos/exynos_drm.c
@@ -176,11 +176,7 @@
 		munmap(bo->vaddr, bo->size);
 
 	if (bo->handle) {
-		struct drm_gem_close req = {
-			.handle = bo->handle,
-		};
-
-		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+		drmCloseBufferHandle(bo->dev->fd, bo->handle);
 	}
 
 	free(bo);
diff --git a/exynos/meson.build b/exynos/meson.build
index 40d66fc..7d1edfe 100644
--- a/exynos/meson.build
+++ b/exynos/meson.build
@@ -18,7 +18,7 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
-libdrm_exynos = shared_library(
+libdrm_exynos = library(
   'drm_exynos',
   [files('exynos_drm.c', 'exynos_fimg2d.c'), config_file],
   c_args : libdrm_c_args,
diff --git a/freedreno/freedreno_bo.c b/freedreno/freedreno_bo.c
index efc5b71..3cdc973 100644
--- a/freedreno/freedreno_bo.c
+++ b/freedreno/freedreno_bo.c
@@ -62,10 +62,7 @@
 
 	bo = dev->funcs->bo_from_handle(dev, size, handle);
 	if (!bo) {
-		struct drm_gem_close req = {
-				.handle = handle,
-		};
-		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+		drmCloseBufferHandle(dev->fd, handle);
 		return NULL;
 	}
 	bo->dev = fd_device_ref(dev);
@@ -263,13 +260,10 @@
 	 */
 
 	if (bo->handle) {
-		struct drm_gem_close req = {
-				.handle = bo->handle,
-		};
 		drmHashDelete(bo->dev->handle_table, bo->handle);
 		if (bo->name)
 			drmHashDelete(bo->dev->name_table, bo->name);
-		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+		drmCloseBufferHandle(bo->dev->fd, bo->handle);
 	}
 
 	bo->funcs->destroy(bo);
diff --git a/freedreno/meson.build b/freedreno/meson.build
index 63b84fc..49e6659 100644
--- a/freedreno/meson.build
+++ b/freedreno/meson.build
@@ -39,7 +39,7 @@
   )
 endif
 
-libdrm_freedreno = shared_library(
+libdrm_freedreno = library(
   'drm_freedreno',
   [files_freedreno, config_file],
   c_args : libdrm_c_args,
diff --git a/gen_table_fourcc.py b/gen_table_fourcc.py
new file mode 100644
index 0000000..4236fd7
--- /dev/null
+++ b/gen_table_fourcc.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+
+# Copyright 2021 Collabora, Ltd.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial
+# portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# Helper script that reads drm_fourcc.h and writes a static table with the
+# simpler format token modifiers
+
+import sys
+import re
+
+filename = sys.argv[1]
+towrite = sys.argv[2]
+
+fm_re = {
+    'intel': r'^#define I915_FORMAT_MOD_(\w+)',
+    'others': r'^#define DRM_FORMAT_MOD_((?:ARM|SAMSUNG|QCOM|VIVANTE|NVIDIA|BROADCOM|ALLWINNER)\w+)\s',
+    'vendors': r'^#define DRM_FORMAT_MOD_VENDOR_(\w+)'
+}
+
+def print_fm_intel(f, f_mod):
+    f.write('    {{ DRM_MODIFIER_INTEL({}, {}) }},\n'.format(f_mod, f_mod))
+
+# generic write func
+def print_fm(f, vendor, mod, f_name):
+    f.write('    {{ DRM_MODIFIER({}, {}, {}) }},\n'.format(vendor, mod, f_name))
+
+with open(filename, "r") as f:
+    data = f.read()
+    for k, v in fm_re.items():
+        fm_re[k] = re.findall(v, data, flags=re.M)
+
+with open(towrite, "w") as f:
+    f.write('''\
+/* AUTOMATICALLY GENERATED by gen_table_fourcc.py. You should modify
+   that script instead of adding here entries manually! */
+static const struct drmFormatModifierInfo drm_format_modifier_table[] = {
+''')
+    f.write('    { DRM_MODIFIER_INVALID(NONE, INVALID_MODIFIER) },\n')
+    f.write('    { DRM_MODIFIER_LINEAR(NONE, LINEAR) },\n')
+
+    for entry in fm_re['intel']:
+        print_fm_intel(f, entry)
+
+    for entry in fm_re['others']:
+        (vendor, mod) = entry.split('_', 1)
+        if vendor == 'ARM' and (mod == 'TYPE_AFBC' or mod == 'TYPE_MISC' or mod == 'TYPE_AFRC'):
+            continue
+        print_fm(f, vendor, mod, mod)
+
+    f.write('''\
+};
+''')
+
+    f.write('''\
+static const struct drmFormatModifierVendorInfo drm_format_modifier_vendor_table[] = {
+''')
+
+    for entry in fm_re['vendors']:
+        f.write("    {{ DRM_FORMAT_MOD_VENDOR_{}, \"{}\" }},\n".format(entry, entry))
+
+    f.write('''\
+};
+''')
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index 7fb9c09..0cbd154 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -116,8 +116,6 @@
 #define AMDGPU_GEM_CREATE_CPU_GTT_USWC		(1 << 2)
 /* Flag that the memory should be in VRAM and cleared */
 #define AMDGPU_GEM_CREATE_VRAM_CLEARED		(1 << 3)
-/* Flag that create shadow bo(GTT) while allocating vram bo */
-#define AMDGPU_GEM_CREATE_SHADOW		(1 << 4)
 /* Flag that allocating the BO should use linear VRAM */
 #define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS	(1 << 5)
 /* Flag that BO is always valid in this VM */
@@ -138,6 +136,10 @@
  * accessing it with various hw blocks
  */
 #define AMDGPU_GEM_CREATE_ENCRYPTED		(1 << 10)
+/* Flag that BO will be used only in preemptible context, which does
+ * not require GTT memory accounting
+ */
+#define AMDGPU_GEM_CREATE_PREEMPTIBLE		(1 << 11)
 
 struct drm_amdgpu_gem_create_in  {
 	/** the requested memory size */
@@ -755,6 +757,8 @@
 	#define AMDGPU_INFO_VBIOS_SIZE		0x1
 	/* Subquery id: Query vbios image */
 	#define AMDGPU_INFO_VBIOS_IMAGE		0x2
+	/* Subquery id: Query vbios info */
+	#define AMDGPU_INFO_VBIOS_INFO		0x3
 /* Query UVD handles */
 #define AMDGPU_INFO_NUM_HANDLES			0x1C
 /* Query sensor related information */
@@ -782,6 +786,12 @@
 #define AMDGPU_INFO_VRAM_LOST_COUNTER		0x1F
 /* query ras mask of enabled features*/
 #define AMDGPU_INFO_RAS_ENABLED_FEATURES	0x20
+/* query video encode/decode caps */
+#define AMDGPU_INFO_VIDEO_CAPS			0x21
+	/* Subquery id: Decode */
+	#define AMDGPU_INFO_VIDEO_CAPS_DECODE		0
+	/* Subquery id: Encode */
+	#define AMDGPU_INFO_VIDEO_CAPS_ENCODE		1
 
 /* RAS MASK: UMC (VRAM) */
 #define AMDGPU_INFO_RAS_ENABLED_UMC			(1 << 0)
@@ -878,6 +888,10 @@
 		struct {
 			__u32 type;
 		} sensor_info;
+
+		struct {
+			__u32 type;
+		} video_cap;
 	};
 };
 
@@ -938,6 +952,15 @@
 	__u32 feature;
 };
 
+struct drm_amdgpu_info_vbios {
+	__u8 name[64];
+	__u8 vbios_pn[64];
+	__u32 version;
+	__u32 pad;
+	__u8 vbios_ver_str[32];
+	__u8 date[32];
+};
+
 #define AMDGPU_VRAM_TYPE_UNKNOWN 0
 #define AMDGPU_VRAM_TYPE_GDDR1 1
 #define AMDGPU_VRAM_TYPE_DDR2  2
@@ -1074,6 +1097,30 @@
 	__u32 pad;
 };
 
+/* query video encode/decode caps */
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2			0
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4			1
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1			2
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC		3
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC			4
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG			5
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9			6
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1			7
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT			8
+
+struct drm_amdgpu_info_video_codec_info {
+	__u32 valid;
+	__u32 max_width;
+	__u32 max_height;
+	__u32 max_pixels_per_frame;
+	__u32 max_level;
+	__u32 pad;
+};
+
+struct drm_amdgpu_info_video_caps {
+	struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
+};
+
 /*
  * Supported GPU families
  */
@@ -1087,6 +1134,7 @@
 #define AMDGPU_FAMILY_RV			142 /* Raven */
 #define AMDGPU_FAMILY_NV			143 /* Navi10 */
 #define AMDGPU_FAMILY_VGH			144 /* Van Gogh */
+#define AMDGPU_FAMILY_YC			146 /* Yellow Carp */
 
 #if defined(__cplusplus)
 }
diff --git a/include/drm/drm.h b/include/drm/drm.h
index c7fd2a3..398c396 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -1,11 +1,10 @@
-/**
- * \file drm.h
+/*
  * Header for the Direct Rendering Manager
  *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
  *
- * \par Acknowledgments:
- * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ * Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
  */
 
 /*
@@ -79,7 +78,7 @@
 typedef unsigned int drm_drawable_t;
 typedef unsigned int drm_magic_t;
 
-/**
+/*
  * Cliprect.
  *
  * \warning: If you change this structure, make sure you change
@@ -95,7 +94,7 @@
 	unsigned short y2;
 };
 
-/**
+/*
  * Drawable information.
  */
 struct drm_drawable_info {
@@ -103,7 +102,7 @@
 	struct drm_clip_rect *rects;
 };
 
-/**
+/*
  * Texture region,
  */
 struct drm_tex_region {
@@ -114,7 +113,7 @@
 	unsigned int age;
 };
 
-/**
+/*
  * Hardware lock.
  *
  * The lock structure is a simple cache-line aligned integer.  To avoid
@@ -126,7 +125,7 @@
 	char padding[60];			/**< Pad to cache line */
 };
 
-/**
+/*
  * DRM_IOCTL_VERSION ioctl argument type.
  *
  * \sa drmGetVersion().
@@ -143,7 +142,7 @@
 	char *desc;	  /**< User-space buffer to hold desc */
 };
 
-/**
+/*
  * DRM_IOCTL_GET_UNIQUE ioctl argument type.
  *
  * \sa drmGetBusid() and drmSetBusId().
@@ -162,7 +161,7 @@
 	int unused;
 };
 
-/**
+/*
  * DRM_IOCTL_CONTROL ioctl argument type.
  *
  * \sa drmCtlInstHandler() and drmCtlUninstHandler().
@@ -177,7 +176,7 @@
 	int irq;
 };
 
-/**
+/*
  * Type of memory to map.
  */
 enum drm_map_type {
@@ -189,7 +188,7 @@
 	_DRM_CONSISTENT = 5	  /**< Consistent memory for PCI DMA */
 };
 
-/**
+/*
  * Memory mapping flags.
  */
 enum drm_map_flags {
@@ -208,7 +207,7 @@
 	void *handle;		 /**< Handle of map */
 };
 
-/**
+/*
  * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
  * argument type.
  *
@@ -225,7 +224,7 @@
 	/*   Private data */
 };
 
-/**
+/*
  * DRM_IOCTL_GET_CLIENT ioctl argument type.
  */
 struct drm_client {
@@ -257,7 +256,7 @@
 	    /* Add to the *END* of the list */
 };
 
-/**
+/*
  * DRM_IOCTL_GET_STATS ioctl argument type.
  */
 struct drm_stats {
@@ -268,7 +267,7 @@
 	} data[15];
 };
 
-/**
+/*
  * Hardware locking flags.
  */
 enum drm_lock_flags {
@@ -283,7 +282,7 @@
 	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
 };
 
-/**
+/*
  * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
  *
  * \sa drmGetLock() and drmUnlock().
@@ -293,7 +292,7 @@
 	enum drm_lock_flags flags;
 };
 
-/**
+/*
  * DMA flags
  *
  * \warning
@@ -322,7 +321,7 @@
 	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
 };
 
-/**
+/*
  * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
  *
  * \sa drmAddBufs().
@@ -345,7 +344,7 @@
 				  */
 };
 
-/**
+/*
  * DRM_IOCTL_INFO_BUFS ioctl argument type.
  */
 struct drm_buf_info {
@@ -353,7 +352,7 @@
 	struct drm_buf_desc *list;
 };
 
-/**
+/*
  * DRM_IOCTL_FREE_BUFS ioctl argument type.
  */
 struct drm_buf_free {
@@ -361,7 +360,7 @@
 	int *list;
 };
 
-/**
+/*
  * Buffer information
  *
  * \sa drm_buf_map.
@@ -373,7 +372,7 @@
 	void *address;	       /**< Address of buffer */
 };
 
-/**
+/*
  * DRM_IOCTL_MAP_BUFS ioctl argument type.
  */
 struct drm_buf_map {
@@ -386,7 +385,7 @@
 	struct drm_buf_pub *list;	/**< Buffer information */
 };
 
-/**
+/*
  * DRM_IOCTL_DMA ioctl argument type.
  *
  * Indices here refer to the offset into the buffer list in drm_buf_get.
@@ -411,7 +410,7 @@
 	_DRM_CONTEXT_2DONLY = 0x02
 };
 
-/**
+/*
  * DRM_IOCTL_ADD_CTX ioctl argument type.
  *
  * \sa drmCreateContext() and drmDestroyContext().
@@ -421,7 +420,7 @@
 	enum drm_ctx_flags flags;
 };
 
-/**
+/*
  * DRM_IOCTL_RES_CTX ioctl argument type.
  */
 struct drm_ctx_res {
@@ -429,14 +428,14 @@
 	struct drm_ctx *contexts;
 };
 
-/**
+/*
  * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
  */
 struct drm_draw {
 	drm_drawable_t handle;
 };
 
-/**
+/*
  * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
  */
 typedef enum {
@@ -450,14 +449,14 @@
 	unsigned long long data;
 };
 
-/**
+/*
  * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
  */
 struct drm_auth {
 	drm_magic_t magic;
 };
 
-/**
+/*
  * DRM_IOCTL_IRQ_BUSID ioctl argument type.
  *
  * \sa drmGetInterruptFromBusID().
@@ -499,7 +498,7 @@
 	long tval_usec;
 };
 
-/**
+/*
  * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
  *
  * \sa drmWaitVBlank().
@@ -512,7 +511,7 @@
 #define _DRM_PRE_MODESET 1
 #define _DRM_POST_MODESET 2
 
-/**
+/*
  * DRM_IOCTL_MODESET_CTL ioctl argument type
  *
  * \sa drmModesetCtl().
@@ -522,7 +521,7 @@
 	__u32 cmd;
 };
 
-/**
+/*
  * DRM_IOCTL_AGP_ENABLE ioctl argument type.
  *
  * \sa drmAgpEnable().
@@ -531,7 +530,7 @@
 	unsigned long mode;	/**< AGP mode */
 };
 
-/**
+/*
  * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
  *
  * \sa drmAgpAlloc() and drmAgpFree().
@@ -543,7 +542,7 @@
 	unsigned long physical;	/**< Physical used by i810 */
 };
 
-/**
+/*
  * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
  *
  * \sa drmAgpBind() and drmAgpUnbind().
@@ -553,7 +552,7 @@
 	unsigned long offset;	/**< In bytes -- will round to page boundary */
 };
 
-/**
+/*
  * DRM_IOCTL_AGP_INFO ioctl argument type.
  *
  * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
@@ -574,7 +573,7 @@
 	unsigned short id_device;
 };
 
-/**
+/*
  * DRM_IOCTL_SG_ALLOC ioctl argument type.
  */
 struct drm_scatter_gather {
@@ -582,7 +581,7 @@
 	unsigned long handle;	/**< Used for mapping / unmapping */
 };
 
-/**
+/*
  * DRM_IOCTL_SET_VERSION ioctl argument type.
  */
 struct drm_set_version {
@@ -592,14 +591,14 @@
 	int drm_dd_minor;
 };
 
-/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
 struct drm_gem_close {
 	/** Handle of the object to be closed. */
 	__u32 handle;
 	__u32 pad;
 };
 
-/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+/* DRM_IOCTL_GEM_FLINK ioctl argument type */
 struct drm_gem_flink {
 	/** Handle for the object being named */
 	__u32 handle;
@@ -608,7 +607,7 @@
 	__u32 name;
 };
 
-/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+/* DRM_IOCTL_GEM_OPEN ioctl argument type */
 struct drm_gem_open {
 	/** Name of object being opened */
 	__u32 name;
@@ -620,33 +619,150 @@
 	__u64 size;
 };
 
+/**
+ * DRM_CAP_DUMB_BUFFER
+ *
+ * If set to 1, the driver supports creating dumb buffers via the
+ * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
+ */
 #define DRM_CAP_DUMB_BUFFER		0x1
+/**
+ * DRM_CAP_VBLANK_HIGH_CRTC
+ *
+ * If set to 1, the kernel supports specifying a CRTC index in the high bits of
+ * &drm_wait_vblank_request.type.
+ *
+ * Starting kernel version 2.6.39, this capability is always set to 1.
+ */
 #define DRM_CAP_VBLANK_HIGH_CRTC	0x2
+/**
+ * DRM_CAP_DUMB_PREFERRED_DEPTH
+ *
+ * The preferred bit depth for dumb buffers.
+ *
+ * The bit depth is the number of bits used to indicate the color of a single
+ * pixel excluding any padding. This is different from the number of bits per
+ * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
+ * pixel.
+ *
+ * Note that this preference only applies to dumb buffers, it's irrelevant for
+ * other types of buffers.
+ */
 #define DRM_CAP_DUMB_PREFERRED_DEPTH	0x3
+/**
+ * DRM_CAP_DUMB_PREFER_SHADOW
+ *
+ * If set to 1, the driver prefers userspace to render to a shadow buffer
+ * instead of directly rendering to a dumb buffer. For best speed, userspace
+ * should do streaming ordered memory copies into the dumb buffer and never
+ * read from it.
+ *
+ * Note that this preference only applies to dumb buffers, it's irrelevant for
+ * other types of buffers.
+ */
 #define DRM_CAP_DUMB_PREFER_SHADOW	0x4
+/**
+ * DRM_CAP_PRIME
+ *
+ * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
+ * and &DRM_PRIME_CAP_EXPORT.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors. See
+ * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ */
 #define DRM_CAP_PRIME			0x5
+/**
+ * DRM_PRIME_CAP_IMPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
+ * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ */
 #define  DRM_PRIME_CAP_IMPORT		0x1
+/**
+ * DRM_PRIME_CAP_EXPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
+ * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ */
 #define  DRM_PRIME_CAP_EXPORT		0x2
+/**
+ * DRM_CAP_TIMESTAMP_MONOTONIC
+ *
+ * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
+ * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
+ * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
+ * clocks.
+ *
+ * Starting from kernel version 2.6.39, the default value for this capability
+ * is 1. Starting kernel version 4.15, this capability is always set to 1.
+ */
 #define DRM_CAP_TIMESTAMP_MONOTONIC	0x6
+/**
+ * DRM_CAP_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ */
 #define DRM_CAP_ASYNC_PAGE_FLIP		0x7
-/*
- * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
- * combination for the hardware cursor. The intention is that a hardware
- * agnostic userspace can query a cursor plane size to use.
+/**
+ * DRM_CAP_CURSOR_WIDTH
+ *
+ * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
+ * width x height combination for the hardware cursor. The intention is that a
+ * hardware agnostic userspace can query a cursor plane size to use.
  *
  * Note that the cross-driver contract is to merely return a valid size;
  * drivers are free to attach another meaning on top, eg. i915 returns the
  * maximum plane size.
  */
 #define DRM_CAP_CURSOR_WIDTH		0x8
+/**
+ * DRM_CAP_CURSOR_HEIGHT
+ *
+ * See &DRM_CAP_CURSOR_WIDTH.
+ */
 #define DRM_CAP_CURSOR_HEIGHT		0x9
+/**
+ * DRM_CAP_ADDFB2_MODIFIERS
+ *
+ * If set to 1, the driver supports supplying modifiers in the
+ * &DRM_IOCTL_MODE_ADDFB2 ioctl.
+ */
 #define DRM_CAP_ADDFB2_MODIFIERS	0x10
+/**
+ * DRM_CAP_PAGE_FLIP_TARGET
+ *
+ * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
+ * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
+ * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
+ * ioctl.
+ */
 #define DRM_CAP_PAGE_FLIP_TARGET	0x11
+/**
+ * DRM_CAP_CRTC_IN_VBLANK_EVENT
+ *
+ * If set to 1, the kernel supports reporting the CRTC ID in
+ * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
+ * &DRM_EVENT_FLIP_COMPLETE events.
+ *
+ * Starting kernel version 4.12, this capability is always set to 1.
+ */
 #define DRM_CAP_CRTC_IN_VBLANK_EVENT	0x12
+/**
+ * DRM_CAP_SYNCOBJ
+ *
+ * If set to 1, the driver supports sync objects. See
+ * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ */
 #define DRM_CAP_SYNCOBJ		0x13
+/**
+ * DRM_CAP_SYNCOBJ_TIMELINE
+ *
+ * If set to 1, the driver supports timeline operations on sync objects. See
+ * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ */
 #define DRM_CAP_SYNCOBJ_TIMELINE	0x14
 
-/** DRM_IOCTL_GET_CAP ioctl argument type */
+/* DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
 	__u64 capability;
 	__u64 value;
@@ -655,9 +771,12 @@
 /**
  * DRM_CLIENT_CAP_STEREO_3D
  *
- * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * If set to 1, the DRM core will expose the stereo 3D capabilities of the
  * monitor by advertising the supported 3D layouts in the flags of struct
- * drm_mode_modeinfo.
+ * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 3.13.
  */
 #define DRM_CLIENT_CAP_STEREO_3D	1
 
@@ -666,13 +785,25 @@
  *
  * If set to 1, the DRM core will expose all planes (overlay, primary, and
  * cursor) to userspace.
+ *
+ * This capability has been introduced in kernel version 3.15. Starting from
+ * kernel version 3.17, this capability is always supported for all drivers.
  */
 #define DRM_CLIENT_CAP_UNIVERSAL_PLANES  2
 
 /**
  * DRM_CLIENT_CAP_ATOMIC
  *
- * If set to 1, the DRM core will expose atomic properties to userspace
+ * If set to 1, the DRM core will expose atomic properties to userspace. This
+ * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
+ * &DRM_CLIENT_CAP_ASPECT_RATIO.
+ *
+ * If the driver doesn't support atomic mode-setting, enabling this capability
+ * will fail with -EOPNOTSUPP.
+ *
+ * This capability has been introduced in kernel version 4.0. Starting from
+ * kernel version 4.2, this capability is always supported for atomic-capable
+ * drivers.
  */
 #define DRM_CLIENT_CAP_ATOMIC	3
 
@@ -680,6 +811,10 @@
  * DRM_CLIENT_CAP_ASPECT_RATIO
  *
  * If set to 1, the DRM core will provide aspect ratio information in modes.
+ * See ``DRM_MODE_FLAG_PIC_AR_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 4.18.
  */
 #define DRM_CLIENT_CAP_ASPECT_RATIO    4
 
@@ -687,12 +822,15 @@
  * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
  *
  * If set to 1, the DRM core will expose special connectors to be used for
- * writing back to memory the scene setup in the commit. Depends on client
- * also supporting DRM_CLIENT_CAP_ATOMIC
+ * writing back to memory the scene setup in the commit. The client must enable
+ * &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * This capability is always supported for atomic-capable drivers starting from
+ * kernel version 4.19.
  */
 #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS	5
 
-/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
 	__u64 capability;
 	__u64 value;
@@ -944,7 +1082,7 @@
 
 #define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
 
-/**
+/*
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
  * Generic IOCTLS restart at 0xA0.
@@ -955,7 +1093,7 @@
 #define DRM_COMMAND_BASE                0x40
 #define DRM_COMMAND_END			0xA0
 
-/**
+/*
  * Header for events written back to userspace on the drm fd.  The
  * type defines the type of event, the length specifies the total
  * length of the event (including the header), and user_data is
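The capability values documented in the drm.h hunks above are queried through
DRM_IOCTL_GET_CAP; a minimal sketch using libdrm's drmGetCap() wrapper:

    #include <stdint.h>
    #include <xf86drm.h>

    static int supports_prime_import(int fd)
    {
        uint64_t cap = 0;

        if (drmGetCap(fd, DRM_CAP_PRIME, &cap))
            return 0;
        return !!(cap & DRM_PRIME_CAP_IMPORT);
    }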
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index ed0258c..957c7be 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -168,6 +168,13 @@
 #define DRM_FORMAT_RGBA1010102	fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
 #define DRM_FORMAT_BGRA1010102	fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
 
+/* 64 bpp RGB */
+#define DRM_FORMAT_XRGB16161616	fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_XBGR16161616	fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define DRM_FORMAT_ARGB16161616	fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_ABGR16161616	fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+
 /*
  * Floating point 64bpp RGB
  * IEEE 754-2008 binary16 half-precision float
@@ -474,7 +481,7 @@
  * This is a tiled layout using 4Kb tiles in row-major layout.
  * Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
  * are arranged in four groups (two wide, two high) with column-major layout.
- * Each group therefore consists out of four 256 byte units, which are also laid
+ * Each group therefore consits out of four 256 byte units, which are also laid
  * out as 2x2 column-major.
  * 256 byte units are made out of four 64 byte blocks of pixels, producing
  * either a square block or a 2:1 unit.
@@ -528,6 +535,25 @@
 #define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
 
 /*
+ * Intel Color Control Surface with Clear Color (CCS) for Gen-12 render
+ * compression.
+ *
+ * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be ignored. The clear color structure is 256 bits. The first 128 bits
+ * represents Raw Clear Color Red, Green, Blue and Alpha color each represented
+ * by 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
+
+/*
  * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
  *
  * Macroblocks are laid in a Z-shape, and each pixel data is following the
@@ -874,9 +900,9 @@
 
 /*
  * The top 4 bits (out of the 56 bits alloted for specifying vendor specific
- * modifiers) denote the category for modifiers. Currently we have only two
- * categories of modifiers ie AFBC and MISC. We can have a maximum of sixteen
- * different categories.
+ * modifiers) denote the category for modifiers. Currently we have three
+ * categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of
+ * sixteen different categories.
  */
 #define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \
 	fourcc_mod_code(ARM, ((__u64)(__type) << 52) | ((__val) & 0x000fffffffffffffULL))
@@ -992,6 +1018,109 @@
 #define AFBC_FORMAT_MOD_USM	(1ULL << 12)
 
 /*
+ * Arm Fixed-Rate Compression (AFRC) modifiers
+ *
+ * AFRC is a proprietary fixed rate image compression protocol and format,
+ * designed to provide guaranteed bandwidth and memory footprint
+ * reductions in graphics and media use-cases.
+ *
+ * AFRC buffers consist of one or more planes, with the same components
+ * and meaning as an uncompressed buffer using the same pixel format.
+ *
+ * Within each plane, the pixel/luma/chroma values are grouped into
+ * "coding unit" blocks which are individually compressed to a
+ * fixed size (in bytes). All coding units within a given plane of a buffer
+ * store the same number of values, and have the same compressed size.
+ *
+ * The coding unit size is configurable, allowing different rates of compression.
+ *
+ * The start of each AFRC buffer plane must be aligned to an alignment granule which
+ * depends on the coding unit size.
+ *
+ * Coding Unit Size   Plane Alignment
+ * ----------------   ---------------
+ * 16 bytes           1024 bytes
+ * 24 bytes           512  bytes
+ * 32 bytes           2048 bytes
+ *
+ * Coding units are grouped into paging tiles. AFRC buffer dimensions must be aligned
+ * to a multiple of the paging tile dimensions.
+ * The dimensions of each paging tile depend on whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Layout   Paging Tile Width   Paging Tile Height
+ * ------   -----------------   ------------------
+ * SCAN     16 coding units     4 coding units
+ * ROT      8  coding units     8 coding units
+ *
+ * The dimensions of each coding unit depend on the number of components
+ * in the compressed plane and whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Number of Components in Plane   Layout      Coding Unit Width   Coding Unit Height
+ * -----------------------------   ---------   -----------------   ------------------
+ * 1                               SCAN        16 samples          4 samples
+ * Example: 16x4 luma samples in a 'Y' plane
+ *          16x4 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * -----------------------------   ---------   -----------------   ------------------
+ * 1                               ROT         8 samples           8 samples
+ * Example: 8x8 luma samples in a 'Y' plane
+ *          8x8 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * -----------------------------   ---------   -----------------   ------------------
+ * 2                               DONT CARE   8 samples           4 samples
+ * Example: 8x4 chroma pairs in the 'UV' plane of a semi-planar YUV buffer
+ * -----------------------------   ---------   -----------------   ------------------
+ * 3                               DONT CARE   4 samples           4 samples
+ * Example: 4x4 pixels in an RGB buffer without alpha
+ * -----------------------------   ---------   -----------------   ------------------
+ * 4                               DONT CARE   4 samples           4 samples
+ * Example: 4x4 pixels in an RGB buffer with alpha
+ */
+
+#define DRM_FORMAT_MOD_ARM_TYPE_AFRC 0x02
+
+#define DRM_FORMAT_MOD_ARM_AFRC(__afrc_mode) \
+	DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFRC, __afrc_mode)
+
+/*
+ * AFRC coding unit size modifier.
+ *
+ * Indicates the number of bytes used to store each compressed coding unit for
+ * one or more planes in an AFRC encoded buffer. The coding unit size for chrominance
+ * is the same for both Cb and Cr, which may be stored in separate planes.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P0 indicates the number of bytes used to store
+ * each compressed coding unit in the first plane of the buffer. For RGBA buffers
+ * this is the only plane, while for semi-planar and fully-planar YUV buffers,
+ * this corresponds to the luma plane.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 indicates the number of bytes used to store
+ * each compressed coding unit in the second and third planes in the buffer.
+ * For semi-planar and fully-planar YUV buffers, this corresponds to the chroma plane(s).
+ *
+ * For single-plane buffers, AFRC_FORMAT_MOD_CU_SIZE_P0 must be specified
+ * and AFRC_FORMAT_MOD_CU_SIZE_P12 must be zero.
+ * For semi-planar and fully-planar buffers, both AFRC_FORMAT_MOD_CU_SIZE_P0 and
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 must be specified.
+ */
+#define AFRC_FORMAT_MOD_CU_SIZE_MASK 0xf
+#define AFRC_FORMAT_MOD_CU_SIZE_16 (1ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_24 (2ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_32 (3ULL)
+
+#define AFRC_FORMAT_MOD_CU_SIZE_P0(__afrc_cu_size) (__afrc_cu_size)
+#define AFRC_FORMAT_MOD_CU_SIZE_P12(__afrc_cu_size) ((__afrc_cu_size) << 4)
+
+/*
+ * AFRC scanline memory layout.
+ *
+ * Indicates if the buffer uses the scanline-optimised layout
+ * for an AFRC encoded buffer, otherwise, it uses the rotation-optimised layout.
+ * The memory layout is the same for all planes.
+ */
+#define AFRC_FORMAT_MOD_LAYOUT_SCAN (1ULL << 8)
+
+/*
  * Arm 16x16 Block U-Interleaved modifier
  *
  * This is used by Arm Mali Utgard and Midgard GPUs. It divides the image
@@ -1036,9 +1165,9 @@
  * Not all combinations are valid, and different SoCs may support different
  * combinations of layout and options.
  */
-#define __fourcc_mod_amlogic_layout_mask 0xf
+#define __fourcc_mod_amlogic_layout_mask 0xff
 #define __fourcc_mod_amlogic_options_shift 8
-#define __fourcc_mod_amlogic_options_mask 0xf
+#define __fourcc_mod_amlogic_options_mask 0xff
 
 #define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
 	fourcc_mod_code(AMLOGIC, \
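The new AFRC macros compose into a single modifier value.  For example, a
single-plane RGBA buffer optimised for scanline access with 32-byte coding
units could be described as below (include path assumed from a pkg-config
libdrm setup):

    #include <stdint.h>
    #include <drm_fourcc.h>

    static const uint64_t afrc_rgba_scan_cu32 =
        DRM_FORMAT_MOD_ARM_AFRC(AFRC_FORMAT_MOD_CU_SIZE_P0(AFRC_FORMAT_MOD_CU_SIZE_32) |
                                AFRC_FORMAT_MOD_LAYOUT_SCAN);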
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 96416e6..9b6722d 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -218,6 +218,27 @@
 #define DRM_MODE_CONTENT_PROTECTION_DESIRED     1
 #define DRM_MODE_CONTENT_PROTECTION_ENABLED     2
 
+/**
+ * struct drm_mode_modeinfo - Display mode information.
+ * @clock: pixel clock in kHz
+ * @hdisplay: horizontal display size
+ * @hsync_start: horizontal sync start
+ * @hsync_end: horizontal sync end
+ * @htotal: horizontal total size
+ * @hskew: horizontal skew
+ * @vdisplay: vertical display size
+ * @vsync_start: vertical sync start
+ * @vsync_end: vertical sync end
+ * @vtotal: vertical total size
+ * @vscan: vertical scan
+ * @vrefresh: approximate vertical refresh rate in Hz
+ * @flags: bitmask of misc. flags, see DRM_MODE_FLAG_* defines
+ * @type: bitmask of type flags, see DRM_MODE_TYPE_* defines
+ * @name: string describing the mode resolution
+ *
+ * This is the user-space API display mode information structure. For the
+ * kernel version see struct drm_display_mode.
+ */
 struct drm_mode_modeinfo {
 	__u32 clock;
 	__u16 hdisplay;
@@ -367,28 +388,95 @@
 #define DRM_MODE_CONNECTOR_DPI		17
 #define DRM_MODE_CONNECTOR_WRITEBACK	18
 #define DRM_MODE_CONNECTOR_SPI		19
+#define DRM_MODE_CONNECTOR_USB		20
 
+/**
+ * struct drm_mode_get_connector - Get connector metadata.
+ *
+ * User-space can perform a GETCONNECTOR ioctl to retrieve information about a
+ * connector. User-space is expected to retrieve encoders, modes and properties
+ * by performing this ioctl at least twice: the first time to retrieve the
+ * number of elements, the second time to retrieve the elements themselves.
+ *
+ * To retrieve the number of elements, set @count_props and @count_encoders to
+ * zero, set @count_modes to 1, and set @modes_ptr to a temporary struct
+ * drm_mode_modeinfo element.
+ *
+ * To retrieve the elements, allocate arrays for @encoders_ptr, @modes_ptr,
+ * @props_ptr and @prop_values_ptr, then set @count_modes, @count_props and
+ * @count_encoders to their capacity.
+ *
+ * Performing the ioctl only twice may be racy: the number of elements may have
+ * changed with a hotplug event in-between the two ioctls. User-space is
+ * expected to retry the last ioctl until the number of elements stabilizes.
+ * The kernel won't fill any array which doesn't have the expected length.
+ *
+ * **Force-probing a connector**
+ *
+ * If the @count_modes field is set to zero and the DRM client is the current
+ * DRM master, the kernel will perform a forced probe on the connector to
+ * refresh the connector status, modes and EDID. A forced-probe can be slow,
+ * might cause flickering and the ioctl will block.
+ *
+ * User-space needs to force-probe connectors to ensure their metadata is
+ * up-to-date at startup and after receiving a hot-plug event. User-space
+ * may perform a forced-probe when the user explicitly requests it. User-space
+ * shouldn't perform a forced-probe in other situations.
+ */
 struct drm_mode_get_connector {
-
+	/** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */
 	__u64 encoders_ptr;
+	/** @modes_ptr: Pointer to struct drm_mode_modeinfo array. */
 	__u64 modes_ptr;
+	/** @props_ptr: Pointer to ``__u32`` array of property IDs. */
 	__u64 props_ptr;
+	/** @prop_values_ptr: Pointer to ``__u64`` array of property values. */
 	__u64 prop_values_ptr;
 
+	/** @count_modes: Number of modes. */
 	__u32 count_modes;
+	/** @count_props: Number of properties. */
 	__u32 count_props;
+	/** @count_encoders: Number of encoders. */
 	__u32 count_encoders;
 
-	__u32 encoder_id; /**< Current Encoder */
-	__u32 connector_id; /**< Id */
+	/** @encoder_id: Object ID of the current encoder. */
+	__u32 encoder_id;
+	/** @connector_id: Object ID of the connector. */
+	__u32 connector_id;
+	/**
+	 * @connector_type: Type of the connector.
+	 *
+	 * See DRM_MODE_CONNECTOR_* defines.
+	 */
 	__u32 connector_type;
+	/**
+	 * @connector_type_id: Type-specific connector number.
+	 *
+	 * This is not an object ID. This is a per-type connector number. Each
+	 * (type, type_id) combination is unique across all connectors of a DRM
+	 * device.
+	 */
 	__u32 connector_type_id;
 
+	/**
+	 * @connection: Status of the connector.
+	 *
+	 * See enum drm_connector_status.
+	 */
 	__u32 connection;
-	__u32 mm_width;  /**< width in millimeters */
-	__u32 mm_height; /**< height in millimeters */
+	/** @mm_width: Width of the connected sink in millimeters. */
+	__u32 mm_width;
+	/** @mm_height: Height of the connected sink in millimeters. */
+	__u32 mm_height;
+	/**
+	 * @subpixel: Subpixel order of the connected sink.
+	 *
+	 * See enum subpixel_order.
+	 */
 	__u32 subpixel;
 
+	/** @pad: Padding, must be zero. */
 	__u32 pad;
 };
 
@@ -417,7 +505,7 @@
 /* the PROP_ATOMIC flag is used to hide properties from userspace that
  * is not aware of atomic properties.  This is mostly to work around
  * older userspace (DDX drivers) that read/write each prop they find,
- * without being aware that this could be triggering a lengthy modeset.
+ * witout being aware that this could be triggering a lengthy modeset.
  */
 #define DRM_MODE_PROP_ATOMIC        0x80000000
 
@@ -904,25 +992,24 @@
 };
 
 /**
- * struct drm_mode_create_blob - Create New block property
- * @data: Pointer to data to copy.
- * @length: Length of data to copy.
- * @blob_id: new property ID.
+ * struct drm_mode_create_blob - Create New blob property
+ *
  * Create a new 'blob' data property, copying length bytes from data pointer,
  * and returning new blob ID.
  */
 struct drm_mode_create_blob {
-	/** Pointer to data to copy. */
+	/** @data: Pointer to data to copy. */
 	__u64 data;
-	/** Length of data to copy. */
+	/** @length: Length of data to copy. */
 	__u32 length;
-	/** Return: new property ID. */
+	/** @blob_id: Return: new property ID. */
 	__u32 blob_id;
 };
 
 /**
  * struct drm_mode_destroy_blob - Destroy user blob
  * @blob_id: blob_id to destroy
+ *
  * Destroy a user-created blob property.
  *
  * User-space can release blobs as soon as they do not need to refer to them by
@@ -937,36 +1024,32 @@
 
 /**
  * struct drm_mode_create_lease - Create lease
- * @object_ids: Pointer to array of object ids.
- * @object_count: Number of object ids.
- * @flags: flags for new FD.
- * @lessee_id: unique identifier for lessee.
- * @fd: file descriptor to new drm_master file.
+ *
  * Lease mode resources, creating another drm_master.
  */
 struct drm_mode_create_lease {
-	/** Pointer to array of object ids (__u32) */
+	/** @object_ids: Pointer to array of object ids (__u32) */
 	__u64 object_ids;
-	/** Number of object ids */
+	/** @object_count: Number of object ids */
 	__u32 object_count;
-	/** flags for new FD (O_CLOEXEC, etc) */
+	/** @flags: flags for new FD (O_CLOEXEC, etc) */
 	__u32 flags;
 
-	/** Return: unique identifier for lessee. */
+	/** @lessee_id: Return: unique identifier for lessee. */
 	__u32 lessee_id;
-	/** Return: file descriptor to new drm_master file */
+	/** @fd: Return: file descriptor to new drm_master file */
 	__u32 fd;
 };
 
 /**
  * struct drm_mode_list_lessees - List lessees
- * @count_lessees: Number of lessees.
- * @pad: pad.
- * @lessees_ptr: Pointer to lessess.
- * List lesses from a drm_master
+ *
+ * List lesses from a drm_master.
  */
 struct drm_mode_list_lessees {
-	/** Number of lessees.
+	/**
+	 * @count_lessees: Number of lessees.
+	 *
 	 * On input, provides length of the array.
 	 * On output, provides total number. No
 	 * more than the input number will be written
@@ -974,23 +1057,26 @@
 	 * the size and then the data.
 	 */
 	__u32 count_lessees;
+	/** @pad: Padding. */
 	__u32 pad;
 
-	/** Pointer to lessees.
-	 * pointer to __u64 array of lessee ids
+	/**
+	 * @lessees_ptr: Pointer to lessees.
+	 *
+	 * Pointer to __u64 array of lessee ids
 	 */
 	__u64 lessees_ptr;
 };
 
 /**
  * struct drm_mode_get_lease - Get Lease
- * @count_objects: Number of leased objects.
- * @pad: pad.
- * @objects_ptr: Pointer to objects.
- * Get leased objects
+ *
+ * Get leased objects.
  */
 struct drm_mode_get_lease {
-	/** Number of leased objects.
+	/**
+	 * @count_objects: Number of leased objects.
+	 *
 	 * On input, provides length of the array.
 	 * On output, provides total number. No
 	 * more than the input number will be written
@@ -998,22 +1084,22 @@
 	 * the size and then the data.
 	 */
 	__u32 count_objects;
+	/** @pad: Padding. */
 	__u32 pad;
 
-	/** Pointer to objects.
-	 * pointer to __u32 array of object ids
+	/**
+	 * @objects_ptr: Pointer to objects.
+	 *
+	 * Pointer to __u32 array of object ids.
 	 */
 	__u64 objects_ptr;
 };
 
 /**
  * struct drm_mode_revoke_lease - Revoke lease
- * @lessee_id: Unique ID of lessee.
- * Revoke lease
  */
 struct drm_mode_revoke_lease {
-	/** Unique ID of lessee
-	 */
+	/** @lessee_id: Unique ID of lessee */
 	__u32 lessee_id;
 };
 
diff --git a/intel/i915_pciids.h b/intel/i915_pciids.h
index ebd0dd1..c00ac54 100644
--- a/intel/i915_pciids.h
+++ b/intel/i915_pciids.h
@@ -632,17 +632,38 @@
 	INTEL_VGA_DEVICE(0x4905, info), \
 	INTEL_VGA_DEVICE(0x4906, info), \
 	INTEL_VGA_DEVICE(0x4907, info), \
-	INTEL_VGA_DEVICE(0x4908, info)
+	INTEL_VGA_DEVICE(0x4908, info), \
+	INTEL_VGA_DEVICE(0x4909, info)
 
 /* ADL-S */
 #define INTEL_ADLS_IDS(info) \
 	INTEL_VGA_DEVICE(0x4680, info), \
-	INTEL_VGA_DEVICE(0x4681, info), \
 	INTEL_VGA_DEVICE(0x4682, info), \
-	INTEL_VGA_DEVICE(0x4683, info), \
+	INTEL_VGA_DEVICE(0x4688, info), \
+	INTEL_VGA_DEVICE(0x468A, info), \
 	INTEL_VGA_DEVICE(0x4690, info), \
-	INTEL_VGA_DEVICE(0x4691, info), \
 	INTEL_VGA_DEVICE(0x4692, info), \
 	INTEL_VGA_DEVICE(0x4693, info)
 
+/* ADL-P */
+#define INTEL_ADLP_IDS(info) \
+	INTEL_VGA_DEVICE(0x46A0, info), \
+	INTEL_VGA_DEVICE(0x46A1, info), \
+	INTEL_VGA_DEVICE(0x46A2, info), \
+	INTEL_VGA_DEVICE(0x46A3, info), \
+	INTEL_VGA_DEVICE(0x46A6, info), \
+	INTEL_VGA_DEVICE(0x46A8, info), \
+	INTEL_VGA_DEVICE(0x46AA, info), \
+	INTEL_VGA_DEVICE(0x462A, info), \
+	INTEL_VGA_DEVICE(0x4626, info), \
+	INTEL_VGA_DEVICE(0x4628, info), \
+	INTEL_VGA_DEVICE(0x46B0, info), \
+	INTEL_VGA_DEVICE(0x46B1, info), \
+	INTEL_VGA_DEVICE(0x46B2, info), \
+	INTEL_VGA_DEVICE(0x46B3, info), \
+	INTEL_VGA_DEVICE(0x46C0, info), \
+	INTEL_VGA_DEVICE(0x46C1, info), \
+	INTEL_VGA_DEVICE(0x46C2, info), \
+	INTEL_VGA_DEVICE(0x46C3, info)
+
 #endif /* _I915_PCIIDS_H */
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 023af61..b28ea74 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -114,7 +114,6 @@
 
 	pthread_mutex_t lock;
 
-	struct drm_i915_gem_exec_object *exec_objects;
 	struct drm_i915_gem_exec_object2 *exec2_objects;
 	drm_intel_bo **exec_bos;
 	int exec_size;
@@ -480,44 +479,6 @@
  * access flags.
  */
 static void
-drm_intel_add_validate_buffer(drm_intel_bo *bo)
-{
-	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
-	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-	int index;
-
-	if (bo_gem->validate_index != -1)
-		return;
-
-	/* Extend the array of validation entries as necessary. */
-	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
-		int new_size = bufmgr_gem->exec_size * 2;
-
-		if (new_size == 0)
-			new_size = 5;
-
-		bufmgr_gem->exec_objects =
-		    realloc(bufmgr_gem->exec_objects,
-			    sizeof(*bufmgr_gem->exec_objects) * new_size);
-		bufmgr_gem->exec_bos =
-		    realloc(bufmgr_gem->exec_bos,
-			    sizeof(*bufmgr_gem->exec_bos) * new_size);
-		bufmgr_gem->exec_size = new_size;
-	}
-
-	index = bufmgr_gem->exec_count;
-	bo_gem->validate_index = index;
-	/* Fill in array entry */
-	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
-	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
-	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
-	bufmgr_gem->exec_objects[index].alignment = bo->align;
-	bufmgr_gem->exec_objects[index].offset = 0;
-	bufmgr_gem->exec_bos[index] = bo;
-	bufmgr_gem->exec_count++;
-}
-
-static void
 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
@@ -1191,7 +1152,6 @@
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-	struct drm_gem_close close;
 	int ret;
 
 	DRMLISTDEL(&bo_gem->vma_list);
@@ -1215,11 +1175,9 @@
 	HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);
 
 	/* Close this object */
-	memclear(close);
-	close.handle = bo_gem->gem_handle;
-	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
+	ret = drmCloseBufferHandle(bufmgr_gem->fd, bo_gem->gem_handle);
 	if (ret != 0) {
-		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
+		DBG("drmCloseBufferHandle %d failed (%s): %s\n",
 		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
 	}
 	free(bo);
@@ -1732,6 +1690,82 @@
 	return drm_intel_gem_bo_unmap(bo);
 }
 
+static bool is_cache_coherent(drm_intel_bo *bo)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+	struct drm_i915_gem_caching arg = {};
+
+	arg.handle = bo_gem->gem_handle;
+	if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg))
+		assert(false);
+	return arg.caching != I915_CACHING_NONE;
+}
+
+static void set_domain(drm_intel_bo *bo, uint32_t read, uint32_t write)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+	struct drm_i915_gem_set_domain arg = {};
+
+	arg.handle = bo_gem->gem_handle;
+	arg.read_domains = read;
+	arg.write_domain = write;
+	if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg))
+		assert(false);
+}
+
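+/*
+ * Fallback paths for the cases where the PWRITE/PREAD ioctls are not
+ * supported (the kernel returns -EOPNOTSUPP): the copy is performed through
+ * a CPU or WC mapping instead, chosen according to the buffer's cache
+ * coherency.
+ */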
+static int mmap_write(drm_intel_bo *bo, unsigned long offset,
+		      unsigned long length, const void *buf)
+{
+	void *map = NULL;
+
+	if (!length)
+		return 0;
+
+	if (is_cache_coherent(bo)) {
+		map = drm_intel_gem_bo_map__cpu(bo);
+		if (map)
+			set_domain(bo, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	}
+	if (!map) {
+		map = drm_intel_gem_bo_map__wc(bo);
+		if (map)
+			set_domain(bo, I915_GEM_DOMAIN_WC, I915_GEM_DOMAIN_WC);
+	}
+
+	assert(map);
+	memcpy((char *)map + offset, buf, length);
+	drm_intel_gem_bo_unmap(bo);
+	return 0;
+}
+
+static int mmap_read(drm_intel_bo *bo, unsigned long offset,
+		      unsigned long length, void *buf)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+	void *map = NULL;
+
+	if (!length)
+		return 0;
+
+	if (bufmgr_gem->has_llc || is_cache_coherent(bo)) {
+		map = drm_intel_gem_bo_map__cpu(bo);
+		if (map)
+			set_domain(bo, I915_GEM_DOMAIN_CPU, 0);
+	}
+	if (!map) {
+		map = drm_intel_gem_bo_map__wc(bo);
+		if (map)
+			set_domain(bo, I915_GEM_DOMAIN_WC, 0);
+	}
+
+	assert(map);
+	memcpy(buf, (char *)map + offset, length);
+	drm_intel_gem_bo_unmap(bo);
+	return 0;
+}
+
 static int
 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
 			 unsigned long size, const void *data)
@@ -1752,14 +1786,20 @@
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_I915_GEM_PWRITE,
 		       &pwrite);
-	if (ret != 0) {
+	if (ret)
 		ret = -errno;
+
+	if (ret != 0 && ret != -EOPNOTSUPP) {
 		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
 		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
 		    (int)size, strerror(errno));
+		return ret;
 	}
 
-	return ret;
+	if (ret == -EOPNOTSUPP)
+		mmap_write(bo, offset, size, data);
+
+	return 0;
 }
 
 static int
@@ -1807,14 +1847,20 @@
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_I915_GEM_PREAD,
 		       &pread);
-	if (ret != 0) {
+	if (ret)
 		ret = -errno;
+
+	if (ret != 0 && ret != -EOPNOTSUPP) {
 		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
 		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
 		    (int)size, strerror(errno));
+		return ret;
 	}
 
-	return ret;
+	if (ret == -EOPNOTSUPP)
+		mmap_read(bo, offset, size, data);
+
+	return 0;
 }
 
 /** Waits for all GPU rendering with the object to have completed. */
@@ -1914,11 +1960,9 @@
 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
-	struct drm_gem_close close_bo;
 	int i, ret;
 
 	free(bufmgr_gem->exec2_objects);
-	free(bufmgr_gem->exec_objects);
 	free(bufmgr_gem->exec_bos);
 
 	pthread_mutex_destroy(&bufmgr_gem->lock);
@@ -1940,9 +1984,8 @@
 
 	/* Release userptr bo kept hanging around for optimisation. */
 	if (bufmgr_gem->userptr_active.ptr) {
-		memclear(close_bo);
-		close_bo.handle = bufmgr_gem->userptr_active.handle;
-		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
+		ret = drmCloseBufferHandle(bufmgr_gem->fd,
+					   bufmgr_gem->userptr_active.handle);
 		free(bufmgr_gem->userptr_active.ptr);
 		if (ret)
 			fprintf(stderr,
@@ -2178,31 +2221,6 @@
  * index values into the validation list.
  */
 static void
-drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
-{
-	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-	int i;
-
-	if (bo_gem->relocs == NULL)
-		return;
-
-	for (i = 0; i < bo_gem->reloc_count; i++) {
-		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
-
-		if (target_bo == bo)
-			continue;
-
-		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
-
-		/* Continue walking the tree depth-first. */
-		drm_intel_gem_bo_process_reloc(target_bo);
-
-		/* Add the target to the validate list */
-		drm_intel_add_validate_buffer(target_bo);
-	}
-}
-
-static void
 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
 {
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
@@ -2242,30 +2260,6 @@
 	}
 }
 
-
-static void
-drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
-{
-	int i;
-
-	for (i = 0; i < bufmgr_gem->exec_count; i++) {
-		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
-		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-
-		/* Update the buffer offset */
-		if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
-			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
-			    bo_gem->gem_handle, bo_gem->name,
-			    upper_32_bits(bo->offset64),
-			    lower_32_bits(bo->offset64),
-			    upper_32_bits(bufmgr_gem->exec_objects[i].offset),
-			    lower_32_bits(bufmgr_gem->exec_objects[i].offset));
-			bo->offset64 = bufmgr_gem->exec_objects[i].offset;
-			bo->offset = bufmgr_gem->exec_objects[i].offset;
-		}
-	}
-}
-
 static void
 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
 {
@@ -2302,73 +2296,6 @@
 }
 
 static int
-drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
-		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
-{
-	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
-	struct drm_i915_gem_execbuffer execbuf;
-	int ret, i;
-
-	if (to_bo_gem(bo)->has_error)
-		return -ENOMEM;
-
-	pthread_mutex_lock(&bufmgr_gem->lock);
-	/* Update indices and set up the validate list. */
-	drm_intel_gem_bo_process_reloc(bo);
-
-	/* Add the batch buffer to the validation list.  There are no
-	 * relocations pointing to it.
-	 */
-	drm_intel_add_validate_buffer(bo);
-
-	memclear(execbuf);
-	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
-	execbuf.buffer_count = bufmgr_gem->exec_count;
-	execbuf.batch_start_offset = 0;
-	execbuf.batch_len = used;
-	execbuf.cliprects_ptr = (uintptr_t) cliprects;
-	execbuf.num_cliprects = num_cliprects;
-	execbuf.DR1 = 0;
-	execbuf.DR4 = DR4;
-
-	ret = drmIoctl(bufmgr_gem->fd,
-		       DRM_IOCTL_I915_GEM_EXECBUFFER,
-		       &execbuf);
-	if (ret != 0) {
-		ret = -errno;
-		if (errno == ENOSPC) {
-			DBG("Execbuffer fails to pin. "
-			    "Estimate: %u. Actual: %u. Available: %u\n",
-			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
-							       bufmgr_gem->
-							       exec_count),
-			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
-							      bufmgr_gem->
-							      exec_count),
-			    (unsigned int)bufmgr_gem->gtt_size);
-		}
-	}
-	drm_intel_update_buffer_offsets(bufmgr_gem);
-
-	if (bufmgr_gem->bufmgr.debug)
-		drm_intel_gem_dump_validation_list(bufmgr_gem);
-
-	for (i = 0; i < bufmgr_gem->exec_count; i++) {
-		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
-
-		bo_gem->idle = false;
-
-		/* Disconnect the buffer from the validate list */
-		bo_gem->validate_index = -1;
-		bufmgr_gem->exec_bos[i] = NULL;
-	}
-	bufmgr_gem->exec_count = 0;
-	pthread_mutex_unlock(&bufmgr_gem->lock);
-
-	return ret;
-}
-
-static int
 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
 	 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
 	 int in_fence, int *out_fence,
@@ -2845,9 +2772,7 @@
 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
-
-	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
-		bufmgr_gem->fenced_relocs = true;
+	bufmgr_gem->fenced_relocs = true;
 }
 
 /**
@@ -3612,7 +3537,6 @@
 	struct drm_i915_gem_get_aperture aperture;
 	drm_i915_getparam_t gp;
 	int ret, tmp;
-	bool exec2 = false;
 
 	pthread_mutex_lock(&bufmgr_list_mutex);
 
@@ -3686,8 +3610,12 @@
 
 	gp.param = I915_PARAM_HAS_EXECBUF2;
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
-	if (!ret)
-		exec2 = true;
+	if (ret) {
+		fprintf(stderr, "i915 does not support EXECBUFFER2\n");
+		free(bufmgr_gem);
+		bufmgr_gem = NULL;
+		goto exit;
+	}
 
 	gp.param = I915_PARAM_HAS_BSD;
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
@@ -3790,12 +3718,8 @@
 	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
 	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
 	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
-	/* Use the new one if available */
-	if (exec2) {
-		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
-		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
-	} else
-		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
+	bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
+	bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
 	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
 	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
 	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
diff --git a/intel/intel_chipset.c b/intel/intel_chipset.c
index fda3de1..f0da6d8 100644
--- a/intel/intel_chipset.c
+++ b/intel/intel_chipset.c
@@ -35,6 +35,7 @@
 	uint16_t gen;
 } pciids[] = {
 	/* Keep ids sorted by gen; latest gen first */
+	INTEL_ADLP_IDS(12),
 	INTEL_ADLS_IDS(12),
 	INTEL_RKL_IDS(12),
 	INTEL_DG1_IDS(12),
diff --git a/intel/intel_decode.c b/intel/intel_decode.c
index e0a5166..be6f779 100644
--- a/intel/intel_decode.c
+++ b/intel/intel_decode.c
@@ -3815,33 +3815,36 @@
 drm_intel_decode_context_alloc(uint32_t devid)
 {
 	struct drm_intel_decode *ctx;
+	int gen = 0;
+
+	if (intel_get_genx(devid, &gen))
+		;
+	else if (IS_GEN8(devid))
+		gen = 8;
+	else if (IS_GEN7(devid))
+		gen = 7;
+	else if (IS_GEN6(devid))
+		gen = 6;
+	else if (IS_GEN5(devid))
+		gen = 5;
+	else if (IS_GEN4(devid))
+		gen = 4;
+	else if (IS_9XX(devid))
+		gen = 3;
+	else if (IS_GEN2(devid))
+		gen = 2;
+
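+	/* Unknown device: fail gracefully instead of asserting. */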
+	if (!gen)
+		return NULL;
 
 	ctx = calloc(1, sizeof(struct drm_intel_decode));
 	if (!ctx)
 		return NULL;
 
 	ctx->devid = devid;
+	ctx->gen = gen;
 	ctx->out = stdout;
 
-	if (intel_get_genx(devid, &ctx->gen))
-		;
-	else if (IS_GEN8(devid))
-		ctx->gen = 8;
-	else if (IS_GEN7(devid))
-		ctx->gen = 7;
-	else if (IS_GEN6(devid))
-		ctx->gen = 6;
-	else if (IS_GEN5(devid))
-		ctx->gen = 5;
-	else if (IS_GEN4(devid))
-		ctx->gen = 4;
-	else if (IS_9XX(devid))
-		ctx->gen = 3;
-	else {
-		assert(IS_GEN2(devid));
-		ctx->gen = 2;
-	}
-
 	return ctx;
 }
 
diff --git a/intel/meson.build b/intel/meson.build
index 4d3f1eb..5fa06c2 100644
--- a/intel/meson.build
+++ b/intel/meson.build
@@ -18,7 +18,7 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
-libdrm_intel = shared_library(
+libdrm_intel = library(
   'drm_intel',
   [
     files(
diff --git a/libkms/meson.build b/libkms/meson.build
index 216be4d..8d17bb2 100644
--- a/libkms/meson.build
+++ b/libkms/meson.build
@@ -41,7 +41,7 @@
   libkms_include += include_directories('../exynos')
 endif
 
-libkms = shared_library(
+libkms = library(
   'kms',
   [files_libkms, config_file],
   c_args : libdrm_c_args,
diff --git a/libkms/vmwgfx.c b/libkms/vmwgfx.c
index f0e40be..1984399 100644
--- a/libkms/vmwgfx.c
+++ b/libkms/vmwgfx.c
@@ -25,6 +25,9 @@
  *
  **************************************************************************/
 
+#ifdef __FreeBSD__
+#define _WANT_KERNEL_ERRNO
+#endif
 
 #include <errno.h>
 #include <stdlib.h>
diff --git a/man/drm-memory.7.rst b/man/drm-memory.7.rst
index c272c99..7d09eeb 100644
--- a/man/drm-memory.7.rst
+++ b/man/drm-memory.7.rst
@@ -169,19 +169,10 @@
 or look at the driver-dependent man-pages (for example **drm-intel**\ (7) or
 **drm-radeon**\ (7)).
 
-GEM-buffers can be closed with the ``DRM_IOCTL_GEM_CLOSE`` ioctl. It takes as
-argument a structure of type ``struct drm_gem_close``:
-
-::
-
-   struct drm_gem_close {
-       __u32 handle;
-       __u32 pad;
-   };
-
-The *handle* field is the GEM-handle to be closed. The *pad* field is unused
-padding. It must be zeroed. After this call the GEM handle cannot be used by
-this process anymore and may be reused for new GEM objects by the GEM API.
+GEM-buffers can be closed with **drmCloseBufferHandle**\ (3). It takes as
+argument the GEM-handle to be closed. After this call the GEM handle cannot be
+used by this process anymore and may be reused for new GEM objects by the GEM
+API.
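+
+For example, assuming *fd* is an open DRM file descriptor and *handle* is a
+valid GEM-handle obtained earlier:
+
+::
+
+   if (drmCloseBufferHandle(fd, handle) != 0)
+       fprintf(stderr, "failed to close GEM handle\n");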
 
 If you want to share GEM-objects between different processes, you can create a
 name for them and pass this name to other processes which can then open this
diff --git a/meson.build b/meson.build
index 07d2e08..7ccfe84 100644
--- a/meson.build
+++ b/meson.build
@@ -21,9 +21,9 @@
 project(
   'libdrm',
   ['c'],
-  version : '2.4.104',
+  version : '2.4.107',
   license : 'MIT',
-  meson_version : '>= 0.43',
+  meson_version : '>= 0.46',
   default_options : ['buildtype=debugoptimized', 'c_std=gnu99'],
 )
 
@@ -44,6 +44,8 @@
 
 cc = meson.get_compiler('c')
 
+android = cc.compiles('''int func() { return __ANDROID__; }''')
+
 symbols_check = find_program('symbols-check.py')
 prog_nm = find_program('nm')
 
@@ -51,6 +53,11 @@
 intel_atomics = false
 lib_atomics = false
 
+python3 = import('python').find_installation()
+format_mod_static_table = custom_target('format_mod_static_table',
+  output : 'generated_static_table_fourcc.h', input: 'include/drm/drm_fourcc.h',
+  command : [python3, files('gen_table_fourcc.py'), '@INPUT@', '@OUTPUT@'])
+
 dep_atomic_ops = dependency('atomic_ops', required : false)
 if cc.links('''
     int atomic_add(int *i) { return __sync_add_and_fetch (i, 1); }
@@ -166,7 +173,7 @@
 with_libkms = false
 _libkms = get_option('libkms')
 if _libkms != 'false'
-  with_libkms = _libkms == 'true' or ['linux', 'freebsd', 'dragonfly'].contains(host_machine.system())
+  with_libkms = _libkms == 'true' or (['linux', 'freebsd', 'dragonfly'].contains(host_machine.system()) and not android)
 endif
 
 # Among others FreeBSD does not have a separate dl library.
@@ -294,20 +301,29 @@
 inc_root = include_directories('.')
 inc_drm = include_directories('include/drm')
 
-libdrm = shared_library(
-  'drm',
-  [files(
-     'xf86drm.c', 'xf86drmHash.c', 'xf86drmRandom.c', 'xf86drmSL.c',
-     'xf86drmMode.c'
-   ),
-   config_file,
-  ],
-  c_args : libdrm_c_args,
-  dependencies : [dep_valgrind, dep_rt, dep_m],
-  include_directories : inc_drm,
-  version : '2.4.0',
-  install : true,
-)
+libdrm_files = [files(
+   'xf86drm.c', 'xf86drmHash.c', 'xf86drmRandom.c', 'xf86drmSL.c',
+   'xf86drmMode.c'
+  ),
+  config_file, format_mod_static_table
+]
+
+if android
+  libdrm = library('drm', libdrm_files,
+    c_args : libdrm_c_args,
+    dependencies : [dep_valgrind, dep_rt, dep_m],
+    include_directories : inc_drm,
+    install : true,
+  )
+else
+  libdrm = library('drm', libdrm_files,
+    c_args : libdrm_c_args,
+    dependencies : [dep_valgrind, dep_rt, dep_m],
+    include_directories : inc_drm,
+    install : true,
+    version : '2.4.0',
+  )
+endif
 
 test(
   'core-symbols-check',
diff --git a/nouveau/meson.build b/nouveau/meson.build
index 9bd58fc..af45336 100644
--- a/nouveau/meson.build
+++ b/nouveau/meson.build
@@ -19,7 +19,7 @@
 # SOFTWARE.
 
 
-libdrm_nouveau = shared_library(
+libdrm_nouveau = library(
   'drm_nouveau',
   [files( 'nouveau.c', 'pushbuf.c', 'bufctx.c', 'abi16.c'), config_file],
   c_args : libdrm_c_args,
diff --git a/nouveau/nouveau.c b/nouveau/nouveau.c
index f18d142..7b4efde 100644
--- a/nouveau/nouveau.c
+++ b/nouveau/nouveau.c
@@ -46,19 +46,35 @@
 #include "nvif/ioctl.h"
 #include "nvif/unpack.h"
 
-#ifdef DEBUG
+drm_private FILE *nouveau_out = NULL;
 drm_private uint32_t nouveau_debug = 0;
 
 static void
-debug_init(char *args)
+debug_init(void)
 {
-	if (args) {
-		int n = strtol(args, NULL, 0);
+	static bool once = false;
+	char *debug, *out;
+
+	if (once)
+		return;
+	once = true;
+
+	debug = getenv("NOUVEAU_LIBDRM_DEBUG");
+	if (debug) {
+		int n = strtol(debug, NULL, 0);
 		if (n >= 0)
 			nouveau_debug = n;
+	}
+
+	nouveau_out = stderr;
+	out = getenv("NOUVEAU_LIBDRM_OUT");
+	if (out) {
+		FILE *fout = fopen(out, "w");
+		if (fout)
+			nouveau_out = fout;
 	}
 }
-#endif
 
 static int
 nouveau_object_ioctl(struct nouveau_object *obj, void *data, uint32_t size)
@@ -327,9 +343,7 @@
 	struct nouveau_drm *drm;
 	drmVersionPtr ver;
 
-#ifdef DEBUG
-	debug_init(getenv("NOUVEAU_LIBDRM_DEBUG"));
-#endif
+	debug_init();
 
 	if (!(drm = calloc(1, sizeof(*drm))))
 		return -ENOMEM;
@@ -593,7 +607,6 @@
 	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
 	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
 	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
-	struct drm_gem_close req = { .handle = bo->handle };
 
 	if (nvbo->head.next) {
 		pthread_mutex_lock(&nvdev->lock);
@@ -607,11 +620,11 @@
 			 * might cause the bo to be closed accidentally while
 			 * re-importing.
 			 */
-			drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
+			drmCloseBufferHandle(drm->fd, bo->handle);
 		}
 		pthread_mutex_unlock(&nvdev->lock);
 	} else {
-		drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
+		drmCloseBufferHandle(drm->fd, bo->handle);
 	}
 	if (bo->map)
 		drm_munmap(bo->map, bo->size);
diff --git a/nouveau/private.h b/nouveau/private.h
index 034a958..b81d4b1 100644
--- a/nouveau/private.h
+++ b/nouveau/private.h
@@ -1,6 +1,8 @@
 #ifndef __NOUVEAU_LIBDRM_PRIVATE_H__
 #define __NOUVEAU_LIBDRM_PRIVATE_H__
 
+#include <stdio.h>
+
 #include <libdrm_macros.h>
 #include <xf86drm.h>
 #include <xf86atomic.h>
@@ -9,18 +11,19 @@
 
 #include "nouveau.h"
 
-#ifdef DEBUG
+/*
+ * 0x00000001 dump all pushbuffers
+ * 0x00000002 submit pushbuffers synchronously
+ * 0x80000000 if compiled with SIMULATE return -EINVAL for all pb submissions
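+ *
+ * The value is read from the NOUVEAU_LIBDRM_DEBUG environment variable;
+ * debug output goes to the file named by NOUVEAU_LIBDRM_OUT (stderr by
+ * default).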
+ */
 drm_private extern uint32_t nouveau_debug;
+drm_private extern FILE *nouveau_out;
 #define dbg_on(lvl) (nouveau_debug & (1 << lvl))
 #define dbg(lvl, fmt, args...) do {                                            \
 	if (dbg_on((lvl)))                                                     \
-		fprintf(stderr, "nouveau: "fmt, ##args);                       \
+		fprintf(nouveau_out, "nouveau: "fmt, ##args);                       \
 } while(0)
-#else
-#define dbg_on(lvl) (0)
-#define dbg(lvl, fmt, args...)
-#endif
-#define err(fmt, args...) fprintf(stderr, "nouveau: "fmt, ##args)
+#define err(fmt, args...) fprintf(nouveau_out, "nouveau: "fmt, ##args)
 
 struct nouveau_client_kref {
 	struct drm_nouveau_gem_pushbuf_bo *kref;
diff --git a/nouveau/pushbuf.c b/nouveau/pushbuf.c
index e5f73f0..5fadd7a 100644
--- a/nouveau/pushbuf.c
+++ b/nouveau/pushbuf.c
@@ -29,6 +29,7 @@
 #include <string.h>
 #include <assert.h>
 #include <errno.h>
+#include <inttypes.h>
 
 #include <xf86drm.h>
 #include <xf86atomic.h>
@@ -274,9 +275,10 @@
 
 	kref = krec->buffer;
 	for (i = 0; i < krec->nr_buffer; i++, kref++) {
-		err("ch%d: buf %08x %08x %08x %08x %08x\n", chid, i,
+		bo = (void *)(uintptr_t)kref->user_priv;
+		err("ch%d: buf %08x %08x %08x %08x %08x %p 0x%"PRIx64" 0x%"PRIx64"\n", chid, i,
 		    kref->handle, kref->valid_domains,
-		    kref->read_domains, kref->write_domains);
+		    kref->read_domains, kref->write_domains, bo->map, bo->offset, bo->size);
 	}
 
 	krel = krec->reloc;
@@ -292,11 +294,14 @@
 		kref = krec->buffer + kpsh->bo_index;
 		bo = (void *)(unsigned long)kref->user_priv;
 		bgn = (uint32_t *)((char *)bo->map + kpsh->offset);
-		end = bgn + (kpsh->length /4);
+		end = bgn + ((kpsh->length & 0x7fffff) /4);
 
-		err("ch%d: psh %08x %010llx %010llx\n", chid, kpsh->bo_index,
+		err("ch%d: psh %s%08x %010llx %010llx\n", chid,
+		    bo->map ? "" : "(unmapped) ", kpsh->bo_index,
 		    (unsigned long long)kpsh->offset,
 		    (unsigned long long)(kpsh->offset + kpsh->length));
+		if (!bo->map)
+			continue;
 		while (bgn < end)
 			err("\t0x%08x\n", *bgn++);
 	}
@@ -336,6 +341,8 @@
 		req.suffix0 = nvpb->suffix0;
 		req.suffix1 = nvpb->suffix1;
 		req.vram_available = 0; /* for valgrind */
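+		/* debug bit 1 (0x2): submit this pushbuffer synchronously */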
+		if (dbg_on(1))
+			req.vram_available |= NOUVEAU_GEM_PUSHBUF_SYNC;
 		req.gart_available = 0;
 
 		if (dbg_on(0))
diff --git a/omap/meson.build b/omap/meson.build
index 53330b6..bfd59f0 100644
--- a/omap/meson.build
+++ b/omap/meson.build
@@ -18,7 +18,7 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
-libdrm_omap = shared_library(
+libdrm_omap = library(
   'drm_omap',
   [files('omap_drm.c'), config_file],
   include_directories : [inc_root, inc_drm],
diff --git a/omap/omap_drm.c b/omap/omap_drm.c
index ffacea6..aa27366 100644
--- a/omap/omap_drm.c
+++ b/omap/omap_drm.c
@@ -174,10 +174,7 @@
 {
 	struct omap_bo *bo = calloc(sizeof(*bo), 1);
 	if (!bo) {
-		struct drm_gem_close req = {
-				.handle = handle,
-		};
-		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+		drmCloseBufferHandle(dev->fd, handle);
 		return NULL;
 	}
 	bo->dev = omap_device_ref(dev);
@@ -365,12 +362,9 @@
 	}
 
 	if (bo->handle) {
-		struct drm_gem_close req = {
-				.handle = bo->handle,
-		};
 		pthread_mutex_lock(&table_lock);
 		drmHashDelete(bo->dev->handle_table, bo->handle);
-		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+		drmCloseBufferHandle(bo->dev->fd, bo->handle);
 		pthread_mutex_unlock(&table_lock);
 	}
 
diff --git a/radeon/meson.build b/radeon/meson.build
index ca12832..31fe9cd 100644
--- a/radeon/meson.build
+++ b/radeon/meson.build
@@ -19,7 +19,7 @@
 # SOFTWARE.
 
 
-libdrm_radeon = shared_library(
+libdrm_radeon = library(
   'drm_radeon',
   [
     files(
diff --git a/radeon/radeon_bo_gem.c b/radeon/radeon_bo_gem.c
index 86f7c00..bbe72ce 100644
--- a/radeon/radeon_bo_gem.c
+++ b/radeon/radeon_bo_gem.c
@@ -125,7 +125,6 @@
 static struct radeon_bo *bo_unref(struct radeon_bo_int *boi)
 {
     struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
-    struct drm_gem_close args;
 
     if (boi->cref) {
         return (struct radeon_bo *)boi;
@@ -134,12 +133,8 @@
         drm_munmap(bo_gem->priv_ptr, boi->size);
     }
 
-    /* Zero out args to make valgrind happy */
-    memset(&args, 0, sizeof(args));
-
     /* close object */
-    args.handle = boi->handle;
-    drmIoctl(boi->bom->fd, DRM_IOCTL_GEM_CLOSE, &args);
+    drmCloseBufferHandle(boi->bom->fd, boi->handle);
     memset(bo_gem, 0, sizeof(struct radeon_bo_gem));
     free(bo_gem);
     return NULL;
diff --git a/tegra/meson.build b/tegra/meson.build
index 88613b9..edddf72 100644
--- a/tegra/meson.build
+++ b/tegra/meson.build
@@ -18,7 +18,7 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
-libdrm_tegra = shared_library(
+libdrm_tegra = library(
   'drm_tegra',
   [files('tegra.c'), config_file],
   include_directories : [inc_root, inc_drm],
diff --git a/tegra/tegra.c b/tegra/tegra.c
index cf00a3c..420b171 100644
--- a/tegra/tegra.c
+++ b/tegra/tegra.c
@@ -38,15 +38,11 @@
 static void drm_tegra_bo_free(struct drm_tegra_bo *bo)
 {
 	struct drm_tegra *drm = bo->drm;
-	struct drm_gem_close args;
 
 	if (bo->map)
 		munmap(bo->map, bo->size);
 
-	memset(&args, 0, sizeof(args));
-	args.handle = bo->handle;
-
-	drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &args);
+	drmCloseBufferHandle(drm->fd, bo->handle);
 
 	free(bo);
 }
diff --git a/tests/amdgpu/amdgpu_test.c b/tests/amdgpu/amdgpu_test.c
index 8c86767..c4224c9 100644
--- a/tests/amdgpu/amdgpu_test.c
+++ b/tests/amdgpu/amdgpu_test.c
@@ -37,6 +37,18 @@
 #include <sys/time.h>
 #include <stdarg.h>
 #include <stdint.h>
+#ifdef __linux__
+#include <linux/limits.h>
+#elif __FreeBSD__
+/* SPECNAMELEN in FreeBSD is defined here: */
+#include <sys/param.h>
+#endif
+#ifdef MAJOR_IN_MKDEV
+#include <sys/mkdev.h>
+#endif
+#ifdef MAJOR_IN_SYSMACROS
+#include <sys/sysmacros.h>
+#endif
 
 #include "drm.h"
 #include "xf86drmMode.h"
@@ -59,6 +71,7 @@
 #define RAS_TESTS_STR "RAS Tests"
 #define SYNCOBJ_TIMELINE_TESTS_STR "SYNCOBJ TIMELINE Tests"
 #define SECURITY_TESTS_STR "Security Tests"
+#define HOTUNPLUG_TESTS_STR "Hotunplug Tests"
 
 /**
  *  Open handles for amdgpu devices
@@ -137,6 +150,12 @@
 		.pCleanupFunc = suite_security_tests_clean,
 		.pTests = security_tests,
 	},
+	{
+		.pName = HOTUNPLUG_TESTS_STR,
+		.pInitFunc = suite_hotunplug_tests_init,
+		.pCleanupFunc = suite_hotunplug_tests_clean,
+		.pTests = hotunplug_tests,
+	},
 
 	CU_SUITE_INFO_NULL,
 };
@@ -198,6 +217,10 @@
 			.pName = SECURITY_TESTS_STR,
 			.pActive = suite_security_tests_enable,
 		},
+		{
+			.pName = HOTUNPLUG_TESTS_STR,
+			.pActive = suite_hotunplug_tests_enable,
+		},
 };
 
 
@@ -339,12 +362,13 @@
 
 /* Close AMD devices.
  */
-static void amdgpu_close_devices()
+void amdgpu_close_devices()
 {
 	int i;
 	for (i = 0; i < MAX_CARDS_SUPPORTED; i++)
-		if (drm_amdgpu[i] >=0)
+		if (drm_amdgpu[i] >= 0) {
 			close(drm_amdgpu[i]);
+		}
 }
 
 /* Print AMD devices information */
@@ -430,7 +454,8 @@
 {
 	amdgpu_device_handle device_handle;
 	uint32_t major_version, minor_version, family_id;
-	int i;
+	drmDevicePtr devices[MAX_CARDS_SUPPORTED];
+	int i, drm_count;
 	int size = sizeof(suites_active_stat) / sizeof(suites_active_stat[0]);
 
 	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
@@ -442,6 +467,8 @@
 	if (amdgpu_device_deinitialize(device_handle))
 		return;
 
+	drm_count = drmGetDevices2(0, devices, MAX_CARDS_SUPPORTED);
+
 	/* Set active status for suites based on their policies */
 	for (i = 0; i < size; ++i)
 		if (amdgpu_set_suite_active(suites_active_stat[i].pName,
@@ -496,9 +523,6 @@
 				"gfx ring slow bad draw test (set amdgpu.lockup_timeout=50)", CU_FALSE))
 			fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
 
-	if (amdgpu_set_test_active(BO_TESTS_STR, "Metadata", CU_FALSE))
-		fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
-
 	if (amdgpu_set_test_active(BASIC_TESTS_STR, "bo eviction Test", CU_FALSE))
 		fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
 
@@ -524,6 +548,84 @@
 	//if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
 		if (amdgpu_set_test_active(BASIC_TESTS_STR, "GPU reset Test", CU_FALSE))
 			fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
+
+	/* You need at least 2 devices for this */
+	if (drm_count < 2)
+		if (amdgpu_set_test_active(HOTUNPLUG_TESTS_STR, "Unplug with exported fence", CU_FALSE))
+			fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
+}
+
+int test_device_index;
+
+int amdgpu_open_device_on_test_index(int render_node)
+{
+	int i;
+
+	if (amdgpu_open_devices(open_render_node) <= 0) {
+		perror("Cannot open AMDGPU device");
+		return -1;
+	}
+
+	if (test_device_index >= 0) {
+		/* Most tests run on device of drm_amdgpu[0].
+		 * Swap the chosen device to drm_amdgpu[0].
+		 */
+		i = drm_amdgpu[0];
+		drm_amdgpu[0] = drm_amdgpu[test_device_index];
+		drm_amdgpu[test_device_index] = i;
+	}
+
+	return 0;
+}
+
+
+static bool amdgpu_node_is_drm(int maj, int min)
+{
+#ifdef __linux__
+    char path[64];
+    struct stat sbuf;
+
+    snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device/drm",
+             maj, min);
+    return stat(path, &sbuf) == 0;
+#elif defined(__FreeBSD__)
+    char name[SPECNAMELEN];
+
+    if (!devname_r(makedev(maj, min), S_IFCHR, name, sizeof(name)))
+      return 0;
+    /* Handle drm/ and dri/ as both are present in different FreeBSD version
+     * FreeBSD on amd64/i386/powerpc external kernel modules create node in
+     * in /dev/drm/ and links in /dev/dri while a WIP in kernel driver creates
+     * only device nodes in /dev/dri/ */
+    return (!strncmp(name, "drm/", 4) || !strncmp(name, "dri/", 4));
+#else
+    return maj == DRM_MAJOR;
+#endif
+}
+
+char *amdgpu_get_device_from_fd(int fd)
+{
+#ifdef __linux__
+    struct stat sbuf;
+    char path[PATH_MAX + 1];
+    unsigned int maj, min;
+
+    if (fstat(fd, &sbuf))
+        return NULL;
+
+    maj = major(sbuf.st_rdev);
+    min = minor(sbuf.st_rdev);
+
+    if (!amdgpu_node_is_drm(maj, min) || !S_ISCHR(sbuf.st_mode))
+        return NULL;
+
+    snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device", maj, min);
+    return strdup(path);
+#else
+    return NULL;
+#endif
 }
 
 /* The main() function for setting up and running the tests.
@@ -541,7 +643,6 @@
 	int display_devices = 0;/* By default not to display devices' info */
 	CU_pSuite pSuite = NULL;
 	CU_pTest  pTest  = NULL;
-	int test_device_index;
 	int display_list = 0;
 	int force_run = 0;
 
diff --git a/tests/amdgpu/amdgpu_test.h b/tests/amdgpu/amdgpu_test.h
index 98cec69..cc12756 100644
--- a/tests/amdgpu/amdgpu_test.h
+++ b/tests/amdgpu/amdgpu_test.h
@@ -273,6 +273,29 @@
 							  unsigned ip_type,
 							  bool secure);
 
+
+/**
+ * Initialize hotunplug test suite
+ */
+int suite_hotunplug_tests_init();
+
+/**
+ * Deinitialize hotunplug test suite
+ */
+int suite_hotunplug_tests_clean();
+
+/**
+ * Decide if the suite is enabled by default or not.
+ */
+CU_BOOL suite_hotunplug_tests_enable(void);
+
+/**
+ * Tests in hotunplug test suite
+ */
+extern CU_TestInfo hotunplug_tests[];
+
 /**
  * Helper functions
  */
@@ -449,13 +472,18 @@
 	return r;
 }
 
-static inline bool asic_is_arcturus(uint32_t asic_id)
+
+static inline bool asic_is_gfx_pipe_removed(uint32_t family_id, uint32_t chip_id, uint32_t chip_rev)
 {
-	switch(asic_id) {
-	/* Arcturus asic DID */
-	case 0x738C:
-	case 0x7388:
-	case 0x738E:
+	if (family_id != AMDGPU_FAMILY_AI)
+		return false;
+
+	switch (chip_id - chip_rev) {
+	/* Arcturus */
+	case 0x32:
+	/* Aldebaran */
+	case 0x3c:
 		return true;
 	default:
 		return false;
@@ -471,4 +499,8 @@
 				    struct amdgpu_cs_request *ibs_request,
 				    bool secure);
 
+void amdgpu_close_devices();
+int amdgpu_open_device_on_test_index(int render_node);
+char *amdgpu_get_device_from_fd(int fd);
+
 #endif  /* #ifdef _AMDGPU_TEST_H_ */
diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
index dc9ed94..0180f9c 100644
--- a/tests/amdgpu/basic_tests.c
+++ b/tests/amdgpu/basic_tests.c
@@ -46,6 +46,8 @@
 static  uint32_t  major_version;
 static  uint32_t  minor_version;
 static  uint32_t  family_id;
+static  uint32_t  chip_id;
+static  uint32_t  chip_rev;
 
 static void amdgpu_query_info_test(void);
 static void amdgpu_command_submission_gfx(void);
@@ -341,9 +343,10 @@
 };
 
 static const uint32_t bufferclear_cs_shader_gfx9[] = {
-    0xD1FD0000, 0x04010C08, 0x7E020204, 0x7E040205,
-    0x7E060206, 0x7E080207, 0xE01C2000, 0x80000100,
-    0xBF810000
+    0x260000ff, 0x000003ff, 0xd1fd0000, 0x04010c08,
+    0x7e020280, 0x7e040204, 0x7e060205, 0x7e080206,
+    0x7e0a0207, 0xe01c2000, 0x80000200, 0xbf8c0000,
+    0xbf810000
 };
 
 static const uint32_t bufferclear_cs_shader_registers_gfx9[][2] = {
@@ -357,8 +360,9 @@
 static const uint32_t bufferclear_cs_shader_registers_num_gfx9 = 5;
 
 static const uint32_t buffercopy_cs_shader_gfx9[] = {
-    0xD1FD0000, 0x04010C08, 0xE00C2000, 0x80000100,
-    0xBF8C0F70, 0xE01C2000, 0x80010100, 0xBF810000
+    0x260000ff, 0x000003ff, 0xd1fd0000, 0x04010c08,
+    0x7e020280, 0xe00c2000, 0x80000200, 0xbf8c0f70,
+    0xe01c2000, 0x80010200, 0xbf810000
 };
 
 static const uint32_t preamblecache_gfx9[] = {
@@ -617,19 +621,21 @@
 
 CU_BOOL suite_basic_tests_enable(void)
 {
-	uint32_t asic_id;
 
 	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
 					     &minor_version, &device_handle))
 		return CU_FALSE;
 
-	asic_id = device_handle->info.asic_id;
+	family_id = device_handle->info.family_id;
+	chip_id = device_handle->info.chip_external_rev;
+	chip_rev = device_handle->info.chip_rev;
 
 	if (amdgpu_device_deinitialize(device_handle))
 		return CU_FALSE;
 
-	/* disable gfx engine basic test cases for Arturus due to no CPG */
-	if (asic_is_arcturus(asic_id)) {
+	/* disable gfx engine basic test cases for ASICs that have no CPG */
+	if (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) {
 		if (amdgpu_set_test_active("Basic Tests",
 					"Command submission Test (GFX)",
 					CU_FALSE))
@@ -1066,6 +1072,14 @@
 	amdgpu_bo_list_handle bo_list[2];
 	amdgpu_va_handle va_handle[2];
 	int r, i;
+	struct amdgpu_gpu_info gpu_info = {0};
+	unsigned gc_ip_type;
+
+	r = amdgpu_query_gpu_info(device_handle, &gpu_info);
+	CU_ASSERT_EQUAL(r, 0);
+
+	gc_ip_type = (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) ?
+			AMDGPU_HW_IP_COMPUTE : AMDGPU_HW_IP_GFX;
 
 	if (family_id == AMDGPU_FAMILY_SI) {
 		sdma_nop = SDMA_PACKET_SI(SDMA_NOP_SI, 0, 0, 0, 0);
@@ -1108,14 +1122,14 @@
 	r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_DMA, 0, 0, sem);
 	CU_ASSERT_EQUAL(r, 0);
 
-	r = amdgpu_cs_wait_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
+	r = amdgpu_cs_wait_semaphore(context_handle[0], gc_ip_type, 0, 0, sem);
 	CU_ASSERT_EQUAL(r, 0);
 	ptr = ib_result_cpu[1];
 	ptr[0] = gfx_nop;
 	ib_info[1].ib_mc_address = ib_result_mc_address[1];
 	ib_info[1].size = 1;
 
-	ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
+	ibs_request[1].ip_type = gc_ip_type;
 	ibs_request[1].number_of_ibs = 1;
 	ibs_request[1].ibs = &ib_info[1];
 	ibs_request[1].resources = bo_list[1];
@@ -1125,7 +1139,7 @@
 	CU_ASSERT_EQUAL(r, 0);
 
 	fence_status.context = context_handle[0];
-	fence_status.ip_type = AMDGPU_HW_IP_GFX;
+	fence_status.ip_type = gc_ip_type;
 	fence_status.ip_instance = 0;
 	fence_status.fence = ibs_request[1].seq_no;
 	r = amdgpu_cs_query_fence_status(&fence_status,
@@ -1139,24 +1153,24 @@
 	ib_info[0].ib_mc_address = ib_result_mc_address[0];
 	ib_info[0].size = 1;
 
-	ibs_request[0].ip_type = AMDGPU_HW_IP_GFX;
+	ibs_request[0].ip_type = gc_ip_type;
 	ibs_request[0].number_of_ibs = 1;
 	ibs_request[0].ibs = &ib_info[0];
 	ibs_request[0].resources = bo_list[0];
 	ibs_request[0].fence_info.handle = NULL;
 	r = amdgpu_cs_submit(context_handle[0], 0,&ibs_request[0], 1);
 	CU_ASSERT_EQUAL(r, 0);
-	r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
+	r = amdgpu_cs_signal_semaphore(context_handle[0], gc_ip_type, 0, 0, sem);
 	CU_ASSERT_EQUAL(r, 0);
 
-	r = amdgpu_cs_wait_semaphore(context_handle[1], AMDGPU_HW_IP_GFX, 0, 0, sem);
+	r = amdgpu_cs_wait_semaphore(context_handle[1], gc_ip_type, 0, 0, sem);
 	CU_ASSERT_EQUAL(r, 0);
 	ptr = ib_result_cpu[1];
 	ptr[0] = gfx_nop;
 	ib_info[1].ib_mc_address = ib_result_mc_address[1];
 	ib_info[1].size = 1;
 
-	ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
+	ibs_request[1].ip_type = gc_ip_type;
 	ibs_request[1].number_of_ibs = 1;
 	ibs_request[1].ibs = &ib_info[1];
 	ibs_request[1].resources = bo_list[1];
@@ -1166,7 +1180,7 @@
 	CU_ASSERT_EQUAL(r, 0);
 
 	fence_status.context = context_handle[1];
-	fence_status.ip_type = AMDGPU_HW_IP_GFX;
+	fence_status.ip_type = gc_ip_type;
 	fence_status.ip_instance = 0;
 	fence_status.fence = ibs_request[1].seq_no;
 	r = amdgpu_cs_query_fence_status(&fence_status,
diff --git a/tests/amdgpu/bo_tests.c b/tests/amdgpu/bo_tests.c
index 4c11665..8fc7fe2 100644
--- a/tests/amdgpu/bo_tests.c
+++ b/tests/amdgpu/bo_tests.c
@@ -168,7 +168,7 @@
 	struct amdgpu_bo_info info = {0};
 	int r;
 
-	meta.size_metadata = 1;
+	meta.size_metadata = 4;
 	meta.umd_metadata[0] = 0xdeadbeef;
 
 	r = amdgpu_bo_set_metadata(buffer_handle, &meta);
@@ -177,7 +177,7 @@
 	r = amdgpu_bo_query_info(buffer_handle, &info);
 	CU_ASSERT_EQUAL(r, 0);
 
-	CU_ASSERT_EQUAL(info.metadata.size_metadata, 1);
+	CU_ASSERT_EQUAL(info.metadata.size_metadata, 4);
 	CU_ASSERT_EQUAL(info.metadata.umd_metadata[0], 0xdeadbeef);
 }
 
diff --git a/tests/amdgpu/cs_tests.c b/tests/amdgpu/cs_tests.c
index 10124c1..f509678 100644
--- a/tests/amdgpu/cs_tests.c
+++ b/tests/amdgpu/cs_tests.c
@@ -64,21 +64,20 @@
 
 CU_BOOL suite_cs_tests_enable(void)
 {
-	uint32_t asic_id;
-
 	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
 					     &minor_version, &device_handle))
 		return CU_FALSE;
 
 	family_id = device_handle->info.family_id;
-	asic_id = device_handle->info.asic_id;
+	chip_id = device_handle->info.chip_external_rev;
+	chip_rev = device_handle->info.chip_rev;
 
 	if (amdgpu_device_deinitialize(device_handle))
 		return CU_FALSE;
 
 
 	if (family_id >= AMDGPU_FAMILY_RV || family_id == AMDGPU_FAMILY_SI ||
-		asic_is_arcturus(asic_id)) {
+		asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) {
 		printf("\n\nThe ASIC NOT support UVD, suite disabled\n");
 		return CU_FALSE;
 	}
diff --git a/tests/amdgpu/deadlock_tests.c b/tests/amdgpu/deadlock_tests.c
index 248cc33..07a3944 100644
--- a/tests/amdgpu/deadlock_tests.c
+++ b/tests/amdgpu/deadlock_tests.c
@@ -106,6 +106,10 @@
 static pthread_t stress_thread;
 static uint32_t *ptr;
 
+static uint32_t family_id;
+static uint32_t chip_rev;
+static uint32_t chip_id;
+
 int use_uc_mtype = 0;
 
 static void amdgpu_deadlock_helper(unsigned ip_type);
@@ -124,25 +128,27 @@
 CU_BOOL suite_deadlock_tests_enable(void)
 {
 	CU_BOOL enable = CU_TRUE;
-	uint32_t asic_id;
 
 	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
 					     &minor_version, &device_handle))
 		return CU_FALSE;
 
+	family_id = device_handle->info.family_id;
+	chip_id = device_handle->info.chip_external_rev;
+	chip_rev = device_handle->info.chip_rev;
+
 	/*
 	 * Only enable for ASICs supporting GPU reset and for which it's enabled
 	 * by default (currently GFX8/9 dGPUS)
 	 */
-	if (device_handle->info.family_id != AMDGPU_FAMILY_VI &&
-	    device_handle->info.family_id != AMDGPU_FAMILY_AI &&
-	    device_handle->info.family_id != AMDGPU_FAMILY_CI) {
+	if (family_id != AMDGPU_FAMILY_VI &&
+	    family_id != AMDGPU_FAMILY_AI &&
+	    family_id != AMDGPU_FAMILY_CI) {
 		printf("\n\nGPU reset is not enabled for the ASIC, deadlock suite disabled\n");
 		enable = CU_FALSE;
 	}
 
-	asic_id = device_handle->info.asic_id;
-	if (asic_is_arcturus(asic_id)) {
+	if (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) {
 		if (amdgpu_set_test_active("Deadlock Tests",
 					"gfx ring block test (set amdgpu.lockup_timeout=50)",
 					CU_FALSE))
diff --git a/tests/amdgpu/hotunplug_tests.c b/tests/amdgpu/hotunplug_tests.c
new file mode 100644
index 0000000..23ea140
--- /dev/null
+++ b/tests/amdgpu/hotunplug_tests.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+*/
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#if HAVE_ALLOCA_H
+# include <alloca.h>
+#endif
+
+#include "CUnit/Basic.h"
+
+#include "amdgpu_test.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+#include "xf86drm.h"
+#include <pthread.h>
+
+#define GFX_COMPUTE_NOP  0xffff1000
+
+static  amdgpu_device_handle device_handle;
+static  uint32_t  major_version;
+static  uint32_t  minor_version;
+static char *sysfs_remove = NULL;
+static bool do_cs;
+
+CU_BOOL suite_hotunplug_tests_enable(void)
+{
+	CU_BOOL enable = CU_TRUE;
+	drmDevicePtr device;
+
+	if (drmGetDevice2(drm_amdgpu[0], DRM_DEVICE_GET_PCI_REVISION, &device)) {
+		printf("\n\nGPU Failed to get DRM device PCI info!\n");
+		return CU_FALSE;
+	}
+
+	if (device->bustype != DRM_BUS_PCI) {
+		printf("\n\nGPU device is not on PCI bus!\n");
+		amdgpu_device_deinitialize(device_handle);
+		return CU_FALSE;
+	}
+
+	/* Disable until the hot-unplug support in kernel gets into drm-next */
+	if (major_version < 0xff)
+		enable = false;
+
+	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
+					     &minor_version, &device_handle))
+		return CU_FALSE;
+
+	/* TODO: once the DRM version with the unplug feature is ready,
+	 * compare against it here.
+	 */
+
+	if (amdgpu_device_deinitialize(device_handle))
+		return CU_FALSE;
+
+	return enable;
+}
+
+int suite_hotunplug_tests_init(void)
+{
+	/* We need to open/close device at each test manually */
+	amdgpu_close_devices();
+
+	return CUE_SUCCESS;
+}
+
+int suite_hotunplug_tests_clean(void)
+{
+	return CUE_SUCCESS;
+}
+
+static int amdgpu_hotunplug_trigger(const char *pathname)
+{
+	int fd, len;
+
+	fd = open(pathname, O_WRONLY);
+	if (fd < 0)
+		return -errno;
+
+	len = write(fd, "1", 1);
+	close(fd);
+
+	return len;
+}
+
+static int amdgpu_hotunplug_setup_test()
+{
+	int r;
+	char *tmp_str;
+
+	if (amdgpu_open_device_on_test_index(open_render_node) < 0) {
+		printf("\n\n Failed to reopen device file!\n");
+		return CUE_SINIT_FAILED;
+	}
+
+	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
+				   &minor_version, &device_handle);
+
+	if (r) {
+		if ((r == -EACCES) && (errno == EACCES))
+			printf("\n\nError:%s. "
+				"Hint:Try to run this test program as root.",
+				strerror(errno));
+		return CUE_SINIT_FAILED;
+	}
+
+	tmp_str = amdgpu_get_device_from_fd(drm_amdgpu[0]);
+	if (!tmp_str) {
+		printf("\n\n Device path not found!\n");
+		return CUE_SINIT_FAILED;
+	}
+
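+	/* Grow the buffer so the "/remove" suffix can be appended below. */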
+	sysfs_remove = realloc(tmp_str, strlen(tmp_str) * 2);
+	strcat(sysfs_remove, "/remove");
+
+	return 0;
+}
+
+static int amdgpu_hotunplug_teardown_test()
+{
+	if (amdgpu_device_deinitialize(device_handle))
+		return CUE_SCLEAN_FAILED;
+
+	amdgpu_close_devices();
+
+	if (sysfs_remove)
+		free(sysfs_remove);
+
+	return 0;
+}
+
+static inline int amdgpu_hotunplug_remove()
+{
+	return amdgpu_hotunplug_trigger(sysfs_remove);
+}
+
+static inline int amdgpu_hotunplug_rescan()
+{
+	return amdgpu_hotunplug_trigger("/sys/bus/pci/rescan");
+}
+
+static int amdgpu_cs_sync(amdgpu_context_handle context,
+			   unsigned int ip_type,
+			   int ring,
+			   unsigned int seqno)
+{
+	struct amdgpu_cs_fence fence = {
+		.context = context,
+		.ip_type = ip_type,
+		.ring = ring,
+		.fence = seqno,
+	};
+	uint32_t expired;
+
+	return  amdgpu_cs_query_fence_status(&fence,
+					   AMDGPU_TIMEOUT_INFINITE,
+					   0, &expired);
+}
+
+static void *amdgpu_nop_cs()
+{
+	amdgpu_bo_handle ib_result_handle;
+	void *ib_result_cpu;
+	uint64_t ib_result_mc_address;
+	uint32_t *ptr;
+	int i, r;
+	amdgpu_bo_list_handle bo_list;
+	amdgpu_va_handle va_handle;
+	amdgpu_context_handle context;
+	struct amdgpu_cs_request ibs_request;
+	struct amdgpu_cs_ib_info ib_info;
+
+	r = amdgpu_cs_ctx_create(device_handle, &context);
+	CU_ASSERT_EQUAL(r, 0);
+
+	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
+				    AMDGPU_GEM_DOMAIN_GTT, 0,
+				    &ib_result_handle, &ib_result_cpu,
+				    &ib_result_mc_address, &va_handle);
+	CU_ASSERT_EQUAL(r, 0);
+
+	ptr = ib_result_cpu;
+	for (i = 0; i < 16; ++i)
+		ptr[i] = GFX_COMPUTE_NOP;
+
+	r = amdgpu_bo_list_create(device_handle, 1, &ib_result_handle, NULL, &bo_list);
+	CU_ASSERT_EQUAL(r, 0);
+
+	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
+	ib_info.ib_mc_address = ib_result_mc_address;
+	ib_info.size = 16;
+
+	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
+	ibs_request.ip_type = AMDGPU_HW_IP_GFX;
+	ibs_request.ring = 0;
+	ibs_request.number_of_ibs = 1;
+	ibs_request.ibs = &ib_info;
+	ibs_request.resources = bo_list;
+
+	while (do_cs)
+		amdgpu_cs_submit(context, 0, &ibs_request, 1);
+
+	amdgpu_cs_sync(context, AMDGPU_HW_IP_GFX, 0, ibs_request.seq_no);
+	amdgpu_bo_list_destroy(bo_list);
+	amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
+				 ib_result_mc_address, 4096);
+
+	amdgpu_cs_ctx_free(context);
+
+	return (void *)0;
+}
+
+static pthread_t* amdgpu_create_cs_thread()
+{
+	int r;
+	pthread_t *thread = malloc(sizeof(*thread));
+	if (!thread)
+		return NULL;
+
+	do_cs = true;
+
+	r = pthread_create(thread, NULL, amdgpu_nop_cs, NULL);
+	CU_ASSERT_EQUAL(r, 0);
+
+	/* Give thread enough time to start*/
+	usleep(100000);
+	return thread;
+}
+
+static void amdgpu_destroy_cs_thread(pthread_t *thread)
+{
+	void *status;
+
+	do_cs = false;
+
+	pthread_join(*thread, &status);
+	CU_ASSERT_EQUAL(status, 0);
+
+	free(thread);
+}
+
+
+static void amdgpu_hotunplug_test(bool with_cs)
+{
+	int r;
+	pthread_t *thread = NULL;
+
+	r = amdgpu_hotunplug_setup_test();
+	CU_ASSERT_EQUAL(r, 0);
+
+	if (with_cs) {
+		thread = amdgpu_create_cs_thread();
+		CU_ASSERT_NOT_EQUAL(thread, NULL);
+	}
+
+	r = amdgpu_hotunplug_remove();
+	CU_ASSERT_EQUAL(r > 0, 1);
+
+	if (with_cs)
+		amdgpu_destroy_cs_thread(thread);
+
+	r = amdgpu_hotunplug_teardown_test();
+	CU_ASSERT_EQUAL(r, 0);
+
+	r = amdgpu_hotunplug_rescan();
+	CU_ASSERT_EQUAL(r > 0, 1);
+}
+
+static void amdgpu_hotunplug_simple(void)
+{
+	amdgpu_hotunplug_test(false);
+}
+
+static void amdgpu_hotunplug_with_cs(void)
+{
+	amdgpu_hotunplug_test(true);
+}
+
+static void amdgpu_hotunplug_with_exported_bo(void)
+{
+	int r;
+	uint32_t dma_buf_fd;
+	unsigned int *ptr;
+	amdgpu_bo_handle bo_handle;
+
+	struct amdgpu_bo_alloc_request request = {
+		.alloc_size = 4096,
+		.phys_alignment = 4096,
+		.preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
+		.flags = 0,
+	};
+
+	r = amdgpu_hotunplug_setup_test();
+	CU_ASSERT_EQUAL(r, 0);
+
+	r = amdgpu_bo_alloc(device_handle, &request, &bo_handle);
+	CU_ASSERT_EQUAL(r, 0);
+
+	r = amdgpu_bo_export(bo_handle, amdgpu_bo_handle_type_dma_buf_fd, &dma_buf_fd);
+	CU_ASSERT_EQUAL(r, 0);
+
+	ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+	CU_ASSERT_NOT_EQUAL(ptr,  MAP_FAILED);
+
+	r = amdgpu_hotunplug_remove();
+	CU_ASSERT_EQUAL(r > 0, 1);
+
+	amdgpu_bo_free(bo_handle);
+
+	r = amdgpu_hotunplug_teardown_test();
+	CU_ASSERT_EQUAL(r, 0);
+
+	*ptr = 0xdeafbeef;
+
+	munmap(ptr, 4096);
+	close(dma_buf_fd);
+
+	r = amdgpu_hotunplug_rescan();
+	CU_ASSERT_EQUAL(r > 0, 1);
+}
+
+static void amdgpu_hotunplug_with_exported_fence(void)
+{
+	amdgpu_bo_handle ib_result_handle;
+	void *ib_result_cpu;
+	uint64_t ib_result_mc_address;
+	uint32_t *ptr, sync_obj_handle, sync_obj_handle2;
+	int i, r;
+	amdgpu_bo_list_handle bo_list;
+	amdgpu_va_handle va_handle;
+	uint32_t major2, minor2;
+	amdgpu_device_handle device2;
+	amdgpu_context_handle context;
+	struct amdgpu_cs_request ibs_request;
+	struct amdgpu_cs_ib_info ib_info;
+	struct amdgpu_cs_fence fence_status = {0};
+	int shared_fd;
+
+	r = amdgpu_hotunplug_setup_test();
+	CU_ASSERT_EQUAL(r, 0);
+
+	r = amdgpu_device_initialize(drm_amdgpu[1], &major2, &minor2, &device2);
+	CU_ASSERT_EQUAL(r, 0);
+
+	r = amdgpu_cs_ctx_create(device_handle, &context);
+	CU_ASSERT_EQUAL(r, 0);
+
+	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
+				    AMDGPU_GEM_DOMAIN_GTT, 0,
+				    &ib_result_handle, &ib_result_cpu,
+				    &ib_result_mc_address, &va_handle);
+	CU_ASSERT_EQUAL(r, 0);
+
+	ptr = ib_result_cpu;
+	for (i = 0; i < 16; ++i)
+		ptr[i] = GFX_COMPUTE_NOP;
+
+	r = amdgpu_bo_list_create(device_handle, 1, &ib_result_handle, NULL, &bo_list);
+	CU_ASSERT_EQUAL(r, 0);
+
+	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
+	ib_info.ib_mc_address = ib_result_mc_address;
+	ib_info.size = 16;
+
+	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
+	ibs_request.ip_type = AMDGPU_HW_IP_GFX;
+	ibs_request.ring = 0;
+	ibs_request.number_of_ibs = 1;
+	ibs_request.ibs = &ib_info;
+	ibs_request.resources = bo_list;
+
+	CU_ASSERT_EQUAL(amdgpu_cs_submit(context, 0, &ibs_request, 1), 0);
+
+	fence_status.context = context;
+	fence_status.ip_type = AMDGPU_HW_IP_GFX;
+	fence_status.ip_instance = 0;
+	fence_status.fence = ibs_request.seq_no;
+
+	CU_ASSERT_EQUAL(amdgpu_cs_fence_to_handle(device_handle, &fence_status,
+						AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ,
+						&sync_obj_handle),
+						0);
+
+	CU_ASSERT_EQUAL(amdgpu_cs_export_syncobj(device_handle, sync_obj_handle, &shared_fd), 0);
+
+	CU_ASSERT_EQUAL(amdgpu_cs_import_syncobj(device2, shared_fd, &sync_obj_handle2), 0);
+
+	CU_ASSERT_EQUAL(amdgpu_cs_destroy_syncobj(device_handle, sync_obj_handle), 0);
+
+	CU_ASSERT_EQUAL(amdgpu_bo_list_destroy(bo_list), 0);
+	CU_ASSERT_EQUAL(amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
+				 ib_result_mc_address, 4096), 0);
+	CU_ASSERT_EQUAL(amdgpu_cs_ctx_free(context), 0);
+
+	r = amdgpu_hotunplug_remove();
+	CU_ASSERT_EQUAL(r > 0, 1);
+
+	CU_ASSERT_EQUAL(amdgpu_cs_syncobj_wait(device2, &sync_obj_handle2, 1, 100000000, 0, NULL), 0);
+
+	CU_ASSERT_EQUAL(amdgpu_cs_destroy_syncobj(device2, sync_obj_handle2), 0);
+
+	amdgpu_device_deinitialize(device2);
+
+	r = amdgpu_hotunplug_teardown_test();
+	CU_ASSERT_EQUAL(r, 0);
+
+	r = amdgpu_hotunplug_rescan();
+	CU_ASSERT_EQUAL(r > 0, 1);
+}
+
+
+CU_TestInfo hotunplug_tests[] = {
+	{ "Unplug card and rescan the bus to plug it back", amdgpu_hotunplug_simple },
+	{ "Same as first test but with command submission", amdgpu_hotunplug_with_cs },
+	{ "Unplug with exported bo", amdgpu_hotunplug_with_exported_bo },
+	{ "Unplug with exported fence", amdgpu_hotunplug_with_exported_fence },
+	CU_TEST_INFO_NULL,
+};
diff --git a/tests/amdgpu/meson.build b/tests/amdgpu/meson.build
index eb16a50..e6e3081 100644
--- a/tests/amdgpu/meson.build
+++ b/tests/amdgpu/meson.build
@@ -25,6 +25,7 @@
       'amdgpu_test.c', 'basic_tests.c', 'bo_tests.c', 'cs_tests.c',
       'vce_tests.c', 'uvd_enc_tests.c', 'vcn_tests.c', 'deadlock_tests.c',
       'vm_tests.c', 'ras_tests.c', 'syncobj_tests.c', 'security_tests.c',
+      'hotunplug_tests.c'
     ),
     dependencies : [dep_cunit, dep_threads, dep_atomic_ops],
     include_directories : [inc_root, inc_drm, include_directories('../../amdgpu')],
diff --git a/tests/amdgpu/security_tests.c b/tests/amdgpu/security_tests.c
index eed695a..280e862 100644
--- a/tests/amdgpu/security_tests.c
+++ b/tests/amdgpu/security_tests.c
@@ -315,7 +315,7 @@
 				  SECURE_BUFFER_SIZE,
 				  page_size,
 				  AMDGPU_GEM_DOMAIN_VRAM,
-				  0 /* AMDGPU_GEM_CREATE_ENCRYPTED */,
+				  AMDGPU_GEM_CREATE_ENCRYPTED,
 				  &bob);
 	if (res) {
 		PRINT_ERROR(res);
@@ -323,9 +323,9 @@
 		goto Out_free_Alice;
 	}
 
-	/* sDMA clear copy from Alice to Bob.
+	/* sDMA TMZ copy from Alice to Bob.
 	 */
-	amdgpu_bo_lcopy(&sb_ctx, &bob, &alice, SECURE_BUFFER_SIZE, 0);
+	amdgpu_bo_lcopy(&sb_ctx, &bob, &alice, SECURE_BUFFER_SIZE, 1);
 
 	/* Move Bob to the GTT domain.
 	 */
@@ -336,9 +336,9 @@
 		goto Out_free_all;
 	}
 
-	/* sDMA clear copy from Bob to Alice.
+	/* sDMA TMZ copy from Bob to Alice.
 	 */
-	amdgpu_bo_lcopy(&sb_ctx, &alice, &bob, SECURE_BUFFER_SIZE, 0);
+	amdgpu_bo_lcopy(&sb_ctx, &alice, &bob, SECURE_BUFFER_SIZE, 1);
 
 	/* Verify the contents of Alice.
 	 */
@@ -432,7 +432,8 @@
 				     &minor_version, &device_handle))
 		return CU_FALSE;
 
-	if (device_handle->info.family_id != AMDGPU_FAMILY_RV) {
+
+	if (!(device_handle->dev_info.ids_flags & AMDGPU_IDS_FLAGS_TMZ)) {
 		printf("\n\nDon't support TMZ (trust memory zone), security suite disabled\n");
 		enable = CU_FALSE;
 	}
diff --git a/tests/amdgpu/syncobj_tests.c b/tests/amdgpu/syncobj_tests.c
index 3a7b38e..690bea0 100644
--- a/tests/amdgpu/syncobj_tests.c
+++ b/tests/amdgpu/syncobj_tests.c
@@ -33,6 +33,10 @@
 static  uint32_t  major_version;
 static  uint32_t  minor_version;
 
+static  uint32_t  family_id;
+static  uint32_t  chip_id;
+static  uint32_t  chip_rev;
+
 static void amdgpu_syncobj_timeline_test(void);
 
 CU_BOOL suite_syncobj_timeline_tests_enable(void)
@@ -100,6 +104,18 @@
 	int i, r;
 	uint64_t seq_no;
 	static uint32_t *ptr;
+	struct amdgpu_gpu_info gpu_info = {0};
+	unsigned gc_ip_type;
+
+	r = amdgpu_query_gpu_info(device_handle, &gpu_info);
+	CU_ASSERT_EQUAL(r, 0);
+
+	family_id = device_handle->info.family_id;
+	chip_id = device_handle->info.chip_external_rev;
+	chip_rev = device_handle->info.chip_rev;
+
+	gc_ip_type = (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) ?
+			AMDGPU_HW_IP_COMPUTE : AMDGPU_HW_IP_GFX;
 
 	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
 	CU_ASSERT_EQUAL(r, 0);
@@ -125,11 +141,11 @@
 	chunk_data.ib_data._pad = 0;
 	chunk_data.ib_data.va_start = ib_result_mc_address;
 	chunk_data.ib_data.ib_bytes = 16 * 4;
-	chunk_data.ib_data.ip_type = wait_or_signal ? AMDGPU_HW_IP_GFX :
+	chunk_data.ib_data.ip_type = wait_or_signal ? gc_ip_type :
 		AMDGPU_HW_IP_DMA;
 	chunk_data.ib_data.ip_instance = 0;
 	chunk_data.ib_data.ring = 0;
-	chunk_data.ib_data.flags = 0;
+	chunk_data.ib_data.flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
 
 	chunks[1].chunk_id = wait_or_signal ?
 		AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT :
@@ -151,7 +167,7 @@
 
 	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
 	fence_status.context = context_handle;
-	fence_status.ip_type = wait_or_signal ? AMDGPU_HW_IP_GFX:
+	fence_status.ip_type = wait_or_signal ? gc_ip_type :
 		AMDGPU_HW_IP_DMA;
 	fence_status.ip_instance = 0;
 	fence_status.ring = 0;
diff --git a/tests/amdgpu/vce_tests.c b/tests/amdgpu/vce_tests.c
index 5434e44..4e925ca 100644
--- a/tests/amdgpu/vce_tests.c
+++ b/tests/amdgpu/vce_tests.c
@@ -116,7 +116,7 @@
 		return CU_FALSE;
 
 	if (family_id >= AMDGPU_FAMILY_RV || family_id == AMDGPU_FAMILY_SI ||
-		asic_is_arcturus(asic_id)) {
+		asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) {
 		printf("\n\nThe ASIC NOT support VCE, suite disabled\n");
 		return CU_FALSE;
 	}
diff --git a/tests/amdgpu/vcn_tests.c b/tests/amdgpu/vcn_tests.c
index 1ca6629..628b491 100644
--- a/tests/amdgpu/vcn_tests.c
+++ b/tests/amdgpu/vcn_tests.c
@@ -114,7 +114,7 @@
 	if (r != 0 || !info.available_rings ||
 	    (family_id < AMDGPU_FAMILY_RV &&
 	     (family_id == AMDGPU_FAMILY_AI &&
-	      chip_id != (chip_rev + 0x32)))) {  /* Arcturus */
+	      (chip_id - chip_rev) < 0x32))) {  /* Arcturus */
 		printf("\n\nThe ASIC NOT support VCN, suite disabled\n");
 		return CU_FALSE;
 	}
diff --git a/tests/amdgpu/vm_tests.c b/tests/amdgpu/vm_tests.c
index 95011ea..b94999c 100644
--- a/tests/amdgpu/vm_tests.c
+++ b/tests/amdgpu/vm_tests.c
@@ -30,6 +30,9 @@
 static  amdgpu_device_handle device_handle;
 static  uint32_t  major_version;
 static  uint32_t  minor_version;
+static  uint32_t  family_id;
+static  uint32_t  chip_id;
+static  uint32_t  chip_rev;
 
 static void amdgpu_vmid_reserve_test(void);
 static void amdgpu_vm_unaligned_map(void);
@@ -110,7 +113,11 @@
 	r = amdgpu_query_gpu_info(device_handle, &gpu_info);
 	CU_ASSERT_EQUAL(r, 0);
 
-	gc_ip_type = (asic_is_arcturus(gpu_info.asic_id)) ?
+	family_id = device_handle->info.family_id;
+	chip_id = device_handle->info.chip_external_rev;
+	chip_rev = device_handle->info.chip_rev;
+
+	gc_ip_type = (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) ?
 			AMDGPU_HW_IP_COMPUTE : AMDGPU_HW_IP_GFX;
 
 	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
diff --git a/tests/modetest/modetest.c b/tests/modetest/modetest.c
index fc75383..eca08ef 100644
--- a/tests/modetest/modetest.c
+++ b/tests/modetest/modetest.c
@@ -265,52 +265,37 @@
 
 static const char *modifier_to_string(uint64_t modifier)
 {
-	switch (modifier) {
-	case DRM_FORMAT_MOD_INVALID:
-		return "INVALID";
-	case DRM_FORMAT_MOD_LINEAR:
-		return "LINEAR";
-	case I915_FORMAT_MOD_X_TILED:
-		return "X_TILED";
-	case I915_FORMAT_MOD_Y_TILED:
-		return "Y_TILED";
-	case I915_FORMAT_MOD_Yf_TILED:
-		return "Yf_TILED";
-	case I915_FORMAT_MOD_Y_TILED_CCS:
-		return "Y_TILED_CCS";
-	case I915_FORMAT_MOD_Yf_TILED_CCS:
-		return "Yf_TILED_CCS";
-	case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
-		return "SAMSUNG_64_32_TILE";
-	case DRM_FORMAT_MOD_VIVANTE_TILED:
-		return "VIVANTE_TILED";
-	case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
-		return "VIVANTE_SUPER_TILED";
-	case DRM_FORMAT_MOD_VIVANTE_SPLIT_TILED:
-		return "VIVANTE_SPLIT_TILED";
-	case DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED:
-		return "VIVANTE_SPLIT_SUPER_TILED";
-	case DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED:
-		return "NVIDIA_TEGRA_TILED";
-	case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0):
-		return "NVIDIA_16BX2_BLOCK(0)";
-	case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1):
-		return "NVIDIA_16BX2_BLOCK(1)";
-	case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2):
-		return "NVIDIA_16BX2_BLOCK(2)";
-	case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3):
-		return "NVIDIA_16BX2_BLOCK(3)";
-	case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4):
-		return "NVIDIA_16BX2_BLOCK(4)";
-	case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5):
-		return "NVIDIA_16BX2_BLOCK(5)";
-	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
-		return "MOD_BROADCOM_VC4_T_TILED";
-	case DRM_FORMAT_MOD_QCOM_COMPRESSED:
-		return "QCOM_COMPRESSED";
-	default:
-		return "(UNKNOWN MODIFIER)";
+	static char mod_string[4096];
+
+	char *modifier_name = drmGetFormatModifierName(modifier);
+	char *vendor_name = drmGetFormatModifierVendor(modifier);
+	memset(mod_string, 0x00, sizeof(mod_string));
+
+	if (!modifier_name) {
+		if (vendor_name)
+			snprintf(mod_string, sizeof(mod_string), "%s_%s",
+				 vendor_name, "UNKNOWN_MODIFIER");
+		else
+			snprintf(mod_string, sizeof(mod_string), "%s_%s",
+				 "UNKNOWN_VENDOR", "UNKNOWN_MODIFIER");
+		/* safe, as free is no-op for NULL */
+		free(vendor_name);
+		return mod_string;
 	}
+
+	if (modifier == DRM_FORMAT_MOD_LINEAR) {
+		snprintf(mod_string, sizeof(mod_string), "%s", modifier_name);
+		free(modifier_name);
+		free(vendor_name);
+		return mod_string;
+	}
+
+	snprintf(mod_string, sizeof(mod_string), "%s_%s",
+		 vendor_name, modifier_name);
+
+	free(modifier_name);
+	free(vendor_name);
+	return mod_string;
 }
 
 static void dump_in_formats(struct device *dev, uint32_t blob_id)
@@ -1725,13 +1710,21 @@
 static void set_cursors(struct device *dev, struct pipe_arg *pipes, unsigned int count)
 {
 	uint32_t handles[4] = {0}, pitches[4] = {0}, offsets[4] = {0};
+	uint32_t cw = 64;
+	uint32_t ch = 64;
 	struct bo *bo;
+	uint64_t value;
 	unsigned int i;
 	int ret;
 
-	/* maybe make cursor width/height configurable some day */
-	uint32_t cw = 64;
-	uint32_t ch = 64;
+	ret = drmGetCap(dev->fd, DRM_CAP_CURSOR_WIDTH, &value);
+	if (!ret)
+		cw = value;
+
+	ret = drmGetCap(dev->fd, DRM_CAP_CURSOR_HEIGHT, &value);
+	if (!ret)
+		ch = value;
+
 
 	/* create cursor bo.. just using PATTERN_PLAIN as it has
 	 * translucent alpha
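
As a rough usage sketch (not part of the patch), the cursor-sizing pattern the hunk above introduces could be factored as below; get_cursor_size() is a hypothetical helper, and the 64x64 fallback matches the previously hard-coded default:

    #include <stdint.h>
    #include <xf86drm.h>

    /* Hypothetical helper: query the driver's preferred cursor size and fall
     * back to the historical 64x64 default when the capability is not
     * reported (drmGetCap() returns non-zero in that case). */
    static void get_cursor_size(int fd, uint32_t *cw, uint32_t *ch)
    {
        uint64_t value;

        *cw = 64;
        *ch = 64;

        if (drmGetCap(fd, DRM_CAP_CURSOR_WIDTH, &value) == 0)
            *cw = (uint32_t)value;
        if (drmGetCap(fd, DRM_CAP_CURSOR_HEIGHT, &value) == 0)
            *ch = (uint32_t)value;
    }
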
diff --git a/xf86drm.c b/xf86drm.c
index edfeb34..2abc744 100644
--- a/xf86drm.c
+++ b/xf86drm.c
@@ -61,6 +61,7 @@
 #include <sys/sysctl.h>
 #endif
 #include <math.h>
+#include <inttypes.h>
 
 #if defined(__FreeBSD__)
 #include <sys/param.h>
@@ -76,6 +77,7 @@
 
 #include "xf86drm.h"
 #include "libdrm_macros.h"
+#include "drm_fourcc.h"
 
 #include "util_math.h"
 
@@ -124,6 +126,426 @@
 static bool drmNodeIsDRM(int maj, int min);
 static char *drmGetMinorNameForFD(int fd, int type);
 
+#define DRM_MODIFIER(v, f, f_name) \
+       .modifier = DRM_FORMAT_MOD_##v ## _ ##f, \
+       .modifier_name = #f_name
+
+#define DRM_MODIFIER_INVALID(v, f_name) \
+       .modifier = DRM_FORMAT_MOD_INVALID, .modifier_name = #f_name
+
+#define DRM_MODIFIER_LINEAR(v, f_name) \
+       .modifier = DRM_FORMAT_MOD_LINEAR, .modifier_name = #f_name
+
+/* Intel is a bit special as the format doesn't follow other vendors' naming
+ * scheme */
+#define DRM_MODIFIER_INTEL(f, f_name) \
+       .modifier = I915_FORMAT_MOD_##f, .modifier_name = #f_name
+
+struct drmFormatModifierInfo {
+    uint64_t modifier;
+    const char *modifier_name;
+};
+
+struct drmFormatModifierVendorInfo {
+    uint8_t vendor;
+    const char *vendor_name;
+};
+
+#include "generated_static_table_fourcc.h"
+
+struct drmVendorInfo {
+    uint8_t vendor;
+    char *(*vendor_cb)(uint64_t modifier);
+};
+
+struct drmFormatVendorModifierInfo {
+    uint64_t modifier;
+    const char *modifier_name;
+};
+
+static char *
+drmGetFormatModifierNameFromArm(uint64_t modifier);
+
+static char *
+drmGetFormatModifierNameFromNvidia(uint64_t modifier);
+
+static char *
+drmGetFormatModifierNameFromAmd(uint64_t modifier);
+
+static char *
+drmGetFormatModifierNameFromAmlogic(uint64_t modifier);
+
+static const struct drmVendorInfo modifier_format_vendor_table[] = {
+    { DRM_FORMAT_MOD_VENDOR_ARM, drmGetFormatModifierNameFromArm },
+    { DRM_FORMAT_MOD_VENDOR_NVIDIA, drmGetFormatModifierNameFromNvidia },
+    { DRM_FORMAT_MOD_VENDOR_AMD, drmGetFormatModifierNameFromAmd },
+    { DRM_FORMAT_MOD_VENDOR_AMLOGIC, drmGetFormatModifierNameFromAmlogic },
+};
+
+#ifndef AFBC_FORMAT_MOD_MODE_VALUE_MASK
+#define AFBC_FORMAT_MOD_MODE_VALUE_MASK	0x000fffffffffffffULL
+#endif
+
+static const struct drmFormatVendorModifierInfo arm_mode_value_table[] = {
+    { AFBC_FORMAT_MOD_YTR,          "YTR" },
+    { AFBC_FORMAT_MOD_SPLIT,        "SPLIT" },
+    { AFBC_FORMAT_MOD_SPARSE,       "SPARSE" },
+    { AFBC_FORMAT_MOD_CBR,          "CBR" },
+    { AFBC_FORMAT_MOD_TILED,        "TILED" },
+    { AFBC_FORMAT_MOD_SC,           "SC" },
+    { AFBC_FORMAT_MOD_DB,           "DB" },
+    { AFBC_FORMAT_MOD_BCH,          "BCH" },
+    { AFBC_FORMAT_MOD_USM,          "USM" },
+};
+
+static bool is_x_t_amd_gfx9_tile(uint64_t tile)
+{
+    switch (tile) {
+    case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
+    case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
+    case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
+           return true;
+    }
+
+    return false;
+}
+
+static bool
+drmGetAfbcFormatModifierNameFromArm(uint64_t modifier, FILE *fp)
+{
+    uint64_t mode_value = modifier & AFBC_FORMAT_MOD_MODE_VALUE_MASK;
+    uint64_t block_size = mode_value & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK;
+
+    const char *block = NULL;
+    const char *mode = NULL;
+    bool did_print_mode = false;
+
+    /* add block, can only have a (single) block */
+    switch (block_size) {
+    case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+        block = "16x16";
+        break;
+    case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8:
+        block = "32x8";
+        break;
+    case AFBC_FORMAT_MOD_BLOCK_SIZE_64x4:
+        block = "64x4";
+        break;
+    case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4:
+        block = "32x8_64x4";
+        break;
+    }
+
+    if (!block) {
+        return false;
+    }
+
+    fprintf(fp, "BLOCK_SIZE=%s,", block);
+
+    /* add mode */
+    for (unsigned int i = 0; i < ARRAY_SIZE(arm_mode_value_table); i++) {
+        if (arm_mode_value_table[i].modifier & mode_value) {
+            mode = arm_mode_value_table[i].modifier_name;
+            if (!did_print_mode) {
+                fprintf(fp, "MODE=%s", mode);
+                did_print_mode = true;
+            } else {
+                fprintf(fp, "|%s", mode);
+            }
+        }
+    }
+
+    return true;
+}
+
+static bool
+drmGetAfrcFormatModifierNameFromArm(uint64_t modifier, FILE *fp)
+{
+    for (unsigned int i = 0; i < 2; ++i) {
+        uint64_t coding_unit_block =
+          (modifier >> (i * 4)) & AFRC_FORMAT_MOD_CU_SIZE_MASK;
+        const char *coding_unit_size = NULL;
+
+        switch (coding_unit_block) {
+        case AFRC_FORMAT_MOD_CU_SIZE_16:
+            coding_unit_size = "CU_16";
+            break;
+        case AFRC_FORMAT_MOD_CU_SIZE_24:
+            coding_unit_size = "CU_24";
+            break;
+        case AFRC_FORMAT_MOD_CU_SIZE_32:
+            coding_unit_size = "CU_32";
+            break;
+        }
+
+        if (!coding_unit_size) {
+            if (i == 0) {
+                return false;
+            }
+            break;
+        }
+
+        if (i == 0) {
+            fprintf(fp, "P0=%s,", coding_unit_size);
+        } else {
+            fprintf(fp, "P12=%s,", coding_unit_size);
+        }
+    }
+
+    bool scan_layout =
+        (modifier & AFRC_FORMAT_MOD_LAYOUT_SCAN) == AFRC_FORMAT_MOD_LAYOUT_SCAN;
+    if (scan_layout) {
+        fprintf(fp, "SCAN");
+    } else {
+        fprintf(fp, "ROT");
+    }
+    return true;
+}
+
+static char *
+drmGetFormatModifierNameFromArm(uint64_t modifier)
+{
+    uint64_t type = (modifier >> 52) & 0xf;
+
+    FILE *fp;
+    size_t size = 0;
+    char *modifier_name = NULL;
+    bool result = false;
+
+    fp = open_memstream(&modifier_name, &size);
+    if (!fp)
+        return NULL;
+
+    switch (type) {
+    case DRM_FORMAT_MOD_ARM_TYPE_AFBC:
+        result = drmGetAfbcFormatModifierNameFromArm(modifier, fp);
+        break;
+    case DRM_FORMAT_MOD_ARM_TYPE_AFRC:
+        result = drmGetAfrcFormatModifierNameFromArm(modifier, fp);
+        break;
+    /* misc type is already handled by the static table */
+    case DRM_FORMAT_MOD_ARM_TYPE_MISC:
+    default:
+        result = false;
+        break;
+    }
+
+    fclose(fp);
+    if (!result) {
+        free(modifier_name);
+        return NULL;
+    }
+
+    return modifier_name;
+}
+
+static char *
+drmGetFormatModifierNameFromNvidia(uint64_t modifier)
+{
+    uint64_t height, kind, gen, sector, compression;
+
+    height = modifier & 0xf;
+    kind = (modifier >> 12) & 0xff;
+
+    gen = (modifier >> 20) & 0x3;
+    sector = (modifier >> 22) & 0x1;
+    compression = (modifier >> 23) & 0x7;
+
+    /* just in case there could be other simpler modifiers, not yet added,
+     * avoid testing against TEGRA_TILE */
+    if ((modifier & 0x10) == 0x10) {
+        char *mod_nvidia;
+        asprintf(&mod_nvidia, "BLOCK_LINEAR_2D,HEIGHT=%"PRIu64",KIND=%"PRIu64","
+                 "GEN=%"PRIu64",SECTOR=%"PRIu64",COMPRESSION=%"PRIu64"", height,
+                 kind, gen, sector, compression);
+        return mod_nvidia;
+    }
+
+    return NULL;
+}
+
+static void
+drmGetFormatModifierNameFromAmdDcc(uint64_t modifier, FILE *fp)
+{
+    uint64_t dcc_max_compressed_block =
+                AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
+    uint64_t dcc_retile = AMD_FMT_MOD_GET(DCC_RETILE, modifier);
+
+    const char *dcc_max_compressed_block_str = NULL;
+
+    fprintf(fp, ",DCC");
+
+    if (dcc_retile)
+        fprintf(fp, ",DCC_RETILE");
+
+    if (!dcc_retile && AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier))
+        fprintf(fp, ",DCC_PIPE_ALIGN");
+
+    if (AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier))
+        fprintf(fp, ",DCC_INDEPENDENT_64B");
+
+    if (AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier))
+        fprintf(fp, ",DCC_INDEPENDENT_128B");
+
+    switch (dcc_max_compressed_block) {
+    case AMD_FMT_MOD_DCC_BLOCK_64B:
+        dcc_max_compressed_block_str = "64B";
+        break;
+    case AMD_FMT_MOD_DCC_BLOCK_128B:
+        dcc_max_compressed_block_str = "128B";
+        break;
+    case AMD_FMT_MOD_DCC_BLOCK_256B:
+        dcc_max_compressed_block_str = "256B";
+        break;
+    }
+
+    if (dcc_max_compressed_block_str)
+        fprintf(fp, ",DCC_MAX_COMPRESSED_BLOCK=%s",
+                dcc_max_compressed_block_str);
+
+    if (AMD_FMT_MOD_GET(DCC_CONSTANT_ENCODE, modifier))
+        fprintf(fp, ",DCC_CONSTANT_ENCODE");
+}
+
+static void
+drmGetFormatModifierNameFromAmdTile(uint64_t modifier, FILE *fp)
+{
+    uint64_t pipe_xor_bits, bank_xor_bits, packers, rb;
+    uint64_t pipe, pipe_align, dcc, dcc_retile, tile_version;
+
+    pipe_align = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
+    pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
+    dcc = AMD_FMT_MOD_GET(DCC, modifier);
+    dcc_retile = AMD_FMT_MOD_GET(DCC_RETILE, modifier);
+    tile_version = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
+
+    fprintf(fp, ",PIPE_XOR_BITS=%"PRIu64, pipe_xor_bits);
+
+    if (tile_version == AMD_FMT_MOD_TILE_VER_GFX9) {
+        bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
+        fprintf(fp, ",BANK_XOR_BITS=%"PRIu64, bank_xor_bits);
+    }
+
+    if (tile_version == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
+        packers = AMD_FMT_MOD_GET(PACKERS, modifier);
+        fprintf(fp, ",PACKERS=%"PRIu64, packers);
+    }
+
+    if (dcc && tile_version == AMD_FMT_MOD_TILE_VER_GFX9) {
+        rb = AMD_FMT_MOD_GET(RB, modifier);
+        fprintf(fp, ",RB=%"PRIu64, rb);
+    }
+
+    if (dcc && tile_version == AMD_FMT_MOD_TILE_VER_GFX9 &&
+        (dcc_retile || pipe_align)) {
+        pipe = AMD_FMT_MOD_GET(PIPE, modifier);
+        fprintf(fp, ",PIPE_%"PRIu64, pipe);
+    }
+}
+
+static char *
+drmGetFormatModifierNameFromAmd(uint64_t modifier)
+{
+    uint64_t tile, tile_version, dcc;
+    FILE *fp;
+    char *mod_amd = NULL;
+    size_t size = 0;
+
+    const char *str_tile = NULL;
+    const char *str_tile_version = NULL;
+
+    tile = AMD_FMT_MOD_GET(TILE, modifier);
+    tile_version = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
+    dcc = AMD_FMT_MOD_GET(DCC, modifier);
+
+    fp = open_memstream(&mod_amd, &size);
+    if (!fp)
+        return NULL;
+
+    /* add tile  */
+    switch (tile_version) {
+    case AMD_FMT_MOD_TILE_VER_GFX9:
+        str_tile_version = "GFX9";
+        break;
+    case AMD_FMT_MOD_TILE_VER_GFX10:
+        str_tile_version = "GFX10";
+        break;
+    case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
+        str_tile_version = "GFX10_RBPLUS";
+        break;
+    }
+
+    if (str_tile_version) {
+        fprintf(fp, "%s", str_tile_version);
+    } else {
+        fclose(fp);
+        free(mod_amd);
+        return NULL;
+    }
+
+    /* add tile str */
+    switch (tile) {
+    case AMD_FMT_MOD_TILE_GFX9_64K_S:
+        str_tile = "GFX9_64K_S";
+        break;
+    case AMD_FMT_MOD_TILE_GFX9_64K_D:
+        str_tile = "GFX9_64K_D";
+        break;
+    case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
+        str_tile = "GFX9_64K_S_X";
+        break;
+    case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
+        str_tile = "GFX9_64K_D_X";
+        break;
+    case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
+        str_tile = "GFX9_64K_R_X";
+        break;
+    }
+
+    if (str_tile)
+        fprintf(fp, ",%s", str_tile);
+
+    if (dcc)
+        drmGetFormatModifierNameFromAmdDcc(modifier, fp);
+
+    if (tile_version >= AMD_FMT_MOD_TILE_VER_GFX9 && is_x_t_amd_gfx9_tile(tile))
+        drmGetFormatModifierNameFromAmdTile(modifier, fp);
+
+    fclose(fp);
+    return mod_amd;
+}
+
+static char *
+drmGetFormatModifierNameFromAmlogic(uint64_t modifier)
+{
+    uint64_t layout = modifier & 0xff;
+    uint64_t options = (modifier >> 8) & 0xff;
+    char *mod_amlogic = NULL;
+
+    const char *layout_str;
+    const char *opts_str;
+
+    switch (layout) {
+    case AMLOGIC_FBC_LAYOUT_BASIC:
+       layout_str = "BASIC";
+       break;
+    case AMLOGIC_FBC_LAYOUT_SCATTER:
+       layout_str = "SCATTER";
+       break;
+    default:
+       layout_str = "INVALID_LAYOUT";
+       break;
+    }
+
+    if (options & AMLOGIC_FBC_OPTION_MEM_SAVING)
+        opts_str = "MEM_SAVING";
+    else
+        opts_str = "0";
+
+    asprintf(&mod_amlogic, "FBC,LAYOUT=%s,OPTIONS=%s", layout_str, opts_str);
+    return mod_amlogic;
+}
+
 static unsigned log2_int(unsigned x)
 {
     unsigned l;
@@ -2913,6 +3335,15 @@
     return 0;
 }
 
+drm_public int drmCloseBufferHandle(int fd, uint32_t handle)
+{
+    struct drm_gem_close args;
+
+    memclear(args);
+    args.handle = handle;
+    return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
+}
+
 static char *drmGetMinorNameForFD(int fd, int type)
 {
 #ifdef __linux__
@@ -3640,6 +4071,7 @@
 static int drm_usb_dev_path(int maj, int min, char *path, size_t len)
 {
     char *value, *tmp_path, *slash;
+    bool usb_device, usb_interface;
 
     snprintf(path, len, "/sys/dev/char/%d:%d/device", maj, min);
 
@@ -3647,9 +4079,13 @@
     if (!value)
         return -ENOENT;
 
-    if (strcmp(value, "usb_device") == 0)
+    usb_device = strcmp(value, "usb_device") == 0;
+    usb_interface = strcmp(value, "usb_interface") == 0;
+    free(value);
+
+    if (usb_device)
         return 0;
-    if (strcmp(value, "usb_interface") != 0)
+    if (!usb_interface)
         return -ENOTSUP;
 
     /* The parent of a usb_interface is a usb_device */
@@ -4585,3 +5021,66 @@
 
     return ret;
 }
+
+static char *
+drmGetFormatModifierFromSimpleTokens(uint64_t modifier)
+{
+    unsigned int i;
+
+    for (i = 0; i < ARRAY_SIZE(drm_format_modifier_table); i++) {
+        if (drm_format_modifier_table[i].modifier == modifier)
+            return strdup(drm_format_modifier_table[i].modifier_name);
+    }
+
+    return NULL;
+}
+
+/** Retrieves a human-readable representation of the vendor (as a string)
+ * from the format modifier token
+ *
+ * \param modifier the format modifier token
+ * \return a malloc'ed string naming the vendor, or NULL if the vendor is
+ * unknown. The caller is responsible for freeing it.
+ */
+drm_public char *
+drmGetFormatModifierVendor(uint64_t modifier)
+{
+    unsigned int i;
+    uint8_t vendor = fourcc_mod_get_vendor(modifier);
+
+    for (i = 0; i < ARRAY_SIZE(drm_format_modifier_vendor_table); i++) {
+        if (drm_format_modifier_vendor_table[i].vendor == vendor)
+            return strdup(drm_format_modifier_vendor_table[i].vendor_name);
+    }
+
+    return NULL;
+}
+
+/** Retrieves a human-readable representation string from a format modifier
+ * token
+ *
+ * If the vendor-specific function cannot extract a valid name and the
+ * modifier is not found in the static table, this function returns NULL.
+ *
+ * \param modifier the format modifier token
+ * \return a malloc'ed string representation of the modifier, or NULL. The
+ * caller is responsible for freeing the returned string.
+ *
+ */
+drm_public char *
+drmGetFormatModifierName(uint64_t modifier)
+{
+    uint8_t vendorid = fourcc_mod_get_vendor(modifier);
+    char *modifier_found = NULL;
+    unsigned int i;
+
+    for (i = 0; i < ARRAY_SIZE(modifier_format_vendor_table); i++) {
+        if (modifier_format_vendor_table[i].vendor == vendorid)
+            modifier_found = modifier_format_vendor_table[i].vendor_cb(modifier);
+    }
+
+    if (!modifier_found)
+        return drmGetFormatModifierFromSimpleTokens(modifier);
+
+    return modifier_found;
+}
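
A minimal sketch of how callers such as modetest are expected to consume the new helpers (print_modifier() is a hypothetical example, not part of the patch); both functions return malloc'ed strings or NULL, and free(NULL) is a no-op, so no extra checks are needed before freeing:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <xf86drm.h>

    /* Hypothetical helper: print a modifier as "<VENDOR>_<NAME>", the same
     * convention modetest's modifier_to_string() now follows. */
    static void print_modifier(uint64_t modifier)
    {
        char *name = drmGetFormatModifierName(modifier);
        char *vendor = drmGetFormatModifierVendor(modifier);

        printf("%s_%s\n", vendor ? vendor : "UNKNOWN_VENDOR",
               name ? name : "UNKNOWN_MODIFIER");

        free(name);
        free(vendor);
    }
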
diff --git a/xf86drm.h b/xf86drm.h
index 9fc06ab..31c1e97 100644
--- a/xf86drm.h
+++ b/xf86drm.h
@@ -834,6 +834,8 @@
 extern int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd);
 extern int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle);
 
+extern int drmCloseBufferHandle(int fd, uint32_t handle);
+
 extern char *drmGetPrimaryDeviceNameFromFd(int fd);
 extern char *drmGetRenderDeviceNameFromFd(int fd);
 
@@ -944,6 +946,17 @@
 			      uint32_t src_handle, uint64_t src_point,
 			      uint32_t flags);
 
+extern char *
+drmGetFormatModifierVendor(uint64_t modifier);
+
+extern char *
+drmGetFormatModifierName(uint64_t modifier);
+
+#ifndef fourcc_mod_get_vendor
+#define fourcc_mod_get_vendor(modifier) \
+       (((modifier) >> 56) & 0xff)
+#endif
+
 #if defined(__cplusplus)
 }
 #endif
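
A minimal sketch of the newly exported wrapper in use (release_bo() is a hypothetical helper, not part of the patch); it replaces the open-coded struct drm_gem_close + drmIoctl(DRM_IOCTL_GEM_CLOSE) sequence shown in xf86drm.c above:

    #include <stdint.h>
    #include <xf86drm.h>

    /* Hypothetical helper: release a GEM buffer handle through the new
     * wrapper.  Like other drmIoctl()-based wrappers it returns 0 on
     * success and -1 (with errno set) on failure. */
    static int release_bo(int fd, uint32_t handle)
    {
        return drmCloseBufferHandle(fd, handle);
    }
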
diff --git a/xf86drmMode.c b/xf86drmMode.c
index c3920b9..0106954 100644
--- a/xf86drmMode.c
+++ b/xf86drmMode.c
@@ -38,6 +38,9 @@
 #include <stdlib.h>
 #include <sys/ioctl.h>
 #if HAVE_SYS_SYSCTL_H
+#ifdef __FreeBSD__
+#include <sys/types.h>
+#endif
 #include <sys/sysctl.h>
 #endif
 #include <stdio.h>
diff --git a/xf86drmMode.h b/xf86drmMode.h
index 7269678..de0e2fd 100644
--- a/xf86drmMode.h
+++ b/xf86drmMode.h
@@ -142,13 +142,15 @@
 	uint32_t *blob_ids; /* store the blob IDs */
 } drmModePropertyRes, *drmModePropertyPtr;
 
-static __inline int drm_property_type_is(drmModePropertyPtr property,
+static inline uint32_t drmModeGetPropertyType(const drmModePropertyRes *prop)
+{
+	return prop->flags & (DRM_MODE_PROP_LEGACY_TYPE | DRM_MODE_PROP_EXTENDED_TYPE);
+}
+
+static inline int drm_property_type_is(const drmModePropertyPtr property,
 		uint32_t type)
 {
-	/* instanceof for props.. handles extended type vs original types: */
-	if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
-		return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
-	return property->flags & type;
+	return drmModeGetPropertyType(property) == type;
 }
 
 typedef struct _drmModeCrtc {
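
A minimal sketch of the reworked property-type check (property_is_range() is a hypothetical helper, not part of the patch); drmModeGetPropertyType() masks both the legacy and the extended type bits, so testing for a specific type reduces to a single comparison:

    #include <xf86drmMode.h>

    /* Hypothetical helper: true if the property is of the legacy RANGE type,
     * using the new accessor rather than inspecting prop->flags directly. */
    static int property_is_range(const drmModePropertyRes *prop)
    {
        return drmModeGetPropertyType(prop) == DRM_MODE_PROP_RANGE;
    }
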