Merge remote-tracking branch 'aosp/upstream-master' into HEAD

Test: None
Bug: 37224300
Change-Id: Ie84cf493bacb6afe0eb1e647cb8d1b20fe92edd0
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..6628ecd
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,15 @@
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+BasedOnStyle: LLVM
+AllowShortFunctionsOnASingleLine: None
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+BreakBeforeBraces: Linux
+ColumnLimit: 100
+IndentWidth: 8
+TabWidth: 8
+UseTab: Always
+Cpp11BracedListStyle: false
+IndentCaseLabels: false
diff --git a/Android.gralloc.mk b/Android.gralloc.mk
new file mode 100644
index 0000000..4a0b125
--- /dev/null
+++ b/Android.gralloc.mk
@@ -0,0 +1,10 @@
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+LOCAL_CPP_EXTENSION := .cc
+
+LOCAL_SRC_FILES += \
+	cros_gralloc/cros_alloc_device.cc \
+	cros_gralloc/cros_gralloc_helpers.cc \
+	cros_gralloc/cros_gralloc_module.cc
diff --git a/Android.mk b/Android.mk
new file mode 100644
index 0000000..d5225ec
--- /dev/null
+++ b/Android.mk
@@ -0,0 +1,59 @@
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+ifeq ($(strip $(BOARD_USES_MINIGBM)), true)
+
+MINIGBM_GRALLOC_MK := $(call my-dir)/Android.gralloc.mk
+LOCAL_PATH := $(call my-dir)
+intel_drivers := i915 i965
+include $(CLEAR_VARS)
+
+SUBDIRS := cros_gralloc
+
+LOCAL_SHARED_LIBRARIES := \
+	libcutils \
+	libdrm
+
+LOCAL_SRC_FILES := \
+	amdgpu.c \
+	cirrus.c \
+	drv.c \
+	evdi.c \
+	exynos.c \
+	gma500.c \
+	helpers.c \
+	i915.c \
+	marvell.c \
+	mediatek.c \
+	nouveau.c \
+	rockchip.c \
+	tegra.c \
+	udl.c \
+	vc4.c \
+	vgem.c \
+	virtio_gpu.c
+
+include $(MINIGBM_GRALLOC_MK)
+
+LOCAL_CPPFLAGS += -std=c++11 -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64
+LOCAL_CFLAGS += -Wall -Wsign-compare -Wpointer-arith \
+		-Wcast-qual -Wcast-align \
+		-D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64
+
+ifneq ($(filter $(intel_drivers), $(BOARD_GPU_DRIVERS)),)
+LOCAL_CPPFLAGS += -DDRV_I915
+LOCAL_CFLAGS += -DDRV_I915
+LOCAL_SHARED_LIBRARIES += libdrm_intel
+endif
+
+LOCAL_MODULE := gralloc.$(TARGET_BOARD_PLATFORM)
+LOCAL_MODULE_TAGS := optional
+# The preferred path for vendor HALs is /vendor/lib/hw
+LOCAL_PROPRIETARY_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+LOCAL_MODULE_SUFFIX := $(TARGET_SHLIB_SUFFIX)
+include $(BUILD_SHARED_LIBRARY)
+
+endif
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..3bd5095
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,27 @@
+// Copyright (c) 2017 The Chromium OS Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//    * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//    * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//    * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..32cbb57
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,51 @@
+# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+include common.mk
+
+PC_DEPS = libdrm
+PC_CFLAGS := $(shell $(PKG_CONFIG) --cflags $(PC_DEPS))
+PC_LIBS := $(shell $(PKG_CONFIG) --libs $(PC_DEPS))
+
+CPPFLAGS += -D_GNU_SOURCE=1
+CFLAGS += -std=c99 -Wall -Wsign-compare -Wpointer-arith -Wcast-qual \
+	  -Wcast-align -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64
+
+ifdef DRV_AMDGPU
+	CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_amdgpu)
+	LDLIBS += -lamdgpuaddr
+endif
+ifdef DRV_EXYNOS
+	CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_exynos)
+endif
+ifdef DRV_I915
+	CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_intel)
+endif
+ifdef DRV_ROCKCHIP
+	CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_rockchip)
+endif
+
+CPPFLAGS += $(PC_CFLAGS)
+LDLIBS += $(PC_LIBS)
+
+LIBDIR ?= /usr/lib/
+
+GBM_VERSION_MAJOR := 1
+MINIGBM_VERSION := $(GBM_VERSION_MAJOR).0.0
+MINIGBM_FILENAME := libminigbm.so.$(MINIGBM_VERSION)
+
+CC_LIBRARY($(MINIGBM_FILENAME)): LDFLAGS += -Wl,-soname,libgbm.so.$(GBM_VERSION_MAJOR)
+CC_LIBRARY($(MINIGBM_FILENAME)): $(C_OBJECTS)
+
+all: CC_LIBRARY($(MINIGBM_FILENAME))
+
+clean: CLEAN($(MINIGBM_FILENAME))
+
+install: all
+	mkdir -p $(DESTDIR)/$(LIBDIR)
+	install -D -m 755 $(OUT)/$(MINIGBM_FILENAME) $(DESTDIR)/$(LIBDIR)
+	ln -sf $(MINIGBM_FILENAME) $(DESTDIR)/$(LIBDIR)/libgbm.so
+	ln -sf $(MINIGBM_FILENAME) $(DESTDIR)/$(LIBDIR)/libgbm.so.$(GBM_VERSION_MAJOR)
+	install -D -m 0644 $(SRC)/gbm.pc $(DESTDIR)$(LIBDIR)/pkgconfig/gbm.pc
+	install -D -m 0644 $(SRC)/gbm.h $(DESTDIR)/usr/include/gbm.h
diff --git a/PRESUBMIT.cfg b/PRESUBMIT.cfg
new file mode 100644
index 0000000..5dfbb4b
--- /dev/null
+++ b/PRESUBMIT.cfg
@@ -0,0 +1,12 @@
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+[Hook Overrides]
+stray_whitespace_check: false
+long_line_check: false
+cros_license_check: false
+tab_check: false
+bug_field_check: false
+test_field_check: false
+[Hook Scripts]
+hook0 = ./presubmit.sh
diff --git a/amdgpu.c b/amdgpu.c
new file mode 100644
index 0000000..ee05009
--- /dev/null
+++ b/amdgpu.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifdef DRV_AMDGPU
+#include <amdgpu.h>
+#include <amdgpu_drm.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <xf86drm.h>
+
+#include "addrinterface.h"
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+#ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
+#define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
+#endif
+
+// clang-format off
+#define mmCC_RB_BACKEND_DISABLE		0x263d
+#define mmGB_TILE_MODE0			0x2644
+#define mmGB_MACROTILE_MODE0		0x2664
+#define mmGB_ADDR_CONFIG		0x263e
+#define mmMC_ARB_RAMCFG			0x9d8
+
+enum {
+	FAMILY_UNKNOWN,
+	FAMILY_SI,
+	FAMILY_CI,
+	FAMILY_KV,
+	FAMILY_VI,
+	FAMILY_CZ,
+	FAMILY_PI,
+	FAMILY_LAST,
+};
+// clang-format on
+
+const static uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XBGR8888,
+						  DRM_FORMAT_XRGB8888 };
+
+const static uint32_t texture_source_formats[] = { DRM_FORMAT_NV21, DRM_FORMAT_NV12 };
+
+static int amdgpu_set_metadata(int fd, uint32_t handle, struct amdgpu_bo_metadata *info)
+{
+	struct drm_amdgpu_gem_metadata args = { 0 };
+
+	if (!info)
+		return -EINVAL;
+
+	args.handle = handle;
+	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
+	args.data.flags = info->flags;
+	args.data.tiling_info = info->tiling_info;
+
+	if (info->size_metadata > sizeof(args.data.data))
+		return -EINVAL;
+
+	if (info->size_metadata) {
+		args.data.data_size_bytes = info->size_metadata;
+		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
+	}
+
+	return drmCommandWriteRead(fd, DRM_AMDGPU_GEM_METADATA, &args, sizeof(args));
+}
+
+static int amdgpu_read_mm_regs(int fd, unsigned dword_offset, unsigned count, uint32_t instance,
+			       uint32_t flags, uint32_t *values)
+{
+	struct drm_amdgpu_info request;
+
+	memset(&request, 0, sizeof(request));
+	request.return_pointer = (uintptr_t)values;
+	request.return_size = count * sizeof(uint32_t);
+	request.query = AMDGPU_INFO_READ_MMR_REG;
+	request.read_mmr_reg.dword_offset = dword_offset;
+	request.read_mmr_reg.count = count;
+	request.read_mmr_reg.instance = instance;
+	request.read_mmr_reg.flags = flags;
+
+	return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
+}
+
+static int amdgpu_query_gpu(int fd, struct amdgpu_gpu_info *gpu_info)
+{
+	int ret;
+	uint32_t instance;
+
+	if (!gpu_info)
+		return -EINVAL;
+
+	instance = AMDGPU_INFO_MMR_SH_INDEX_MASK << AMDGPU_INFO_MMR_SH_INDEX_SHIFT;
+
+	ret = amdgpu_read_mm_regs(fd, mmCC_RB_BACKEND_DISABLE, 1, instance, 0,
+				  &gpu_info->backend_disable[0]);
+	if (ret)
+		return ret;
+	/* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
+	gpu_info->backend_disable[0] = (gpu_info->backend_disable[0] >> 16) & 0xff;
+
+	ret = amdgpu_read_mm_regs(fd, mmGB_TILE_MODE0, 32, 0xffffffff, 0, gpu_info->gb_tile_mode);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_read_mm_regs(fd, mmGB_MACROTILE_MODE0, 16, 0xffffffff, 0,
+				  gpu_info->gb_macro_tile_mode);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_read_mm_regs(fd, mmGB_ADDR_CONFIG, 1, 0xffffffff, 0, &gpu_info->gb_addr_cfg);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_read_mm_regs(fd, mmMC_ARB_RAMCFG, 1, 0xffffffff, 0, &gpu_info->mc_arb_ramcfg);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void *ADDR_API alloc_sys_mem(const ADDR_ALLOCSYSMEM_INPUT *in)
+{
+	return malloc(in->sizeInBytes);
+}
+
+static ADDR_E_RETURNCODE ADDR_API free_sys_mem(const ADDR_FREESYSMEM_INPUT *in)
+{
+	free(in->pVirtAddr);
+	return ADDR_OK;
+}
+
+static int amdgpu_addrlib_compute(void *addrlib, uint32_t width, uint32_t height, uint32_t format,
+				  uint32_t usage, uint32_t *tiling_flags,
+				  ADDR_COMPUTE_SURFACE_INFO_OUTPUT *addr_out)
+{
+	ADDR_COMPUTE_SURFACE_INFO_INPUT addr_surf_info_in = { 0 };
+	ADDR_TILEINFO addr_tile_info = { 0 };
+	ADDR_TILEINFO addr_tile_info_out = { 0 };
+	uint32_t bits_per_pixel;
+
+	addr_surf_info_in.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
+
+	/* Set the requested tiling mode. */
+	addr_surf_info_in.tileMode = ADDR_TM_2D_TILED_THIN1;
+	if (usage & (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
+		addr_surf_info_in.tileMode = ADDR_TM_LINEAR_ALIGNED;
+	else if (width <= 16 || height <= 16)
+		addr_surf_info_in.tileMode = ADDR_TM_1D_TILED_THIN1;
+
+	bits_per_pixel = drv_stride_from_format(format, 1, 0) * 8;
+	/* Bits per pixel should be calculated from format. */
+	addr_surf_info_in.bpp = bits_per_pixel;
+	addr_surf_info_in.numSamples = 1;
+	addr_surf_info_in.width = width;
+	addr_surf_info_in.height = height;
+	addr_surf_info_in.numSlices = 1;
+	addr_surf_info_in.pTileInfo = &addr_tile_info;
+	addr_surf_info_in.tileIndex = -1;
+
+	/* This disables incorrect calculations (hacks) in addrlib. */
+	addr_surf_info_in.flags.noStencil = 1;
+
+	/* Set the micro tile type. */
+	if (usage & BO_USE_SCANOUT)
+		addr_surf_info_in.tileType = ADDR_DISPLAYABLE;
+	else
+		addr_surf_info_in.tileType = ADDR_NON_DISPLAYABLE;
+
+	addr_out->size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
+	addr_out->pTileInfo = &addr_tile_info_out;
+
+	if (AddrComputeSurfaceInfo(addrlib, &addr_surf_info_in, addr_out) != ADDR_OK)
+		return -EINVAL;
+
+	ADDR_CONVERT_TILEINFOTOHW_INPUT s_in = { 0 };
+	ADDR_CONVERT_TILEINFOTOHW_OUTPUT s_out = { 0 };
+	ADDR_TILEINFO s_tile_hw_info_out = { 0 };
+
+	s_in.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_INPUT);
+	/* Convert from real value to HW value */
+	s_in.reverse = 0;
+	s_in.pTileInfo = &addr_tile_info_out;
+	s_in.tileIndex = -1;
+
+	s_out.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_OUTPUT);
+	s_out.pTileInfo = &s_tile_hw_info_out;
+
+	if (AddrConvertTileInfoToHW(addrlib, &s_in, &s_out) != ADDR_OK)
+		return -EINVAL;
+
+	if (addr_out->tileMode >= ADDR_TM_2D_TILED_THIN1)
+		/* 2D_TILED_THIN1 */
+		*tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4);
+	else if (addr_out->tileMode >= ADDR_TM_1D_TILED_THIN1)
+		/* 1D_TILED_THIN1 */
+		*tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2);
+	else
+		/* LINEAR_ALIGNED */
+		*tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1);
+
+	*tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, drv_log_base2(addr_tile_info_out.bankWidth));
+	*tiling_flags |=
+	    AMDGPU_TILING_SET(BANK_HEIGHT, drv_log_base2(addr_tile_info_out.bankHeight));
+	*tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, s_tile_hw_info_out.tileSplitBytes);
+	*tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT,
+					   drv_log_base2(addr_tile_info_out.macroAspectRatio));
+	*tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, s_tile_hw_info_out.pipeConfig);
+	*tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, s_tile_hw_info_out.banks);
+
+	return 0;
+}
+
+static void *amdgpu_addrlib_init(int fd)
+{
+	int ret;
+	ADDR_CREATE_INPUT addr_create_input = { 0 };
+	ADDR_CREATE_OUTPUT addr_create_output = { 0 };
+	ADDR_REGISTER_VALUE reg_value = { 0 };
+	ADDR_CREATE_FLAGS create_flags = { { 0 } };
+	ADDR_E_RETURNCODE addr_ret;
+
+	addr_create_input.size = sizeof(ADDR_CREATE_INPUT);
+	addr_create_output.size = sizeof(ADDR_CREATE_OUTPUT);
+
+	struct amdgpu_gpu_info gpu_info = { 0 };
+
+	ret = amdgpu_query_gpu(fd, &gpu_info);
+
+	if (ret) {
+		fprintf(stderr, "[%s] failed with error %d\n", __func__, ret);
+		return NULL;
+	}
+
+	reg_value.noOfBanks = gpu_info.mc_arb_ramcfg & 0x3;
+	reg_value.gbAddrConfig = gpu_info.gb_addr_cfg;
+	reg_value.noOfRanks = (gpu_info.mc_arb_ramcfg & 0x4) >> 2;
+
+	reg_value.backendDisables = gpu_info.backend_disable[0];
+	reg_value.pTileConfig = gpu_info.gb_tile_mode;
+	reg_value.noOfEntries = sizeof(gpu_info.gb_tile_mode) / sizeof(gpu_info.gb_tile_mode[0]);
+	reg_value.pMacroTileConfig = gpu_info.gb_macro_tile_mode;
+	reg_value.noOfMacroEntries =
+	    sizeof(gpu_info.gb_macro_tile_mode) / sizeof(gpu_info.gb_macro_tile_mode[0]);
+	create_flags.value = 0;
+	create_flags.useTileIndex = 1;
+
+	addr_create_input.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
+
+	addr_create_input.chipFamily = FAMILY_CZ;
+	addr_create_input.createFlags = create_flags;
+	addr_create_input.callbacks.allocSysMem = alloc_sys_mem;
+	addr_create_input.callbacks.freeSysMem = free_sys_mem;
+	addr_create_input.callbacks.debugPrint = 0;
+	addr_create_input.regValue = reg_value;
+
+	addr_ret = AddrCreate(&addr_create_input, &addr_create_output);
+
+	if (addr_ret != ADDR_OK) {
+		fprintf(stderr, "[%s] failed with error %d\n", __func__, addr_ret);
+		return NULL;
+	}
+
+	return addr_create_output.hLib;
+}
+
+static int amdgpu_init(struct driver *drv)
+{
+	int ret;
+	void *addrlib;
+	struct format_metadata metadata;
+	uint32_t flags = BO_USE_RENDER_MASK;
+
+	addrlib = amdgpu_addrlib_init(drv_get_fd(drv));
+	if (!addrlib)
+		return -1;
+
+	drv->priv = addrlib;
+
+	ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+				   &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+	if (ret)
+		return ret;
+
+	drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA, BO_USE_SCANOUT);
+	drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_SCANOUT);
+
+	metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED;
+	metadata.priority = 2;
+	metadata.modifier = DRM_FORMAT_MOD_NONE;
+
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &metadata, flags);
+	if (ret)
+		return ret;
+
+	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+	drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
+
+	metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED;
+	metadata.priority = 3;
+	metadata.modifier = DRM_FORMAT_MOD_NONE;
+
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &metadata, flags);
+	if (ret)
+		return ret;
+
+	flags &= ~BO_USE_SW_WRITE_OFTEN;
+	flags &= ~BO_USE_SW_READ_OFTEN;
+	flags &= ~BO_USE_LINEAR;
+
+	metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
+	metadata.priority = 4;
+
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &metadata, flags);
+	if (ret)
+		return ret;
+
+	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_SCANOUT);
+	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_SCANOUT);
+	drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
+
+	metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
+	metadata.priority = 5;
+
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &metadata, flags);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static void amdgpu_close(struct driver *drv)
+{
+	AddrDestroy(drv->priv);
+	drv->priv = NULL;
+}
+
+static int amdgpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+			    uint32_t usage)
+{
+	void *addrlib = bo->drv->priv;
+	union drm_amdgpu_gem_create gem_create;
+	struct amdgpu_bo_metadata metadata = { 0 };
+	ADDR_COMPUTE_SURFACE_INFO_OUTPUT addr_out = { 0 };
+	uint32_t tiling_flags = 0;
+	uint32_t gem_create_flags = 0;
+	size_t plane;
+	int ret;
+
+	if (format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21) {
+		drv_bo_from_format(bo, ALIGN(width, 64), height, format);
+	} else {
+		if (amdgpu_addrlib_compute(addrlib, width, height, format, usage, &tiling_flags,
+					   &addr_out) < 0)
+			return -EINVAL;
+
+		bo->tiling = tiling_flags;
+		/* RGB has 1 plane only */
+		bo->offsets[0] = 0;
+		bo->total_size = bo->sizes[0] = addr_out.surfSize;
+		bo->strides[0] = addr_out.pixelPitch * DIV_ROUND_UP(addr_out.pixelBits, 8);
+	}
+
+	if (usage & (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN |
+		     BO_USE_SW_WRITE_RARELY | BO_USE_SW_READ_RARELY))
+		gem_create_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+	else
+		gem_create_flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+
+	memset(&gem_create, 0, sizeof(gem_create));
+
+	gem_create.in.bo_size = bo->total_size;
+	gem_create.in.alignment = addr_out.baseAlign;
+	/* Set the placement. */
+	gem_create.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
+	gem_create.in.domain_flags = gem_create_flags;
+	/* Allocate the buffer with the preferred heap. */
+	ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
+				  sizeof(gem_create));
+
+	if (ret < 0)
+		return ret;
+
+	metadata.tiling_info = tiling_flags;
+
+	for (plane = 0; plane < bo->num_planes; plane++)
+		bo->handles[plane].u32 = gem_create.out.handle;
+
+	ret = amdgpu_set_metadata(drv_get_fd(bo->drv), bo->handles[0].u32, &metadata);
+
+	return ret;
+}
+
+static void *amdgpu_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+{
+	int ret;
+	union drm_amdgpu_gem_mmap gem_map;
+
+	memset(&gem_map, 0, sizeof(gem_map));
+	gem_map.in.handle = bo->handles[plane].u32;
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
+		return MAP_FAILED;
+	}
+	data->length = bo->total_size;
+
+	return mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+		    gem_map.out.addr_ptr);
+}
+
+static uint32_t amdgpu_resolve_format(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_FLEX_YCbCr_420_888:
+		return DRM_FORMAT_NV12;
+	default:
+		return format;
+	}
+}
+
+struct backend backend_amdgpu = {
+	.name = "amdgpu",
+	.init = amdgpu_init,
+	.close = amdgpu_close,
+	.bo_create = amdgpu_bo_create,
+	.bo_destroy = drv_gem_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = amdgpu_bo_map,
+	.resolve_format = amdgpu_resolve_format,
+};
+
+#endif
diff --git a/cirrus.c b/cirrus.c
new file mode 100644
index 0000000..4f0e983
--- /dev/null
+++ b/cirrus.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2014 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+const static uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB888,
+						  DRM_FORMAT_XRGB8888 };
+
+static int cirrus_init(struct driver *drv)
+{
+	int ret;
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &LINEAR_METADATA, BO_USE_RENDER_MASK);
+	if (ret)
+		return ret;
+
+	return drv_modify_linear_combinations(drv);
+}
+
+struct backend backend_cirrus = {
+	.name = "cirrus",
+	.init = cirrus_init,
+	.bo_create = drv_dumb_bo_create,
+	.bo_destroy = drv_dumb_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = drv_dumb_bo_map,
+};
diff --git a/common.mk b/common.mk
new file mode 100644
index 0000000..3d7e7d4
--- /dev/null
+++ b/common.mk
@@ -0,0 +1,931 @@
+# Copyright 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# If this file is part of another source distribution, its license may be
+# stored in LICENSE.makefile or LICENSE.common.mk.
+#
+# NOTE NOTE NOTE
+#  The authoritative common.mk is located in:
+#    https://chromium.googlesource.com/chromiumos/platform2/+/master/common-mk
+#  Please make all changes there, then copy into place in other repos.
+# NOTE NOTE NOTE
+#
+# This file provides a common architecture for building C/C++ source trees.
+# It uses recursive makefile inclusion to create a single make process which
+# can be built in the source tree or with the build artifacts placed elsewhere.
+#
+# It is fully parallelizable for all targets, including static archives.
+#
+# To use:
+# 1. Place common.mk in your top source level
+# 2. In your top-level Makefile, place "include common.mk" at the top
+# 3. In all subdirectories, create a 'module.mk' file that starts with:
+#      include common.mk
+#    And then contains the remainder of your targets.
+# 4. All build targets should look like:
+#    relative/path/target: relative/path/obj.o
+#
+# See existing makefiles for rule examples.
+#
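+# A minimal module.mk sketch (hypothetical subdirectory 'mydir' containing
+# mytool.cc and mytool_test.cc; the names are illustrative only):
+#   include common.mk
+#
+#   CXX_BINARY(mydir/mytool): mydir/mytool.o
+#   clean: CLEAN(mydir/mytool)
+#   all: CXX_BINARY(mydir/mytool)
+#
+#   CXX_BINARY(mydir/mytool_test): mydir/mytool_test.o
+#   clean: CLEAN(mydir/mytool_test)
+#   tests: TEST(CXX_BINARY(mydir/mytool_test))
+#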
+# Exported macros:
+#   - cc_binary, cxx_binary provide standard compilation steps for binaries
+#   - cxx_library, cc_library provide standard compilation steps for
+#     shared objects.
+#   All of the above optionally take an argument for extra flags.
+#   - update_archive creates/updates a given .a target
+#
+# Instead of using the build macros, most users can just use wrapped targets:
+#   - CXX_BINARY, CC_BINARY, CC_STATIC_BINARY, CXX_STATIC_BINARY
+#   - CXX_LIBRARY, CC_LIBRARY, CC_STATIC_LIBRARY, CXX_STATIC_LIBRARY
+#   - E.g., CXX_BINARY(mahbinary): foo.o
+#   - object.depends targets may be used when a prerequisite is required for an
+#     object file. Because object files result in multiple build artifacts to
+#     handle PIC and PIE weirdness. E.g.
+#       foo.o.depends: generated/dbus.h
+#   - TEST(binary) or TEST(CXX_BINARY(binary)) may be used as a prerequisite
+#     for the tests target to trigger an automated test run.
+#   - CLEAN(file_or_dir) dependency can be added to 'clean'.
+#
+# If source code is being generated, rules will need to be registered for
+# compiling the objects.  This can be done by adding one of the following
+# to the Makefile:
+#   - For C source files
+#   $(eval $(call add_object_rules,sub/dir/gen_a.o sub/dir/b.o,CC,c,CFLAGS))
+#   - For C++ source files
+#   $(eval $(call add_object_rules,sub/dir/gen_a.o sub/dir/b.o,CXX,cc,CXXFLAGS))
+#
+# Exported targets meant to have prerequisites added to:
+#  - all - Your desired targets should be given
+#  - tests - Any TEST(test_binary) targets should be given
+#  - FORCE - force the given target to run regardless of changes
+#            In most cases, using .PHONY is preferred.
+#
+# Possible command line variables:
+#   - COLOR=[0|1] to set ANSI color output (default: 1)
+#   - VERBOSE=[0|1] to hide/show commands (default: 0)
+#   - MODE=[opt|dbg|profiling] (default: opt)
+#          opt - Enable optimizations for release builds
+#          dbg - Turn down optimization for debugging
+#          profiling - Turn off optimization and turn on profiling/coverage
+#                      support.
+#   - ARCH=[x86|arm|supported qemu name] (default: from portage or uname -m)
+#   - SPLITDEBUG=[0|1] splits debug info in target.debug (default: 0)
+#        If NOSTRIP=1, SPLITDEBUG will never strip the final emitted objects.
+#   - NOSTRIP=[0|1] determines if binaries are stripped. (default: 1)
+#        NOSTRIP=0 and MODE=opt will also drop -g from the CFLAGS.
+#   - VALGRIND=[0|1] runs tests under valgrind (default: 0)
+#   - OUT=/path/to/builddir puts all output in given path (default: $PWD)
+#   - VALGRIND_ARGS="" supplies extra memcheck arguments
+#
+# Per-target(-ish) variable:
+#   - NEEDS_ROOT=[0|1] allows a TEST() target to run with root.
+#     Default is 0 unless it is running under QEmu.
+#   - NEEDS_MOUNTS=[0|1] allows a TEST() target running on QEmu to get
+#     mounts set up in the $(SYSROOT)
+#
+# Caveats:
+# - Directories or files with spaces in them DO NOT get along with GNU Make.
+#   If you need them, all uses of dir/notdir/etc will need to have magic
+#   wrappers.  Proceed at risk to your own sanity.
+# - External CXXFLAGS and CFLAGS should be passed via the environment since
+#   this file does not use 'override' to control them.
+# - Our version of GNU Make doesn't seem to support the 'private' variable
+#   annotation, so you can't tag a variable private on a wrapping target.
+
+# Behavior configuration variables
+SPLITDEBUG ?= 0
+NOSTRIP ?= 1
+VALGRIND ?= 0
+COLOR ?= 1
+VERBOSE ?= 0
+MODE ?= opt
+CXXEXCEPTIONS ?= 0
+ARCH ?= $(shell uname -m)
+
+# Put objects in a separate tree based on makefile locations
+# This means you can build a tree without touching it:
+#   make -C $SRCDIR  # will create ./build-$(MODE)
+# Or
+#   make -C $SRCDIR OUT=$PWD
+# This variable is extended on subdir calls and doesn't need to be re-called.
+OUT ?= $(PWD)/
+
+# Make OUT now so we can use realpath.
+$(shell mkdir -p "$(OUT)")
+
+# TODO(wad) Relative paths are resolved against SRC and not the calling dir.
+# Ensure a command-line supplied OUT has a slash
+override OUT := $(realpath $(OUT))/
+
+# SRC is not meant to be set by the end user, but during make call relocation.
+# $(PWD) != $(CURDIR) all the time.
+export SRC ?= $(CURDIR)
+
+# Re-start in the $(OUT) directory if we're not there.
+# We may be invoked using -C or bare and we need to ensure behavior
+# is consistent so we check both PWD vs OUT and PWD vs CURDIR.
+override RELOCATE_BUILD := 0
+ifneq (${PWD}/,${OUT})
+override RELOCATE_BUILD := 1
+endif
+# Make sure we're running with no builtin targets. They cause
+# leakage and mayhem!
+ifneq (${PWD},${CURDIR})
+override RELOCATE_BUILD := 1
+# If we're run from the build dir, don't let it get cleaned up later.
+ifeq (${PWD}/,${OUT})
+$(shell touch "$(PWD)/.dont_delete_on_clean")
+endif
+endif  # ifneq (${PWD},${CURDIR})
+
+# "Relocate" if we need to restart without implicit rules.
+ifeq ($(subst r,,$(MAKEFLAGS)),$(MAKEFLAGS))
+override RELOCATE_BUILD := 1
+endif
+
+ifeq (${RELOCATE_BUILD},1)
+# By default, silence build output. Reused below as well.
+QUIET = @
+ifeq ($(VERBOSE),1)
+  QUIET=
+endif
+
+# This target will override all targets, including prerequisites. To avoid
+# calling $(MAKE) once per prereq on the given CMDGOAL, we guard it with a local
+# variable.
+RUN_ONCE := 0
+MAKECMDGOALS ?= all
+# Keep the rules split as newer make does not allow them to be declared
+# on the same line.  But the way :: rules work, the _all here will also
+# invoke the %:: rule while retaining "_all" as the default.
+_all::
+%::
+	$(if $(filter 0,$(RUN_ONCE)), \
+	  cd "$(OUT)" && \
+	  $(MAKE) -r -I "$(SRC)" -f "$(CURDIR)/Makefile" \
+	    SRC="$(CURDIR)" OUT="$(OUT)" $(foreach g,$(MAKECMDGOALS),"$(g)"),)
+	$(eval RUN_ONCE := 1)
+pass-to-subcall := 1
+endif
+
+ifeq ($(pass-to-subcall),)
+
+# Only call MODULE if we're in a submodule
+MODULES_LIST := $(filter-out Makefile %.d,$(MAKEFILE_LIST))
+ifeq ($(words $(filter-out Makefile common.mk %.d $(SRC)/Makefile \
+                           $(SRC)/common.mk,$(MAKEFILE_LIST))),0)
+
+# All the top-level defines outside of module.mk.
+
+#
+# Helper macros
+#
+
+# Create the directory if it doesn't yet exist.
+define auto_mkdir
+  $(if $(wildcard $(dir $1)),$2,$(QUIET)mkdir -p "$(dir $1)")
+endef
+
+# Creates the actual archive with an index.
+# The target $@ must end with .pic.a or .pie.a.
+define update_archive
+  $(call auto_mkdir,$(TARGET_OR_MEMBER))
+  $(QUIET)# Create the archive in one step to avoid parallel use accessing it
+  $(QUIET)# before all the symbols are present.
+  @$(ECHO) "AR		$(subst \
+$(SRC)/,,$(^:.o=$(suffix $(basename $(TARGET_OR_MEMBER))).o)) \
+-> $(subst $(SRC)/,,$(TARGET_OR_MEMBER))"
+  $(QUIET)$(AR) rcs $(TARGET_OR_MEMBER) \
+          $(subst $(SRC)/,,$(^:.o=$(suffix $(basename $(TARGET_OR_MEMBER))).o))
+endef
+
+# Default compile from objects using pre-requisites but filters out
+# subdirs and .d files.
+define cc_binary
+  $(call COMPILE_BINARY_implementation,CC,$(CFLAGS) $(1),$(EXTRA_FLAGS))
+endef
+
+define cxx_binary
+  $(call COMPILE_BINARY_implementation,CXX,$(CXXFLAGS) $(1),$(EXTRA_FLAGS))
+endef
+
+# Default compile from objects using pre-requisites but filters out
+# subdirs and .d files.
+define cc_library
+  $(call COMPILE_LIBRARY_implementation,CC,$(CFLAGS) $(1),$(EXTRA_FLAGS))
+endef
+define cxx_library
+  $(call COMPILE_LIBRARY_implementation,CXX,$(CXXFLAGS) $(1),$(EXTRA_FLAGS))
+endef
+
+# Deletes files silently if they exist. Meant for use in any local
+# clean targets.
+define silent_rm
+  $(if $(wildcard $(1)),
+  $(QUIET)($(ECHO) -n '$(COLOR_RED)CLEANFILE$(COLOR_RESET)		' && \
+    $(ECHO) '$(subst $(OUT)/,,$(wildcard $(1)))' && \
+    $(RM) $(1) 2>/dev/null) || true,)
+endef
+define silent_rmdir
+  $(if $(wildcard $(1)),
+    $(if $(wildcard $(1)/*),
+  $(QUIET)# $(1) not empty [$(wildcard $(1)/*)]. Not deleting.,
+  $(QUIET)($(ECHO) -n '$(COLOR_RED)CLEANDIR$(COLOR_RESET)		' && \
+    $(ECHO) '$(subst $(OUT)/,,$(wildcard $(1)))' && \
+    $(RMDIR) $(1) 2>/dev/null) || true),)
+endef
+
+#
+# Default variable values
+#
+
+# Only override toolchain vars if they are from make.
+CROSS_COMPILE ?=
+define override_var
+ifneq ($(filter undefined default,$(origin $1)),)
+$1 = $(CROSS_COMPILE)$2
+endif
+endef
+$(eval $(call override_var,AR,ar))
+$(eval $(call override_var,CC,gcc))
+$(eval $(call override_var,CXX,g++))
+$(eval $(call override_var,OBJCOPY,objcopy))
+$(eval $(call override_var,PKG_CONFIG,pkg-config))
+$(eval $(call override_var,RANLIB,ranlib))
+$(eval $(call override_var,STRIP,strip))
+
+RMDIR ?= rmdir
+ECHO = /bin/echo -e
+
+ifeq ($(lastword $(subst /, ,$(CC))),clang)
+CDRIVER = clang
+else
+CDRIVER = gcc
+endif
+
+ifeq ($(lastword $(subst /, ,$(CXX))),clang++)
+CXXDRIVER = clang
+else
+CXXDRIVER = gcc
+endif
+
+# Internal macro to support check_XXX macros below.
+# Usage: $(call check_compile, [code], [compiler], [code_type], [c_flags],
+#               [extra_c_flags], [library_flags], [success_ret], [fail_ret])
+# Return: [success_ret] if compile succeeded, otherwise [fail_ret]
+check_compile = $(shell printf '%b\n' $(1) | \
+  $($(2)) $($(4)) -x $(3) $(LDFLAGS) $(5) - $(6) -o /dev/null > /dev/null 2>&1 \
+  && echo "$(7)" || echo "$(8)")
+
+# Helper macro to check whether a test program will compile with the specified
+# compiler flags.
+# Usage: $(call check_compile_cc, [code], [flags], [alternate_flags])
+# Return: [flags] if compile succeeded, otherwise [alternate_flags]
+check_compile_cc = $(call check_compile,$(1),CC,c,CFLAGS,$(2),,$(2),$(3))
+check_compile_cxx = $(call check_compile,$(1),CXX,c++,CXXFLAGS,$(2),,$(2),$(3))
+
+# Helper macro to check whether a test program will compile with the specified
+# libraries.
+# Usage: $(call check_libs_cc, [code], [library_flags], [alternate_flags])
+# Return: [library_flags] if compile succeeded, otherwise [alternate_flags]
+check_libs_cc = $(call check_compile,$(1),CC,c,CFLAGS,,$(2),$(2),$(3))
+check_libs_cxx = $(call check_compile,$(1),CXX,c++,CXXFLAGS,,$(2),$(2),$(3))
+
+# Helper macro to check whether the compiler accepts the specified flags.
+# Usage: $(call check_cc, [flags], [alternate_flags])
+# Return: [flags] if compile succeeded, otherwise [alternate_flags]
+check_cc = $(call check_compile_cc,'int main() { return 0; }',$(1),$(2))
+check_cxx = $(call check_compile_cxx,'int main() { return 0; }',$(1),$(2))
+
+# Choose the stack protector flags based on what's supported by the compiler.
+SSP_CFLAGS := $(call check_cc,-fstack-protector-strong)
+ifeq ($(SSP_CFLAGS),)
+ SSP_CFLAGS := $(call check_cc,-fstack-protector-all)
+endif
+
+# To update these from an including Makefile:
+#  CXXFLAGS += -mahflag  # Append to the list
+#  CXXFLAGS := -mahflag $(CXXFLAGS) # Prepend to the list
+#  CXXFLAGS := $(filter-out badflag,$(CXXFLAGS)) # Filter out a value
+# The same goes for CFLAGS.
+COMMON_CFLAGS-gcc := -fvisibility=internal -ggdb3 -Wa,--noexecstack
+COMMON_CFLAGS-clang := -fvisibility=hidden -ggdb
+COMMON_CFLAGS := -Wall -Werror -fno-strict-aliasing $(SSP_CFLAGS) -O1 -Wformat=2
+CXXFLAGS += $(COMMON_CFLAGS) $(COMMON_CFLAGS-$(CXXDRIVER))
+CFLAGS += $(COMMON_CFLAGS) $(COMMON_CFLAGS-$(CDRIVER))
+CPPFLAGS += -D_FORTIFY_SOURCE=2
+
+# Disable exceptions based on the CXXEXCEPTIONS setting.
+ifeq ($(CXXEXCEPTIONS),0)
+  CXXFLAGS := $(CXXFLAGS) -fno-exceptions -fno-unwind-tables \
+    -fno-asynchronous-unwind-tables
+endif
+
+ifeq ($(MODE),opt)
+  # Up the optimizations.
+  CFLAGS := $(filter-out -O1,$(CFLAGS)) -O2
+  CXXFLAGS := $(filter-out -O1,$(CXXFLAGS)) -O2
+  # Only drop -g* if symbols aren't desired.
+  ifeq ($(NOSTRIP),0)
+    # TODO: do we want -fomit-frame-pointer on x86?
+    CFLAGS := $(filter-out -ggdb3,$(CFLAGS))
+    CXXFLAGS := $(filter-out -ggdb3,$(CXXFLAGS))
+  endif
+endif
+
+ifeq ($(MODE),profiling)
+  CFLAGS := $(CFLAGS) -O0 -g  --coverage
+  CXXFLAGS := $(CXXFLAGS) -O0 -g  --coverage
+  LDFLAGS := $(LDFLAGS) --coverage
+endif
+
+LDFLAGS := $(LDFLAGS) -Wl,-z,relro -Wl,-z,noexecstack -Wl,-z,now
+
+# Fancy helpers for color if a prompt is defined
+ifeq ($(COLOR),1)
+COLOR_RESET = \x1b[0m
+COLOR_GREEN = \x1b[32;01m
+COLOR_RED = \x1b[31;01m
+COLOR_YELLOW = \x1b[33;01m
+endif
+
+# By default, silence build output.
+QUIET = @
+ifeq ($(VERBOSE),1)
+  QUIET=
+endif
+
+#
+# Implementation macros for compile helpers above
+#
+
+# Useful for dealing with pie-broken toolchains.
+# Call make with PIE=0 to disable default PIE use.
+OBJ_PIE_FLAG = -fPIE
+COMPILE_PIE_FLAG = -pie
+ifeq ($(PIE),0)
+  OBJ_PIE_FLAG =
+  COMPILE_PIE_FLAG =
+endif
+
+# Favor member targets first for CXX_BINARY(%) magic.
+# And strip out nested members if possible.
+LP := (
+RP := )
+TARGET_OR_MEMBER = $(lastword $(subst $(LP), ,$(subst $(RP),,$(or $%,$@))))
+
+# Default compile from objects using pre-requisites but filters out
+# all non-.o files.
+define COMPILE_BINARY_implementation
+  @$(ECHO) "LD$(1)		$(subst $(PWD)/,,$(TARGET_OR_MEMBER))"
+  $(call auto_mkdir,$(TARGET_OR_MEMBER))
+  $(QUIET)$($(1)) $(COMPILE_PIE_FLAG) -o $(TARGET_OR_MEMBER) \
+    $(2) $(LDFLAGS) \
+    $(filter %.o %.a,$(^:.o=.pie.o)) \
+    $(foreach so,$(filter %.so,$^),-L$(dir $(so)) \
+                            -l$(patsubst lib%,%,$(basename $(notdir $(so))))) \
+    $(LDLIBS)
+  $(call conditional_strip)
+  @$(ECHO) -n "BIN		"
+  @$(ECHO) "$(COLOR_GREEN)$(subst $(PWD)/,,$(TARGET_OR_MEMBER))$(COLOR_RESET)"
+  @$(ECHO) "	$(COLOR_YELLOW)-----$(COLOR_RESET)"
+endef
+
+# TODO: add version support extracted from PV environment variable
+#ifeq ($(PV),9999)
+#$(warning PV=$(PV). If shared object versions matter, please force PV=.)
+#endif
+# Then add -Wl,-soname,$@.$(PV) ?
+
+# Default compile from objects using pre-requisites but filters out
+# all non-.o values. (Remember to add -L$(OUT) -llib)
+COMMA := ,
+define COMPILE_LIBRARY_implementation
+  @$(ECHO) "SHARED$(1)	$(subst $(PWD)/,,$(TARGET_OR_MEMBER))"
+  $(call auto_mkdir,$(TARGET_OR_MEMBER))
+  $(QUIET)$($(1)) -shared -Wl,-E -o $(TARGET_OR_MEMBER) \
+    $(2) $(LDFLAGS) \
+    $(if $(filter %.a,$^),-Wl$(COMMA)--whole-archive,) \
+    $(filter %.o ,$(^:.o=.pic.o)) \
+    $(foreach a,$(filter %.a,$^),-L$(dir $(a)) \
+                            -l$(patsubst lib%,%,$(basename $(notdir $(a))))) \
+    $(foreach so,$(filter %.so,$^),-L$(dir $(so)) \
+                            -l$(patsubst lib%,%,$(basename $(notdir $(so))))) \
+    $(LDLIBS)
+  $(call conditional_strip)
+  @$(ECHO) -n "LIB		$(COLOR_GREEN)"
+  @$(ECHO) "$(subst $(PWD)/,,$(TARGET_OR_MEMBER))$(COLOR_RESET)"
+  @$(ECHO) "	$(COLOR_YELLOW)-----$(COLOR_RESET)"
+endef
+
+define conditional_strip
+  $(if $(filter 0,$(NOSTRIP)),$(call strip_artifact))
+endef
+
+define strip_artifact
+  @$(ECHO) "STRIP		$(subst $(OUT)/,,$(TARGET_OR_MEMBER))"
+  $(if $(filter 1,$(SPLITDEBUG)), @$(ECHO) -n "DEBUG	"; \
+    $(ECHO) "$(COLOR_YELLOW)\
+$(subst $(OUT)/,,$(TARGET_OR_MEMBER)).debug$(COLOR_RESET)")
+  $(if $(filter 1,$(SPLITDEBUG)), \
+    $(QUIET)$(OBJCOPY) --only-keep-debug "$(TARGET_OR_MEMBER)" \
+      "$(TARGET_OR_MEMBER).debug")
+  $(if $(filter-out dbg,$(MODE)),$(QUIET)$(STRIP) --strip-unneeded \
+    "$(TARGET_OR_MEMBER)",)
+endef
+
+#
+# Global pattern rules
+#
+
+# Below, the archive member syntax is abused to create fancier
+# syntactic sugar for recipe authors that avoids needing to know
+# subcall options.  The downside is that make attempts to look
+# into the phony archives for timestamps. This will cause the final
+# target to be rebuilt/linked on _every_ call to make even when nothing
+# has changed.  Until a better way presents itself, we have helpers that
+# do the stat check on make's behalf.  Dodgy but simple.
+define old_or_no_timestamp
+  $(if $(realpath $%),,$(1))
+  $(if $(shell find $^ -cnewer "$%" 2>/dev/null),$(1))
+endef
+
+define check_deps
+  $(if $(filter 0,$(words $^)),\
+    $(error Missing dependencies or declaration of $@($%)),)
+endef
+
+# Build a cxx target magically
+CXX_BINARY(%):
+	$(call check_deps)
+	$(call old_or_no_timestamp,$(call cxx_binary))
+clean: CLEAN(CXX_BINARY*)
+
+CC_BINARY(%):
+	$(call check_deps)
+	$(call old_or_no_timestamp,$(call cc_binary))
+clean: CLEAN(CC_BINARY*)
+
+CXX_STATIC_BINARY(%):
+	$(call check_deps)
+	$(call old_or_no_timestamp,$(call cxx_binary,-static))
+clean: CLEAN(CXX_STATIC_BINARY*)
+
+CC_STATIC_BINARY(%):
+	$(call check_deps)
+	$(call old_or_no_timestamp,$(call cc_binary,-static))
+clean: CLEAN(CC_STATIC_BINARY*)
+
+CXX_LIBRARY(%):
+	$(call check_deps)
+	$(call old_or_no_timestamp,$(call cxx_library))
+clean: CLEAN(CXX_LIBRARY*)
+
+CXX_LIBARY(%):
+	$(error Typo alert! LIBARY != LIBRARY)
+
+CC_LIBRARY(%):
+	$(call check_deps)
+	$(call old_or_no_timestamp,$(call cc_library))
+clean: CLEAN(CC_LIBRARY*)
+
+CC_LIBARY(%):
+	$(error Typo alert! LIBARY != LIBRARY)
+
+CXX_STATIC_LIBRARY(%):
+	$(call check_deps)
+	$(call old_or_no_timestamp,$(call update_archive))
+clean: CLEAN(CXX_STATIC_LIBRARY*)
+
+CXX_STATIC_LIBARY(%):
+	$(error Typo alert! LIBARY != LIBRARY)
+
+CC_STATIC_LIBRARY(%):
+	$(call check_deps)
+	$(call old_or_no_timestamp,$(call update_archive))
+clean: CLEAN(CC_STATIC_LIBRARY*)
+
+CC_STATIC_LIBARY(%):
+	$(error Typo alert! LIBARY != LIBRARY)
+
+
+TEST(%): % qemu_chroot_install
+	$(call TEST_implementation)
+.PHONY: TEST
+
+# Multiple targets with a wildcard need to share a directory.
+# Don't use this directly; it just makes sure the directory is removed _after_
+# the files are.
+CLEANFILE(%):
+	$(call silent_rm,$(TARGET_OR_MEMBER))
+.PHONY: CLEANFILE
+
+CLEAN(%): CLEANFILE(%)
+	$(QUIET)# CLEAN($%) meta-target called
+	$(if $(filter-out $(PWD)/,$(dir $(abspath $(TARGET_OR_MEMBER)))), \
+	  $(call silent_rmdir,$(dir $(abspath $(TARGET_OR_MEMBER)))),\
+	  $(QUIET)# Not deleting $(dir $(abspath $(TARGET_OR_MEMBER))) yet.)
+.PHONY: CLEAN
+
+#
+# Top-level objects and pattern rules
+#
+
+# All objects for .c files at the top level
+C_OBJECTS = $(patsubst $(SRC)/%.c,%.o,$(wildcard $(SRC)/*.c))
+
+
+# All objects for .cc files at the top level
+CXX_OBJECTS = $(patsubst $(SRC)/%.cc,%.o,$(wildcard $(SRC)/*.cc))
+
+# Note, the catch-all pattern rules don't work in subdirectories because
+# we're building from the $(OUT) directory. At the top-level (here) they will
+# work, but we go ahead and match using the module form.  Then we can place a
+# generic pattern rule to capture leakage from the main Makefile. (Later in the
+# file.)
+#
+# Target-specific pattern rules work well for modules (MODULE_C_OBJECTS)
+# because they scope the behavior to the given target, which ensures we get a
+# relative directory offset from $(OUT); otherwise it would not match without
+# further magic on a per-subdirectory basis.
+
+# Creates object file rules. Call with eval.
+# $(1) list of .o files
+# $(2) source type (CC or CXX)
+# $(3) source suffix (cc or c)
+# $(4) compiler flag name (CFLAGS or CXXFLAGS)
+# $(5) source dir: _only_ if $(SRC). Leave blank for obj tree.
+define add_object_rules
+$(patsubst %.o,%.pie.o,$(1)): %.pie.o: $(5)%.$(3) %.o.depends
+	$$(call auto_mkdir,$$@)
+	$$(call OBJECT_PATTERN_implementation,$(2),\
+          $$(basename $$@),$$($(4)) $$(CPPFLAGS) $$(OBJ_PIE_FLAG))
+
+$(patsubst %.o,%.pic.o,$(1)): %.pic.o: $(5)%.$(3) %.o.depends
+	$$(call auto_mkdir,$$@)
+	$$(call OBJECT_PATTERN_implementation,$(2),\
+          $$(basename $$@),$$($(4)) $$(CPPFLAGS) -fPIC)
+
+# Placeholder for depends
+$(patsubst %.o,%.o.depends,$(1)):
+	$$(call auto_mkdir,$$@)
+	$$(QUIET)touch "$$@"
+
+$(1): %.o: %.pic.o %.pie.o
+	$$(call auto_mkdir,$$@)
+	$$(QUIET)touch "$$@"
+endef
+
+define OBJECT_PATTERN_implementation
+  @$(ECHO) "$(1)		$(subst $(SRC)/,,$<) -> $(2).o"
+  $(call auto_mkdir,$@)
+  $(QUIET)$($(1)) -c -MD -MF $(2).d $(3) -o $(2).o $<
+  $(QUIET)# Wrap all the deps in $$(wildcard) so a missing header
+  $(QUIET)# won't cause weirdness.  First we remove newlines and \,
+  $(QUIET)# then wrap it.
+  $(QUIET)sed -i -e :j -e '$$!N;s|\\\s*\n| |;tj' \
+    -e 's|^\(.*\s*:\s*\)\(.*\)$$|\1 $$\(wildcard \2\)|' $(2).d
+endef
+
+# Now actually register handlers for C(XX)_OBJECTS.
+$(eval $(call add_object_rules,$(C_OBJECTS),CC,c,CFLAGS,$(SRC)/))
+$(eval $(call add_object_rules,$(CXX_OBJECTS),CXX,cc,CXXFLAGS,$(SRC)/))
+
+# Disable default pattern rules to help avoid leakage.
+# These may already be handled by '-r', but let's keep it to be safe.
+%: %.o ;
+%.a: %.o ;
+%.o: %.c ;
+%.o: %.cc ;
+
+# NOTE: A specific rule for archive objects is avoided because parallel
+#       update of the archive causes build flakiness.
+# Instead, just make the objects the prerequisites and use update_archive
+# To use the foo.a(obj.o) functionality, targets would need to specify the
+# explicit object they expect on the prerequisite line.
+
+#
+# Architecture detection and QEMU wrapping
+#
+
+HOST_ARCH ?= $(shell uname -m)
+override ARCH := $(strip $(ARCH))
+override HOST_ARCH := $(strip $(HOST_ARCH))
+# emake will supply "x86" or "arm" for ARCH, but
+# if uname -m runs and you get x86_64, then this subst
+# will break.
+ifeq ($(subst x86,i386,$(ARCH)),i386)
+  QEMU_ARCH := $(subst x86,i386,$(ARCH))  # x86 -> i386
+else ifeq ($(subst amd64,x86_64,$(ARCH)),x86_64)
+  QEMU_ARCH := $(subst amd64,x86_64,$(ARCH))  # amd64 -> x86_64
+else
+  QEMU_ARCH = $(ARCH)
+endif
+override QEMU_ARCH := $(strip $(QEMU_ARCH))
+
+# If we're cross-compiling, try to use qemu for running the tests.
+ifneq ($(QEMU_ARCH),$(HOST_ARCH))
+  ifeq ($(SYSROOT),)
+    $(info SYSROOT not defined. qemu-based testing disabled)
+  else
+    # A SYSROOT is assumed for QEmu use.
+    USE_QEMU ?= 1
+
+    # Allow 64-bit hosts to run 32-bit without qemu.
+    ifeq ($(HOST_ARCH),x86_64)
+      ifeq ($(QEMU_ARCH),i386)
+        USE_QEMU = 0
+      endif
+    endif
+  endif
+else
+  USE_QEMU ?= 0
+endif
+
+# Normally we don't need to run as root or do bind mounts, so only
+# enable it by default when we're using QEMU.
+NEEDS_ROOT ?= $(USE_QEMU)
+NEEDS_MOUNTS ?= $(USE_QEMU)
+
+SYSROOT_OUT = $(OUT)
+ifneq ($(SYSROOT),)
+  SYSROOT_OUT = $(subst $(SYSROOT),,$(OUT))
+else
+  # Default to / when all the empty-sysroot logic is done.
+  SYSROOT = /
+endif
+
+QEMU_NAME = qemu-$(QEMU_ARCH)
+QEMU_PATH = /build/bin/$(QEMU_NAME)
+QEMU_SYSROOT_PATH = $(SYSROOT)$(QEMU_PATH)
+QEMU_SRC_PATH = /usr/bin/$(QEMU_NAME)
+QEMU_BINFMT_PATH = /proc/sys/fs/binfmt_misc/$(QEMU_NAME)
+QEMU_REGISTER_PATH = /proc/sys/fs/binfmt_misc/register
+
+QEMU_MAGIC_arm = ":$(QEMU_NAME):M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/build/bin/qemu-arm:"
+
+
+#
+# Output full configuration at top level
+#
+
+# Don't show on clean
+ifneq ($(MAKECMDGOALS),clean)
+  $(info build configuration:)
+  $(info - OUT=$(OUT))
+  $(info - SRC=$(SRC))
+  $(info - MODE=$(MODE))
+  $(info - SPLITDEBUG=$(SPLITDEBUG))
+  $(info - NOSTRIP=$(NOSTRIP))
+  $(info - VALGRIND=$(VALGRIND))
+  $(info - COLOR=$(COLOR))
+  $(info - CXXEXCEPTIONS=$(CXXEXCEPTIONS))
+  $(info - ARCH=$(ARCH))
+  $(info - QEMU_ARCH=$(QEMU_ARCH))
+  $(info - USE_QEMU=$(USE_QEMU))
+  $(info - NEEDS_ROOT=$(NEEDS_ROOT))
+  $(info - NEEDS_MOUNTS=$(NEEDS_MOUNTS))
+  $(info - SYSROOT=$(SYSROOT))
+  $(info )
+endif
+
+#
+# Standard targets with detection for when they are improperly configured.
+#
+
+# all does not include tests by default
+all:
+	$(QUIET)(test -z "$^" && \
+	$(ECHO) "You must add your targets as 'all' prerequisites") || true
+	$(QUIET)test -n "$^"
+
+# Builds and runs tests for the target arch
+# Run them in parallel
+# After the tests have completed, if profiling, run coverage analysis
+tests:
+ifeq ($(MODE),profiling)
+	@$(ECHO) "COVERAGE [$(COLOR_YELLOW)STARTED$(COLOR_RESET)]"
+	$(QUIET)FILES="";						\
+		for GCNO in `find . -name "*.gcno"`; do			\
+			GCDA="$${GCNO%.gcno}.gcda";			\
+			if [ -e $${GCDA} ]; then			\
+				FILES="$${FILES} $${GCDA}";		\
+			fi						\
+		done;							\
+		if [ -n "$${FILES}" ]; then				\
+			gcov -l $${FILES};				\
+			lcov --capture --directory .			\
+				--output-file=lcov-coverage.info;	\
+			genhtml lcov-coverage.info			\
+				--output-directory lcov-html;		\
+		fi
+	@$(ECHO) "COVERAGE [$(COLOR_YELLOW)FINISHED$(COLOR_RESET)]"
+endif
+.PHONY: tests
+
+qemu_chroot_install:
+ifeq ($(USE_QEMU),1)
+	$(QUIET)$(ECHO) "QEMU   Preparing $(QEMU_NAME)"
+	@# Copying strategy:
+	@# Compare the /usr/bin/qemu inode to /build/$board/build/bin/qemu; if they
+	@# differ, hard link to a temporary file, then rename the temp to the target.
+	@# This should ensure that once $QEMU_SYSROOT_PATH exists it will always
+	@# exist, regardless of simultaneous test setups.
+	$(QUIET)if [[ ! -e $(QEMU_SYSROOT_PATH) || \
+	    `stat -c %i $(QEMU_SRC_PATH)` != `stat -c %i $(QEMU_SYSROOT_PATH)` \
+	    ]]; then \
+	  $(ROOT_CMD) ln -Tf $(QEMU_SRC_PATH) $(QEMU_SYSROOT_PATH).$$$$; \
+	  $(ROOT_CMD) mv -Tf $(QEMU_SYSROOT_PATH).$$$$ $(QEMU_SYSROOT_PATH); \
+	fi
+
+	@# Prep the binfmt handler. First mount if needed, then unregister any bad
+	@# mappings, and then register our mapping.
+	@# There may still be some race conditions here where one script de-registers
+	@# and another script starts executing before it gets re-registered, but that
+	@# should be rare.
+	-$(QUIET)[[ -e $(QEMU_REGISTER_PATH) ]] || \
+	  $(ROOT_CMD) mount binfmt_misc -t binfmt_misc \
+	    /proc/sys/fs/binfmt_misc
+
+	-$(QUIET)if [[ -e $(QEMU_BINFMT_PATH) && \
+	      `awk '$$1 == "interpreter" {print $$NF}' $(QEMU_BINFMT_PATH)` != \
+	      "$(QEMU_PATH)" ]]; then \
+	  echo -1 | $(ROOT_CMD) tee $(QEMU_BINFMT_PATH) >/dev/null; \
+	fi
+
+	-$(if $(QEMU_MAGIC_$(ARCH)),$(QUIET)[[ -e $(QEMU_BINFMT_PATH) ]] || \
+	  echo $(QEMU_MAGIC_$(ARCH)) | $(ROOT_CMD) tee $(QEMU_REGISTER_PATH) \
+	    >/dev/null)
+endif
+.PHONY: qemu_clean qemu_chroot_install
+
+# TODO(wad) Move to -L $(SYSROOT) and fakechroot when qemu-user
+#           doesn't hang traversing /proc from SYSROOT.
+SUDO_CMD = sudo
+UNSHARE_CMD = unshare
+QEMU_CMD =
+ROOT_CMD = $(if $(filter 1,$(NEEDS_ROOT)),$(SUDO_CMD) , )
+MOUNT_CMD = $(if $(filter 1,$(NEEDS_MOUNTS)),$(ROOT_CMD) mount, \#)
+UMOUNT_CMD = $(if $(filter 1,$(NEEDS_MOUNTS)),$(ROOT_CMD) umount, \#)
+QEMU_LDPATH = $(SYSROOT_LDPATH):/lib64:/lib:/usr/lib64:/usr/lib
+ROOT_CMD_LDPATH = $(SYSROOT_LDPATH):$(SYSROOT)/lib64:
+ROOT_CMD_LDPATH := $(ROOT_CMD_LDPATH):$(SYSROOT)/lib:$(SYSROOT)/usr/lib64:
+ROOT_CMD_LDPATH := $(ROOT_CMD_LDPATH):$(SYSROOT)/usr/lib
+ifeq ($(USE_QEMU),1)
+  export QEMU_CMD = \
+   $(SUDO_CMD) chroot $(SYSROOT) $(QEMU_PATH) \
+   -drop-ld-preload \
+   -E LD_LIBRARY_PATH="$(QEMU_LDPATH):$(patsubst $(OUT),,$(LD_DIRS))" \
+   -E HOME="$(HOME)" -E SRC="$(SRC)" --
+  # USE_QEMU conditional function
+  define if_qemu
+    $(1)
+  endef
+else
+  ROOT_CMD = $(if $(filter 1,$(NEEDS_ROOT)),sudo, ) \
+    LD_LIBRARY_PATH="$(ROOT_CMD_LDPATH):$(LD_DIRS)"
+  define if_qemu
+    $(2)
+  endef
+endif
+
+VALGRIND_CMD =
+ifeq ($(VALGRIND),1)
+  VALGRIND_CMD = /usr/bin/valgrind --tool=memcheck $(VALGRIND_ARGS) --
+endif
+
+define TEST_implementation
+  $(QUIET)$(call TEST_setup)
+  $(QUIET)$(call TEST_run)
+  $(QUIET)$(call TEST_teardown)
+  $(QUIET)exit $$(cat $(OUT)$(TARGET_OR_MEMBER).status.test)
+endef
+
+define TEST_setup
+  @$(ECHO) -n "TEST		$(TARGET_OR_MEMBER) "
+  @$(ECHO) "[$(COLOR_YELLOW)SETUP$(COLOR_RESET)]"
+  $(QUIET)# Set up a target-specific results file
+  $(QUIET)(echo > $(OUT)$(TARGET_OR_MEMBER).setup.test)
+  $(QUIET)(echo 1 > $(OUT)$(TARGET_OR_MEMBER).status.test)
+  $(QUIET)(echo > $(OUT)$(TARGET_OR_MEMBER).cleanup.test)
+  $(QUIET)# No setup if we are not using QEMU
+  $(QUIET)# TODO(wad) this is racy until we use a vfs namespace
+  $(call if_qemu,\
+    $(QUIET)(echo "mkdir -p '$(SYSROOT)/proc' '$(SYSROOT)/dev' \
+                            '$(SYSROOT)/mnt/host/source'" \
+             >> "$(OUT)$(TARGET_OR_MEMBER).setup.test"))
+  $(call if_qemu,\
+    $(QUIET)(echo "$(MOUNT_CMD) --bind /mnt/host/source \
+             '$(SYSROOT)/mnt/host/source'" \
+             >> "$(OUT)$(TARGET_OR_MEMBER).setup.test"))
+  $(call if_qemu,\
+    $(QUIET)(echo "$(MOUNT_CMD) --bind /proc '$(SYSROOT)/proc'" \
+             >> "$(OUT)$(TARGET_OR_MEMBER).setup.test"))
+  $(call if_qemu,\
+    $(QUIET)(echo "$(MOUNT_CMD) --bind /dev '$(SYSROOT)/dev'" \
+             >> "$(OUT)$(TARGET_OR_MEMBER).setup.test"))
+endef
+
+define TEST_teardown
+  @$(ECHO) -n "TEST		$(TARGET_OR_MEMBER) "
+  @$(ECHO) "[$(COLOR_YELLOW)TEARDOWN$(COLOR_RESET)]"
+  $(call if_qemu, $(QUIET)$(SHELL) "$(OUT)$(TARGET_OR_MEMBER).cleanup.test")
+endef
+
+# Use GTEST_ARGS.[arch] if defined.
+override GTEST_ARGS.real = \
+ $(call if_qemu,$(GTEST_ARGS.qemu.$(QEMU_ARCH)),$(GTEST_ARGS.host.$(HOST_ARCH)))
+
+define TEST_run
+  @$(ECHO) -n "TEST		$(TARGET_OR_MEMBER) "
+  @$(ECHO) "[$(COLOR_GREEN)RUN$(COLOR_RESET)]"
+  $(QUIET)(echo 1 > "$(OUT)$(TARGET_OR_MEMBER).status.test")
+  $(QUIET)(echo $(ROOT_CMD) SRC="$(SRC)" $(QEMU_CMD) $(VALGRIND_CMD) \
+    "$(strip $(call if_qemu, $(SYSROOT_OUT),$(OUT))$(TARGET_OR_MEMBER))" \
+      $(if $(filter-out 0,$(words $(GTEST_ARGS.real))),$(GTEST_ARGS.real),\
+           $(GTEST_ARGS)) >> "$(OUT)$(TARGET_OR_MEMBER).setup.test")
+  -$(QUIET)$(call if_qemu,$(SUDO_CMD) $(UNSHARE_CMD) -m) $(SHELL) \
+      $(OUT)$(TARGET_OR_MEMBER).setup.test \
+  && echo 0 > "$(OUT)$(TARGET_OR_MEMBER).status.test"
+endef
+
+# Recursive list reversal so that we get RMDIR_ON_CLEAN in reverse order.
+define reverse
+$(if $(1),$(call reverse,$(wordlist 2,$(words $(1)),$(1)))) $(firstword $(1))
+endef
+
+clean: qemu_clean
+clean: CLEAN($(OUT)*.d) CLEAN($(OUT)*.o) CLEAN($(OUT)*.debug)
+clean: CLEAN($(OUT)*.test) CLEAN($(OUT)*.depends)
+clean: CLEAN($(OUT)*.gcno) CLEAN($(OUT)*.gcda) CLEAN($(OUT)*.gcov)
+clean: CLEAN($(OUT)lcov-coverage.info) CLEAN($(OUT)lcov-html)
+
+clean:
+	$(QUIET)# Always delete the containing directory last.
+	$(call silent_rmdir,$(OUT))
+
+FORCE: ;
+# Empty rule for use when no special targets are needed, like large_tests
+NONE:
+
+.PHONY: clean NONE valgrind
+.DEFAULT_GOAL  :=  all
+# Don't let make blow away "intermediates"
+.PRECIOUS: %.pic.o %.pie.o %.a %.pic.a %.pie.a %.test
+
+# Start accruing build info
+OUT_DIRS = $(OUT)
+LD_DIRS = $(OUT)
+SRC_DIRS = $(SRC)
+
+include $(wildcard $(OUT)*.d)
+SUBMODULE_DIRS = $(wildcard $(SRC)/*/module.mk)
+include $(SUBMODULE_DIRS)
+
+
+else  ## In duplicate inclusions of common.mk
+
+# Get the current inclusion directory without a trailing slash
+MODULE := $(patsubst %/,%, \
+           $(dir $(lastword $(filter-out %common.mk,$(MAKEFILE_LIST)))))
+MODULE := $(subst $(SRC)/,,$(MODULE))
+MODULE_NAME := $(subst /,_,$(MODULE))
+#VPATH := $(MODULE):$(VPATH)
+
+
+# Depth first
+$(eval OUT_DIRS += $(OUT)$(MODULE))
+$(eval SRC_DIRS += $(OUT)$(MODULE))
+$(eval LD_DIRS := $(LD_DIRS):$(OUT)$(MODULE))
+
+# Add the defaults from this dir to rm_clean
+clean: CLEAN($(OUT)$(MODULE)/*.d) CLEAN($(OUT)$(MODULE)/*.o)
+clean: CLEAN($(OUT)$(MODULE)/*.debug) CLEAN($(OUT)$(MODULE)/*.test)
+clean: CLEAN($(OUT)$(MODULE)/*.depends)
+clean: CLEAN($(OUT)$(MODULE)/*.gcno) CLEAN($(OUT)$(MODULE)/*.gcda)
+clean: CLEAN($(OUT)$(MODULE)/*.gcov) CLEAN($(OUT)lcov-coverage.info)
+clean: CLEAN($(OUT)lcov-html)
+
+$(info + submodule: $(MODULE_NAME))
+# We must eval otherwise they may be dropped.
+MODULE_C_OBJECTS = $(patsubst $(SRC)/$(MODULE)/%.c,$(MODULE)/%.o,\
+  $(wildcard $(SRC)/$(MODULE)/*.c))
+$(eval $(MODULE_NAME)_C_OBJECTS ?= $(MODULE_C_OBJECTS))
+MODULE_CXX_OBJECTS = $(patsubst $(SRC)/$(MODULE)/%.cc,$(MODULE)/%.o,\
+  $(wildcard $(SRC)/$(MODULE)/*.cc))
+$(eval $(MODULE_NAME)_CXX_OBJECTS ?= $(MODULE_CXX_OBJECTS))
+
+# Note, $(MODULE) is implicit in the path to the %.c.
+# See $(C_OBJECTS) for more details.
+# Register rules for the module objects.
+$(eval $(call add_object_rules,$(MODULE_C_OBJECTS),CC,c,CFLAGS,$(SRC)/))
+$(eval $(call add_object_rules,$(MODULE_CXX_OBJECTS),CXX,cc,CXXFLAGS,$(SRC)/))
+
+# Continue recursive inclusion of module.mk files
+SUBMODULE_DIRS = $(wildcard $(SRC)/$(MODULE)/*/module.mk)
+include $(wildcard $(OUT)$(MODULE)/*.d)
+include $(SUBMODULE_DIRS)
+
+endif
+endif  ## pass-to-subcall wrapper for relocating the call directory
diff --git a/cros_gralloc/Makefile b/cros_gralloc/Makefile
new file mode 100644
index 0000000..9e1be9e
--- /dev/null
+++ b/cros_gralloc/Makefile
@@ -0,0 +1,43 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+GRALLOC = gralloc.cros.so
+
+SRCS    = $(wildcard *.cc)
+SRCS   += $(wildcard ../*.c)
+SOURCES = $(filter-out ../gbm%, $(SRCS))
+PKG_CONFIG ?= pkg-config
+
+VPATH = $(dir $(SOURCES))
+LIBDRM_CFLAGS := $(shell $(PKG_CONFIG) --cflags libdrm)
+LIBDRM_LIBS := $(shell $(PKG_CONFIG) --libs libdrm)
+
+CPPFLAGS += -Wall -fPIC -Werror -flto $(LIBDRM_CFLAGS)
+CXXFLAGS += -std=c++11
+CFLAGS   += -std=c99
+LIBS     += -shared -lcutils -lhardware $(LIBDRM_LIBS)
+
+OBJS =  $(foreach source, $(SOURCES), $(addsuffix .o, $(basename $(source))))
+
+OBJECTS = $(addprefix $(TARGET_DIR), $(notdir $(OBJS)))
+LIBRARY = $(addprefix $(TARGET_DIR), $(GRALLOC))
+
+.PHONY: all clean
+
+all: $(LIBRARY)
+
+$(LIBRARY): $(OBJECTS)
+
+clean:
+	$(RM) $(LIBRARY)
+	$(RM) $(OBJECTS)
+
+$(LIBRARY):
+	$(CXX) $(CPPFLAGS) $(CXXFLAGS) $^ -o $@ $(LIBS)
+
+$(TARGET_DIR)%.o: %.cc
+	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $^ -o $@ -MMD
+
+$(TARGET_DIR)%.o: %.c
+	$(CC) $(CPPFLAGS) $(CFLAGS) -c $^ -o $@ -MMD
diff --git a/cros_gralloc/cros_alloc_device.cc b/cros_gralloc/cros_alloc_device.cc
new file mode 100644
index 0000000..5eda6b8
--- /dev/null
+++ b/cros_gralloc/cros_alloc_device.cc
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc.h"
+
+static struct cros_gralloc_bo *cros_gralloc_bo_create(struct driver *drv, int width, int height,
+						      int format, int usage)
+{
+	uint64_t drv_usage;
+	uint32_t drv_format;
+	struct combination *combo;
+	struct cros_gralloc_bo *bo;
+
+	drv_format = cros_gralloc_convert_format(format);
+	drv_format = drv_resolve_format(drv, drv_format);
+	drv_usage = cros_gralloc_convert_flags(usage);
+
+	combo = drv_get_combination(drv, drv_format, drv_usage);
+
+	if (!combo && (usage & GRALLOC_USAGE_HW_COMPOSER)) {
+		drv_usage &= ~BO_USE_SCANOUT;
+		combo = drv_get_combination(drv, drv_format, drv_usage);
+	}
+
+	if (!combo) {
+		cros_gralloc_error("Unsupported combination -- HAL format: %u, HAL flags: %u, "
+				   "drv_format: %4.4s, drv_flags: %llu",
+				   format, usage, reinterpret_cast<char *>(&drv_format),
+				   static_cast<unsigned long long>(drv_usage));
+		return NULL;
+	}
+
+	bo = new cros_gralloc_bo();
+
+	bo->bo = drv_bo_create(drv, width, height, drv_format, drv_usage);
+	if (!bo->bo) {
+		delete bo;
+		cros_gralloc_error("Failed to create bo.");
+		return NULL;
+	}
+
+	/*
+	 * If more than one kernel buffer is ever desired, this restriction can
+	 * be removed once the ArcCodec and Wayland services are able to send
+	 * more than one fd. GL/Vulkan drivers may also have to be modified.
+	 */
+	if (drv_num_buffers_per_bo(bo->bo) != 1) {
+		drv_bo_destroy(bo->bo);
+		delete bo;
+		cros_gralloc_error("Can only support one buffer per bo.");
+		return NULL;
+	}
+
+	bo->refcount = 1;
+
+	return bo;
+}
+
+static struct cros_gralloc_handle *cros_gralloc_handle_from_bo(struct bo *bo)
+{
+	uint64_t mod;
+	size_t num_planes;
+	struct cros_gralloc_handle *hnd;
+
+	hnd = new cros_gralloc_handle();
+
+	num_planes = drv_bo_get_num_planes(bo);
+
+	hnd->base.version = sizeof(hnd->base);
+	hnd->base.numFds = num_planes;
+	hnd->base.numInts = handle_data_size - num_planes;
+
+	for (size_t p = 0; p < num_planes; p++) {
+		hnd->fds[p] = drv_bo_get_plane_fd(bo, p);
+		hnd->strides[p] = drv_bo_get_plane_stride(bo, p);
+		hnd->offsets[p] = drv_bo_get_plane_offset(bo, p);
+		hnd->sizes[p] = drv_bo_get_plane_size(bo, p);
+
+		mod = drv_bo_get_plane_format_modifier(bo, p);
+		hnd->format_modifiers[2 * p] = static_cast<uint32_t>(mod >> 32);
+		hnd->format_modifiers[2 * p + 1] = static_cast<uint32_t>(mod);
+	}
+
+	hnd->width = drv_bo_get_width(bo);
+	hnd->height = drv_bo_get_height(bo);
+	hnd->format = drv_bo_get_format(bo);
+	hnd->pixel_stride = drv_bo_get_stride_in_pixels(bo);
+
+	hnd->magic = cros_gralloc_magic;
+
+	return hnd;
+}
+
+static int cros_gralloc_alloc(alloc_device_t *dev, int w, int h, int format, int usage,
+			      buffer_handle_t *handle, int *stride)
+{
+	auto mod = (struct cros_gralloc_module *)dev->common.module;
+	std::lock_guard<std::mutex> lock(mod->mutex);
+
+	auto bo = cros_gralloc_bo_create(mod->drv, w, h, format, usage);
+	if (!bo)
+		return CROS_GRALLOC_ERROR_NO_RESOURCES;
+
+	auto hnd = cros_gralloc_handle_from_bo(bo->bo);
+	hnd->droid_format = static_cast<int32_t>(format);
+	hnd->usage = static_cast<int32_t>(usage);
+
+	mod->handles[hnd].registrations = 0;
+	mod->handles[hnd].bo = bo;
+	bo->hnd = hnd;
+
+	mod->buffers[drv_bo_get_plane_handle(bo->bo, 0).u32] = bo;
+
+	*stride = static_cast<int>(hnd->pixel_stride);
+	*handle = &hnd->base;
+
+	return CROS_GRALLOC_ERROR_NONE;
+}
+
+static int cros_gralloc_free(alloc_device_t *dev, buffer_handle_t handle)
+{
+	struct cros_gralloc_bo *bo;
+	auto hnd = (struct cros_gralloc_handle *)handle;
+	auto mod = (struct cros_gralloc_module *)dev->common.module;
+	std::lock_guard<std::mutex> lock(mod->mutex);
+
+	if (cros_gralloc_validate_handle(hnd)) {
+		cros_gralloc_error("Invalid handle.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (cros_gralloc_validate_reference(mod, hnd, &bo)) {
+		cros_gralloc_error("Invalid Reference.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (mod->handles[hnd].registrations > 0) {
+		cros_gralloc_error("Deallocating before unregistering.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	return cros_gralloc_decrement_reference_count(mod, bo);
+}
+
+static int cros_gralloc_close(struct hw_device_t *dev)
+{
+	auto mod = (struct cros_gralloc_module *)dev->module;
+	auto alloc = (struct alloc_device_t *)dev;
+
+	if (mod->drv) {
+		drv_destroy(mod->drv);
+		mod->drv = NULL;
+	}
+
+	mod->buffers.clear();
+	mod->handles.clear();
+
+	delete alloc;
+
+	return CROS_GRALLOC_ERROR_NONE;
+}
+
+int cros_gralloc_open(const struct hw_module_t *mod, const char *name, struct hw_device_t **dev)
+{
+	auto module = (struct cros_gralloc_module *)mod;
+	std::lock_guard<std::mutex> lock(module->mutex);
+
+	if (module->drv)
+		return CROS_GRALLOC_ERROR_NONE;
+
+	if (strcmp(name, GRALLOC_HARDWARE_GPU0)) {
+		cros_gralloc_error("Incorrect device name - %s.", name);
+		return CROS_GRALLOC_ERROR_UNSUPPORTED;
+	}
+
+	if (cros_gralloc_rendernode_open(&module->drv)) {
+		cros_gralloc_error("Failed to open render node.");
+		return CROS_GRALLOC_ERROR_NO_RESOURCES;
+	}
+
+	auto alloc = new alloc_device_t();
+
+	alloc->alloc = cros_gralloc_alloc;
+	alloc->free = cros_gralloc_free;
+	alloc->common.tag = HARDWARE_DEVICE_TAG;
+	alloc->common.version = 0;
+	alloc->common.module = (hw_module_t *)mod;
+	alloc->common.close = cros_gralloc_close;
+
+	*dev = &alloc->common;
+
+	return CROS_GRALLOC_ERROR_NONE;
+}
diff --git a/cros_gralloc/cros_gralloc.h b/cros_gralloc/cros_gralloc.h
new file mode 100644
index 0000000..deca856
--- /dev/null
+++ b/cros_gralloc/cros_gralloc.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GBM_GRALLOC_H
+#define GBM_GRALLOC_H
+
+#include "cros_gralloc_helpers.h"
+
+#include <mutex>
+#include <unordered_map>
+#include <unordered_set>
+
+struct cros_gralloc_bo {
+	struct bo *bo;
+	int32_t refcount;
+	struct cros_gralloc_handle *hnd;
+	struct map_info *map_data;
+	int32_t lockcount;
+};
+
+struct handle_info {
+	cros_gralloc_bo *bo;
+	int32_t registrations;
+};
+
+struct cros_gralloc_module {
+	gralloc_module_t base;
+	struct driver *drv;
+	std::mutex mutex;
+	std::unordered_map<cros_gralloc_handle *, handle_info> handles;
+	std::unordered_map<uint32_t, cros_gralloc_bo *> buffers;
+};
+
+int cros_gralloc_open(const struct hw_module_t *mod, const char *name, struct hw_device_t **dev);
+
+int cros_gralloc_validate_reference(struct cros_gralloc_module *mod,
+				    struct cros_gralloc_handle *hnd, struct cros_gralloc_bo **obj);
+
+int cros_gralloc_decrement_reference_count(struct cros_gralloc_module *mod,
+					   struct cros_gralloc_bo *obj);
+
+#endif
diff --git a/cros_gralloc/cros_gralloc_handle.h b/cros_gralloc/cros_gralloc_handle.h
new file mode 100644
index 0000000..e2c0bcc
--- /dev/null
+++ b/cros_gralloc/cros_gralloc_handle.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef CROS_GRALLOC_HANDLE_H
+#define CROS_GRALLOC_HANDLE_H
+
+#include <cstdint>
+#include <cutils/native_handle.h>
+
+#define DRV_MAX_PLANES 4
+
+/*
+ * Only use 32-bit integers in the handle. This guarantees that the handle is
+ * densely packed (i.e., the compiler does not insert any padding).
+ */
+
+struct cros_gralloc_handle {
+	native_handle_t base;
+	int32_t fds[DRV_MAX_PLANES];
+	uint32_t strides[DRV_MAX_PLANES];
+	uint32_t offsets[DRV_MAX_PLANES];
+	uint32_t sizes[DRV_MAX_PLANES];
+	uint32_t format_modifiers[2 * DRV_MAX_PLANES];
+	uint32_t width;
+	uint32_t height;
+	uint32_t format; /* DRM format */
+	uint32_t magic;
+	uint32_t pixel_stride;
+	int32_t droid_format;
+	int32_t usage; /* Android usage. */
+};
+
+#endif
diff --git a/cros_gralloc/cros_gralloc_helpers.cc b/cros_gralloc/cros_gralloc_helpers.cc
new file mode 100644
index 0000000..6504b10
--- /dev/null
+++ b/cros_gralloc/cros_gralloc_helpers.cc
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc_helpers.h"
+
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cutils/log.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <xf86drm.h>
+
+uint64_t cros_gralloc_convert_flags(int flags)
+{
+	uint64_t usage = BO_USE_NONE;
+
+	if (flags & GRALLOC_USAGE_CURSOR)
+		usage |= BO_USE_NONE;
+	if ((flags & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_RARELY)
+		usage |= BO_USE_SW_READ_RARELY;
+	if ((flags & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
+		usage |= BO_USE_SW_READ_OFTEN;
+	if ((flags & GRALLOC_USAGE_SW_WRITE_MASK) == GRALLOC_USAGE_SW_WRITE_RARELY)
+		usage |= BO_USE_SW_WRITE_RARELY;
+	if ((flags & GRALLOC_USAGE_SW_WRITE_MASK) == GRALLOC_USAGE_SW_WRITE_OFTEN)
+		usage |= BO_USE_SW_WRITE_OFTEN;
+	if (flags & GRALLOC_USAGE_HW_TEXTURE)
+		usage |= BO_USE_TEXTURE;
+	if (flags & GRALLOC_USAGE_HW_RENDER)
+		usage |= BO_USE_RENDERING;
+	if (flags & GRALLOC_USAGE_HW_2D)
+		usage |= BO_USE_RENDERING;
+	if (flags & GRALLOC_USAGE_HW_COMPOSER)
+		/* HWC wants to use display hardware, but can defer to OpenGL. */
+		usage |= BO_USE_SCANOUT | BO_USE_TEXTURE;
+	if (flags & GRALLOC_USAGE_HW_FB)
+		usage |= BO_USE_NONE;
+	if (flags & GRALLOC_USAGE_EXTERNAL_DISP)
+		/* We're ignoring this flag until we decide what to do with display link. */
+		usage |= BO_USE_NONE;
+	if (flags & GRALLOC_USAGE_PROTECTED)
+		usage |= BO_USE_PROTECTED;
+	if (flags & GRALLOC_USAGE_HW_VIDEO_ENCODER)
+		/* HACK: See b/30054495 */
+		usage |= BO_USE_SW_READ_OFTEN;
+	if (flags & GRALLOC_USAGE_HW_CAMERA_WRITE)
+		usage |= BO_USE_HW_CAMERA_WRITE;
+	if (flags & GRALLOC_USAGE_HW_CAMERA_READ)
+		usage |= BO_USE_HW_CAMERA_READ;
+	if (flags & GRALLOC_USAGE_HW_CAMERA_ZSL)
+		usage |= BO_USE_HW_CAMERA_ZSL;
+	if (flags & GRALLOC_USAGE_RENDERSCRIPT)
+		/* We use CPU for compute. */
+		usage |= BO_USE_LINEAR;
+
+	return usage;
+}
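+
+/*
+ * Example: a producer requesting GRALLOC_USAGE_HW_TEXTURE |
+ * GRALLOC_USAGE_SW_WRITE_OFTEN maps to BO_USE_TEXTURE | BO_USE_SW_WRITE_OFTEN
+ * above; note that the SW read/write bits are compared against their masks
+ * rather than tested bit-by-bit.
+ */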
+
+uint32_t cros_gralloc_convert_format(int format)
+{
+	/*
+	 * Conversion from HAL to fourcc-based DRV formats based on
+	 * platform_android.c in mesa.
+	 */
+
+	switch (format) {
+	case HAL_PIXEL_FORMAT_BGRA_8888:
+		return DRM_FORMAT_ARGB8888;
+	case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+		return DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED;
+	case HAL_PIXEL_FORMAT_RGB_565:
+		return DRM_FORMAT_RGB565;
+	case HAL_PIXEL_FORMAT_RGB_888:
+		return DRM_FORMAT_RGB888;
+	case HAL_PIXEL_FORMAT_RGBA_8888:
+		return DRM_FORMAT_ABGR8888;
+	case HAL_PIXEL_FORMAT_RGBX_8888:
+		return DRM_FORMAT_XBGR8888;
+	case HAL_PIXEL_FORMAT_YCbCr_420_888:
+		return DRM_FORMAT_FLEX_YCbCr_420_888;
+	case HAL_PIXEL_FORMAT_YV12:
+		return DRM_FORMAT_YVU420_ANDROID;
+	/*
+	 * Choose DRM_FORMAT_R8 because <system/graphics.h> requires that buffers
+	 * with format HAL_PIXEL_FORMAT_BLOB have a height of 1 and a width equal
+	 * to their size in bytes.
+	 */
+	case HAL_PIXEL_FORMAT_BLOB:
+		return DRM_FORMAT_R8;
+	}
+
+	return DRM_FORMAT_NONE;
+}
+
+static int32_t cros_gralloc_query_rendernode(struct driver **drv, const char *undesired)
+{
+	/*
+	 * Create a driver from rendernode while filtering out
+	 * the specified undesired driver.
+	 *
+	 * TODO(gsingh): Enable render nodes on udl/evdi.
+	 */
+
+	int fd;
+	drmVersionPtr version;
+	char const *str = "%s/renderD%d";
+	int32_t num_nodes = 63;
+	int32_t min_node = 128;
+	int32_t max_node = (min_node + num_nodes);
+
+	for (int i = min_node; i < max_node; i++) {
+		char *node;
+
+		if (asprintf(&node, str, DRM_DIR_NAME, i) < 0)
+			continue;
+
+		fd = open(node, O_RDWR, 0);
+		free(node);
+
+		if (fd < 0)
+			continue;
+
+		version = drmGetVersion(fd);
+		if (!version) {
+			close(fd);
+			continue;
+		}
+
+		if (undesired && !strcmp(version->name, undesired)) {
+			drmFreeVersion(version);
+			close(fd);
+			continue;
+		}
+
+		drmFreeVersion(version);
+		*drv = drv_create(fd);
+
+		if (*drv)
+			return CROS_GRALLOC_ERROR_NONE;
+
+		close(fd);
+	}
+
+	return CROS_GRALLOC_ERROR_NO_RESOURCES;
+}
+
+int32_t cros_gralloc_rendernode_open(struct driver **drv)
+{
+	int32_t ret;
+	ret = cros_gralloc_query_rendernode(drv, "vgem");
+
+	/* Allow vgem driver if no hardware is found. */
+	if (ret)
+		ret = cros_gralloc_query_rendernode(drv, NULL);
+
+	return ret;
+}
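+
+/*
+ * The probe above walks /dev/dri/renderD128 through renderD190. For example,
+ * on a device whose only render node is backed by vgem, the first pass fails
+ * and the fallback pass accepts the vgem node.
+ */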
+
+int32_t cros_gralloc_validate_handle(struct cros_gralloc_handle *hnd)
+{
+	if (!hnd || hnd->magic != cros_gralloc_magic)
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+
+	return CROS_GRALLOC_ERROR_NONE;
+}
+
+void cros_gralloc_log(const char *prefix, const char *file, int line, const char *format, ...)
+{
+	char buf[50];
+	snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line);
+
+	va_list args;
+	va_start(args, format);
+	__android_log_vprint(ANDROID_LOG_ERROR, buf, format, args);
+	va_end(args);
+}
diff --git a/cros_gralloc/cros_gralloc_helpers.h b/cros_gralloc/cros_gralloc_helpers.h
new file mode 100644
index 0000000..457fcac
--- /dev/null
+++ b/cros_gralloc/cros_gralloc_helpers.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef CROS_GRALLOC_HELPERS_H
+#define CROS_GRALLOC_HELPERS_H
+
+#include "../drv.h"
+#include "cros_gralloc_handle.h"
+
+#include <hardware/gralloc.h>
+#include <system/graphics.h>
+
+/* Use error codes derived from gralloc1 to make the transition easier when it
+ * happens.
+ */
+typedef enum {
+	CROS_GRALLOC_ERROR_NONE = 0,
+	CROS_GRALLOC_ERROR_BAD_DESCRIPTOR = 1,
+	CROS_GRALLOC_ERROR_BAD_HANDLE = 2,
+	CROS_GRALLOC_ERROR_BAD_VALUE = 3,
+	CROS_GRALLOC_ERROR_NOT_SHARED = 4,
+	CROS_GRALLOC_ERROR_NO_RESOURCES = 5,
+	CROS_GRALLOC_ERROR_UNDEFINED = 6,
+	CROS_GRALLOC_ERROR_UNSUPPORTED = 7,
+} cros_gralloc_error_t;
+
+/* This enumeration must match the one in <gralloc_drm.h>.
+ * The functions supported by this gralloc's temporary private API are listed
+ * below. Use of these functions is highly discouraged and should be reserved
+ * for cases where no alternative exists to obtain the same information (such
+ * as querying ANativeWindow).
+ */
+// clang-format off
+enum {
+	GRALLOC_DRM_GET_STRIDE,
+	GRALLOC_DRM_GET_FORMAT,
+	GRALLOC_DRM_GET_DIMENSIONS,
+	GRALLOC_DRM_GET_BACKING_STORE,
+};
+// clang-format on
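+
+/*
+ * Example (sketch) of calling into this private API, assuming "module" is the
+ * gralloc_module_t in use and "handle" is a registered buffer_handle_t:
+ *
+ *   uint32_t stride;
+ *   module->perform(module, GRALLOC_DRM_GET_STRIDE, handle, &stride);
+ */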
+
+constexpr uint32_t cros_gralloc_magic = 0xABCDDCBA;
+
+constexpr uint32_t handle_data_size =
+    ((sizeof(struct cros_gralloc_handle) - offsetof(cros_gralloc_handle, fds[0])) / sizeof(int));
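+
+/*
+ * With DRV_MAX_PLANES == 4 this works out to 31 ints of payload; for example,
+ * a two-plane NV12 handle is exported with numFds = 2 and
+ * numInts = handle_data_size - 2 = 29 (see cros_gralloc_handle_from_bo).
+ */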
+
+constexpr uint32_t sw_access = GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK;
+
+uint64_t cros_gralloc_convert_flags(int flags);
+
+uint32_t cros_gralloc_convert_format(int format);
+
+int32_t cros_gralloc_rendernode_open(struct driver **drv);
+
+int32_t cros_gralloc_validate_handle(struct cros_gralloc_handle *hnd);
+
+/* Logging code adapted from bsdrm */
+__attribute__((format(printf, 4, 5))) void cros_gralloc_log(const char *prefix, const char *file,
+							    int line, const char *format, ...);
+
+#define cros_gralloc_error(...)                                                                    \
+	do {                                                                                       \
+		cros_gralloc_log("CROS_GRALLOC_ERROR", __FILE__, __LINE__, __VA_ARGS__);           \
+	} while (0)
+
+#endif
diff --git a/cros_gralloc/cros_gralloc_module.cc b/cros_gralloc/cros_gralloc_module.cc
new file mode 100644
index 0000000..7123da2
--- /dev/null
+++ b/cros_gralloc/cros_gralloc_module.cc
@@ -0,0 +1,382 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc.h"
+
+#include <cassert>
+#include <sys/mman.h>
+#include <xf86drm.h>
+
+int cros_gralloc_validate_reference(struct cros_gralloc_module *mod,
+				    struct cros_gralloc_handle *hnd, struct cros_gralloc_bo **bo)
+{
+	if (!mod->handles.count(hnd))
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+
+	*bo = mod->handles[hnd].bo;
+	return CROS_GRALLOC_ERROR_NONE;
+}
+
+int cros_gralloc_decrement_reference_count(struct cros_gralloc_module *mod,
+					   struct cros_gralloc_bo *bo)
+{
+	if (bo->refcount <= 0) {
+		cros_gralloc_error("The reference count is <= 0.");
+		assert(0);
+	}
+
+	if (!--bo->refcount) {
+		mod->buffers.erase(drv_bo_get_plane_handle(bo->bo, 0).u32);
+		drv_bo_destroy(bo->bo);
+
+		if (bo->hnd) {
+			mod->handles.erase(bo->hnd);
+			native_handle_close(&bo->hnd->base);
+			delete bo->hnd;
+		}
+
+		delete bo;
+	}
+
+	return CROS_GRALLOC_ERROR_NONE;
+}
+
+static int cros_gralloc_register_buffer(struct gralloc_module_t const *module,
+					buffer_handle_t handle)
+{
+	uint32_t id;
+	struct cros_gralloc_bo *bo;
+	auto hnd = (struct cros_gralloc_handle *)handle;
+	auto mod = (struct cros_gralloc_module *)module;
+	std::lock_guard<std::mutex> lock(mod->mutex);
+
+	if (cros_gralloc_validate_handle(hnd)) {
+		cros_gralloc_error("Invalid handle.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (!mod->drv) {
+		if (cros_gralloc_rendernode_open(&mod->drv)) {
+			cros_gralloc_error("Failed to open render node.");
+			return CROS_GRALLOC_ERROR_NO_RESOURCES;
+		}
+	}
+
+	if (!cros_gralloc_validate_reference(mod, hnd, &bo)) {
+		bo->refcount++;
+		mod->handles[hnd].registrations++;
+		return CROS_GRALLOC_ERROR_NONE;
+	}
+
+	if (drmPrimeFDToHandle(drv_get_fd(mod->drv), hnd->fds[0], &id)) {
+		cros_gralloc_error("drmPrimeFDToHandle failed.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (mod->buffers.count(id)) {
+		bo = mod->buffers[id];
+		bo->refcount++;
+	} else {
+		struct drv_import_fd_data data;
+		data.format = hnd->format;
+		data.width = hnd->width;
+		data.height = hnd->height;
+
+		memcpy(data.fds, hnd->fds, sizeof(data.fds));
+		memcpy(data.strides, hnd->strides, sizeof(data.strides));
+		memcpy(data.offsets, hnd->offsets, sizeof(data.offsets));
+		memcpy(data.sizes, hnd->sizes, sizeof(data.sizes));
+		for (uint32_t p = 0; p < DRV_MAX_PLANES; p++) {
+			data.format_modifiers[p] =
+			    static_cast<uint64_t>(hnd->format_modifiers[2 * p]) << 32;
+			data.format_modifiers[p] |= hnd->format_modifiers[2 * p + 1];
+		}
+
+		bo = new cros_gralloc_bo();
+		bo->bo = drv_bo_import(mod->drv, &data);
+		if (!bo->bo) {
+			delete bo;
+			return CROS_GRALLOC_ERROR_NO_RESOURCES;
+		}
+
+		id = drv_bo_get_plane_handle(bo->bo, 0).u32;
+		mod->buffers[id] = bo;
+
+		bo->refcount = 1;
+	}
+
+	mod->handles[hnd].bo = bo;
+	mod->handles[hnd].registrations = 1;
+
+	return CROS_GRALLOC_ERROR_NONE;
+}
+
+static int cros_gralloc_unregister_buffer(struct gralloc_module_t const *module,
+					  buffer_handle_t handle)
+{
+	struct cros_gralloc_bo *bo;
+	auto hnd = (struct cros_gralloc_handle *)handle;
+	auto mod = (struct cros_gralloc_module *)module;
+	std::lock_guard<std::mutex> lock(mod->mutex);
+
+	if (cros_gralloc_validate_handle(hnd)) {
+		cros_gralloc_error("Invalid handle.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (cros_gralloc_validate_reference(mod, hnd, &bo)) {
+		cros_gralloc_error("Invalid Reference.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (mod->handles[hnd].registrations <= 0) {
+		cros_gralloc_error("Handle not registered.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	mod->handles[hnd].registrations--;
+
+	if (!mod->handles[hnd].registrations)
+		mod->handles.erase(hnd);
+
+	return cros_gralloc_decrement_reference_count(mod, bo);
+}
+
+static int cros_gralloc_lock(struct gralloc_module_t const *module, buffer_handle_t handle,
+			     int usage, int l, int t, int w, int h, void **vaddr)
+{
+	struct cros_gralloc_bo *bo;
+	auto mod = (struct cros_gralloc_module *)module;
+	auto hnd = (struct cros_gralloc_handle *)handle;
+	std::lock_guard<std::mutex> lock(mod->mutex);
+
+	if (cros_gralloc_validate_handle(hnd)) {
+		cros_gralloc_error("Invalid handle.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (cros_gralloc_validate_reference(mod, hnd, &bo)) {
+		cros_gralloc_error("Invalid Reference.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (hnd->droid_format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+		cros_gralloc_error("HAL_PIXEL_FORMAT_YCbCr_*_888 format not compatible.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (sw_access & usage) {
+		if (bo->map_data) {
+			*vaddr = bo->map_data->addr;
+		} else {
+			*vaddr = drv_bo_map(bo->bo, 0, 0, drv_bo_get_width(bo->bo),
+					    drv_bo_get_height(bo->bo), 0, &bo->map_data, 0);
+		}
+
+		if (*vaddr == MAP_FAILED) {
+			cros_gralloc_error("Mapping failed.");
+			return CROS_GRALLOC_ERROR_UNSUPPORTED;
+		}
+	}
+
+	bo->lockcount++;
+
+	return CROS_GRALLOC_ERROR_NONE;
+}
+
+static int cros_gralloc_unlock(struct gralloc_module_t const *module, buffer_handle_t handle)
+{
+	struct cros_gralloc_bo *bo;
+	auto hnd = (struct cros_gralloc_handle *)handle;
+	auto mod = (struct cros_gralloc_module *)module;
+	std::lock_guard<std::mutex> lock(mod->mutex);
+
+	if (cros_gralloc_validate_handle(hnd)) {
+		cros_gralloc_error("Invalid handle.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (cros_gralloc_validate_reference(mod, hnd, &bo)) {
+		cros_gralloc_error("Invalid Reference.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (!--bo->lockcount && bo->map_data) {
+		drv_bo_unmap(bo->bo, bo->map_data);
+		bo->map_data = NULL;
+	}
+
+	return CROS_GRALLOC_ERROR_NONE;
+}
+
+static int cros_gralloc_perform(struct gralloc_module_t const *module, int op, ...)
+{
+	va_list args;
+	struct cros_gralloc_bo *bo;
+	int32_t *out_format;
+	uint64_t *out_store;
+	buffer_handle_t handle;
+	uint32_t *out_width, *out_height, *out_stride;
+	auto mod = (struct cros_gralloc_module *)module;
+	std::lock_guard<std::mutex> lock(mod->mutex);
+
+	switch (op) {
+	case GRALLOC_DRM_GET_STRIDE:
+	case GRALLOC_DRM_GET_FORMAT:
+	case GRALLOC_DRM_GET_DIMENSIONS:
+	case GRALLOC_DRM_GET_BACKING_STORE:
+		break;
+	default:
+		return CROS_GRALLOC_ERROR_UNSUPPORTED;
+	}
+
+	va_start(args, op);
+	handle = va_arg(args, buffer_handle_t);
+	auto hnd = (struct cros_gralloc_handle *)handle;
+
+	if (cros_gralloc_validate_handle(hnd)) {
+		cros_gralloc_error("Invalid handle.");
+		va_end(args);
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (cros_gralloc_validate_reference(mod, hnd, &bo)) {
+		cros_gralloc_error("Invalid Reference.");
+		va_end(args);
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	switch (op) {
+	case GRALLOC_DRM_GET_STRIDE:
+		out_stride = va_arg(args, uint32_t *);
+		*out_stride = hnd->pixel_stride;
+		break;
+	case GRALLOC_DRM_GET_FORMAT:
+		out_format = va_arg(args, int32_t *);
+		*out_format = hnd->droid_format;
+		break;
+	case GRALLOC_DRM_GET_DIMENSIONS:
+		out_width = va_arg(args, uint32_t *);
+		out_height = va_arg(args, uint32_t *);
+		*out_width = hnd->width;
+		*out_height = hnd->height;
+		break;
+	case GRALLOC_DRM_GET_BACKING_STORE:
+		out_store = va_arg(args, uint64_t *);
+		*out_store = drv_bo_get_plane_handle(bo->bo, 0).u64;
+		break;
+	default:
+		va_end(args);
+		return CROS_GRALLOC_ERROR_UNSUPPORTED;
+	}
+
+	va_end(args);
+
+	return CROS_GRALLOC_ERROR_NONE;
+}
+
+static int cros_gralloc_lock_ycbcr(struct gralloc_module_t const *module, buffer_handle_t handle,
+				   int usage, int l, int t, int w, int h,
+				   struct android_ycbcr *ycbcr)
+{
+	uint8_t *addr = NULL;
+	size_t offsets[DRV_MAX_PLANES];
+	struct cros_gralloc_bo *bo;
+	auto hnd = (struct cros_gralloc_handle *)handle;
+	auto mod = (struct cros_gralloc_module *)module;
+	std::lock_guard<std::mutex> lock(mod->mutex);
+
+	if (cros_gralloc_validate_handle(hnd)) {
+		cros_gralloc_error("Invalid handle.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (cros_gralloc_validate_reference(mod, hnd, &bo)) {
+		cros_gralloc_error("Invalid Reference.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if ((hnd->droid_format != HAL_PIXEL_FORMAT_YCbCr_420_888) &&
+	    (hnd->droid_format != HAL_PIXEL_FORMAT_YV12)) {
+		cros_gralloc_error("Non-YUV format not compatible.");
+		return CROS_GRALLOC_ERROR_BAD_HANDLE;
+	}
+
+	if (sw_access & usage) {
+		void *vaddr;
+		if (bo->map_data) {
+			vaddr = bo->map_data->addr;
+		} else {
+			vaddr = drv_bo_map(bo->bo, 0, 0, drv_bo_get_width(bo->bo),
+					   drv_bo_get_height(bo->bo), 0, &bo->map_data, 0);
+		}
+
+		if (vaddr == MAP_FAILED) {
+			cros_gralloc_error("Mapping failed.");
+			return CROS_GRALLOC_ERROR_UNSUPPORTED;
+		}
+
+		addr = static_cast<uint8_t *>(vaddr);
+	}
+
+	for (size_t p = 0; p < drv_bo_get_num_planes(bo->bo); p++)
+		offsets[p] = drv_bo_get_plane_offset(bo->bo, p);
+
+	switch (hnd->format) {
+	case DRM_FORMAT_NV12:
+		ycbcr->y = addr;
+		ycbcr->cb = addr + offsets[1];
+		ycbcr->cr = addr + offsets[1] + 1;
+		ycbcr->ystride = drv_bo_get_plane_stride(bo->bo, 0);
+		ycbcr->cstride = drv_bo_get_plane_stride(bo->bo, 1);
+		ycbcr->chroma_step = 2;
+		break;
+	case DRM_FORMAT_YVU420_ANDROID:
+		ycbcr->y = addr;
+		ycbcr->cb = addr + offsets[2];
+		ycbcr->cr = addr + offsets[1];
+		ycbcr->ystride = drv_bo_get_plane_stride(bo->bo, 0);
+		ycbcr->cstride = drv_bo_get_plane_stride(bo->bo, 1);
+		ycbcr->chroma_step = 1;
+		break;
+	case DRM_FORMAT_UYVY:
+		ycbcr->y = addr + 1;
+		ycbcr->cb = addr;
+		ycbcr->cr = addr + 2;
+		ycbcr->ystride = drv_bo_get_plane_stride(bo->bo, 0);
+		ycbcr->cstride = drv_bo_get_plane_stride(bo->bo, 0);
+		ycbcr->chroma_step = 2;
+		break;
+	default:
+		return CROS_GRALLOC_ERROR_UNSUPPORTED;
+	}
+
+	bo->lockcount++;
+
+	return CROS_GRALLOC_ERROR_NONE;
+}
+
+static struct hw_module_methods_t cros_gralloc_module_methods = { .open = cros_gralloc_open };
+
+struct cros_gralloc_module HAL_MODULE_INFO_SYM = {
+	.base =
+	    {
+		.common =
+		    {
+			.tag = HARDWARE_MODULE_TAG,
+			.module_api_version = GRALLOC_MODULE_API_VERSION_0_2,
+			.hal_api_version = 0,
+			.id = GRALLOC_HARDWARE_MODULE_ID,
+			.name = "CrOS Gralloc",
+			.author = "Chrome OS",
+			.methods = &cros_gralloc_module_methods,
+		    },
+		.registerBuffer = cros_gralloc_register_buffer,
+		.unregisterBuffer = cros_gralloc_unregister_buffer,
+		.lock = cros_gralloc_lock,
+		.unlock = cros_gralloc_unlock,
+		.perform = cros_gralloc_perform,
+		.lock_ycbcr = cros_gralloc_lock_ycbcr,
+	    },
+
+	.drv = NULL,
+};
diff --git a/drv.c b/drv.c
new file mode 100644
index 0000000..bcc8cfa
--- /dev/null
+++ b/drv.c
@@ -0,0 +1,585 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <xf86drm.h>
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+#ifdef DRV_AMDGPU
+extern struct backend backend_amdgpu;
+#endif
+extern struct backend backend_cirrus;
+extern struct backend backend_evdi;
+#ifdef DRV_EXYNOS
+extern struct backend backend_exynos;
+#endif
+extern struct backend backend_gma500;
+#ifdef DRV_I915
+extern struct backend backend_i915;
+#endif
+#ifdef DRV_MARVELL
+extern struct backend backend_marvell;
+#endif
+#ifdef DRV_MEDIATEK
+extern struct backend backend_mediatek;
+#endif
+extern struct backend backend_nouveau;
+#ifdef DRV_ROCKCHIP
+extern struct backend backend_rockchip;
+#endif
+#ifdef DRV_TEGRA
+extern struct backend backend_tegra;
+#endif
+extern struct backend backend_udl;
+#ifdef DRV_VC4
+extern struct backend backend_vc4;
+#endif
+extern struct backend backend_vgem;
+extern struct backend backend_virtio_gpu;
+
+static struct backend *drv_get_backend(int fd)
+{
+	drmVersionPtr drm_version;
+	unsigned int i;
+
+	drm_version = drmGetVersion(fd);
+
+	if (!drm_version)
+		return NULL;
+
+	struct backend *backend_list[] = {
+#ifdef DRV_AMDGPU
+		&backend_amdgpu,
+#endif
+		&backend_cirrus,   &backend_evdi,
+#ifdef DRV_EXYNOS
+		&backend_exynos,
+#endif
+		&backend_gma500,
+#ifdef DRV_I915
+		&backend_i915,
+#endif
+#ifdef DRV_MARVELL
+		&backend_marvell,
+#endif
+#ifdef DRV_MEDIATEK
+		&backend_mediatek,
+#endif
+		&backend_nouveau,
+#ifdef DRV_ROCKCHIP
+		&backend_rockchip,
+#endif
+#ifdef DRV_TEGRA
+		&backend_tegra,
+#endif
+		&backend_udl,
+#ifdef DRV_VC4
+		&backend_vc4,
+#endif
+		&backend_vgem,     &backend_virtio_gpu,
+	};
+
+	for (i = 0; i < ARRAY_SIZE(backend_list); i++)
+		if (!strcmp(drm_version->name, backend_list[i]->name)) {
+			drmFreeVersion(drm_version);
+			return backend_list[i];
+		}
+
+	drmFreeVersion(drm_version);
+	return NULL;
+}
+
+struct driver *drv_create(int fd)
+{
+	struct driver *drv;
+	int ret;
+
+	drv = (struct driver *)calloc(1, sizeof(*drv));
+
+	if (!drv)
+		return NULL;
+
+	drv->fd = fd;
+	drv->backend = drv_get_backend(fd);
+
+	if (!drv->backend)
+		goto free_driver;
+
+	if (pthread_mutex_init(&drv->driver_lock, NULL))
+		goto free_driver;
+
+	drv->buffer_table = drmHashCreate();
+	if (!drv->buffer_table)
+		goto free_lock;
+
+	drv->map_table = drmHashCreate();
+	if (!drv->map_table)
+		goto free_buffer_table;
+
+	/* Start with a power of 2 number of allocations. */
+	drv->backend->combos.allocations = 2;
+	drv->backend->combos.size = 0;
+	drv->backend->combos.data =
+	    calloc(drv->backend->combos.allocations, sizeof(struct combination));
+	if (!drv->backend->combos.data)
+		goto free_map_table;
+
+	if (drv->backend->init) {
+		ret = drv->backend->init(drv);
+		if (ret) {
+			free(drv->backend->combos.data);
+			goto free_map_table;
+		}
+	}
+
+	return drv;
+
+free_map_table:
+	drmHashDestroy(drv->map_table);
+free_buffer_table:
+	drmHashDestroy(drv->buffer_table);
+free_lock:
+	pthread_mutex_destroy(&drv->driver_lock);
+free_driver:
+	free(drv);
+	return NULL;
+}
+
+void drv_destroy(struct driver *drv)
+{
+	pthread_mutex_lock(&drv->driver_lock);
+
+	if (drv->backend->close)
+		drv->backend->close(drv);
+
+	drmHashDestroy(drv->buffer_table);
+	drmHashDestroy(drv->map_table);
+
+	free(drv->backend->combos.data);
+
+	pthread_mutex_unlock(&drv->driver_lock);
+	pthread_mutex_destroy(&drv->driver_lock);
+
+	free(drv);
+}
+
+int drv_get_fd(struct driver *drv)
+{
+	return drv->fd;
+}
+
+const char *drv_get_name(struct driver *drv)
+{
+	return drv->backend->name;
+}
+
+struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t usage)
+{
+	struct combination *curr, *best;
+
+	if (format == DRM_FORMAT_NONE || usage == BO_USE_NONE)
+		return NULL;
+
+	best = NULL;
+	uint32_t i;
+	for (i = 0; i < drv->backend->combos.size; i++) {
+		curr = &drv->backend->combos.data[i];
+		if ((format == curr->format) && usage == (curr->usage & usage))
+			if (!best || best->metadata.priority < curr->metadata.priority)
+				best = curr;
+	}
+
+	return best;
+}
+
+struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format)
+{
+
+	struct bo *bo;
+	bo = (struct bo *)calloc(1, sizeof(*bo));
+
+	if (!bo)
+		return NULL;
+
+	bo->drv = drv;
+	bo->width = width;
+	bo->height = height;
+	bo->format = format;
+	bo->num_planes = drv_num_planes_from_format(format);
+
+	if (!bo->num_planes) {
+		free(bo);
+		return NULL;
+	}
+
+	return bo;
+}
+
+struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
+			 uint64_t flags)
+{
+	int ret;
+	size_t plane;
+	struct bo *bo;
+
+	bo = drv_bo_new(drv, width, height, format);
+
+	if (!bo)
+		return NULL;
+
+	ret = drv->backend->bo_create(bo, width, height, format, flags);
+
+	if (ret) {
+		free(bo);
+		return NULL;
+	}
+
+	pthread_mutex_lock(&drv->driver_lock);
+
+	for (plane = 0; plane < bo->num_planes; plane++)
+		drv_increment_reference_count(drv, bo, plane);
+
+	pthread_mutex_unlock(&drv->driver_lock);
+
+	return bo;
+}
+
+struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
+					uint32_t format, const uint64_t *modifiers, uint32_t count)
+{
+	int ret;
+	size_t plane;
+	struct bo *bo;
+
+	if (!drv->backend->bo_create_with_modifiers) {
+		errno = ENOENT;
+		return NULL;
+	}
+
+	bo = drv_bo_new(drv, width, height, format);
+
+	if (!bo)
+		return NULL;
+
+	ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers, count);
+
+	if (ret) {
+		free(bo);
+		return NULL;
+	}
+
+	pthread_mutex_lock(&drv->driver_lock);
+
+	for (plane = 0; plane < bo->num_planes; plane++)
+		drv_increment_reference_count(drv, bo, plane);
+
+	pthread_mutex_unlock(&drv->driver_lock);
+
+	return bo;
+}
+
+void drv_bo_destroy(struct bo *bo)
+{
+	size_t plane;
+	uintptr_t total = 0;
+	struct driver *drv = bo->drv;
+
+	pthread_mutex_lock(&drv->driver_lock);
+
+	for (plane = 0; plane < bo->num_planes; plane++)
+		drv_decrement_reference_count(drv, bo, plane);
+
+	for (plane = 0; plane < bo->num_planes; plane++)
+		total += drv_get_reference_count(drv, bo, plane);
+
+	pthread_mutex_unlock(&drv->driver_lock);
+
+	if (total == 0)
+		bo->drv->backend->bo_destroy(bo);
+
+	free(bo);
+}
+
+struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
+{
+	int ret;
+	size_t plane;
+	struct bo *bo;
+
+	bo = drv_bo_new(drv, data->width, data->height, data->format);
+
+	if (!bo)
+		return NULL;
+
+	ret = drv->backend->bo_import(bo, data);
+	if (ret) {
+		free(bo);
+		return NULL;
+	}
+
+	for (plane = 0; plane < bo->num_planes; plane++) {
+		bo->strides[plane] = data->strides[plane];
+		bo->offsets[plane] = data->offsets[plane];
+		bo->sizes[plane] = data->sizes[plane];
+		bo->format_modifiers[plane] = data->format_modifiers[plane];
+		bo->total_size += data->sizes[plane];
+	}
+
+	return bo;
+}
+
+void *drv_bo_map(struct bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
+		 uint32_t flags, struct map_info **map_data, size_t plane)
+{
+	void *ptr;
+	uint8_t *addr;
+	size_t offset;
+	struct map_info *data;
+
+	assert(width > 0);
+	assert(height > 0);
+	assert(x + width <= drv_bo_get_width(bo));
+	assert(y + height <= drv_bo_get_height(bo));
+
+	pthread_mutex_lock(&bo->drv->driver_lock);
+
+	if (!drmHashLookup(bo->drv->map_table, bo->handles[plane].u32, &ptr)) {
+		data = (struct map_info *)ptr;
+		data->refcount++;
+		goto success;
+	}
+
+	data = calloc(1, sizeof(*data));
+	addr = bo->drv->backend->bo_map(bo, data, plane);
+	if (addr == MAP_FAILED) {
+		*map_data = NULL;
+		free(data);
+		pthread_mutex_unlock(&bo->drv->driver_lock);
+		return MAP_FAILED;
+	}
+
+	data->refcount = 1;
+	data->addr = addr;
+	data->handle = bo->handles[plane].u32;
+	drmHashInsert(bo->drv->map_table, bo->handles[plane].u32, (void *)data);
+
+success:
+	*map_data = data;
+	offset = drv_bo_get_plane_stride(bo, plane) * y;
+	offset += drv_stride_from_format(bo->format, x, plane);
+	addr = (uint8_t *)data->addr;
+	addr += drv_bo_get_plane_offset(bo, plane) + offset;
+	pthread_mutex_unlock(&bo->drv->driver_lock);
+
+	return (void *)addr;
+}
+
+int drv_bo_unmap(struct bo *bo, struct map_info *data)
+{
+	int ret = 0;
+
+	assert(data);
+	assert(data->refcount >= 0);
+
+	pthread_mutex_lock(&bo->drv->driver_lock);
+
+	if (!--data->refcount) {
+		if (bo->drv->backend->bo_unmap)
+			ret = bo->drv->backend->bo_unmap(bo, data);
+		else
+			ret = munmap(data->addr, data->length);
+		drmHashDelete(bo->drv->map_table, data->handle);
+		free(data);
+	}
+
+	pthread_mutex_unlock(&bo->drv->driver_lock);
+
+	return ret;
+}
+
+uint32_t drv_bo_get_width(struct bo *bo)
+{
+	return bo->width;
+}
+
+uint32_t drv_bo_get_height(struct bo *bo)
+{
+	return bo->height;
+}
+
+uint32_t drv_bo_get_stride_or_tiling(struct bo *bo)
+{
+	return bo->tiling ? bo->tiling : drv_bo_get_plane_stride(bo, 0);
+}
+
+size_t drv_bo_get_num_planes(struct bo *bo)
+{
+	return bo->num_planes;
+}
+
+union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
+{
+	return bo->handles[plane];
+}
+
+#ifndef DRM_RDWR
+#define DRM_RDWR O_RDWR
+#endif
+
+int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
+{
+
+	int ret, fd;
+	assert(plane < bo->num_planes);
+
+	ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);
+
+	return (ret) ? ret : fd;
+}
+
+uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
+{
+	assert(plane < bo->num_planes);
+	return bo->offsets[plane];
+}
+
+uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
+{
+	assert(plane < bo->num_planes);
+	return bo->sizes[plane];
+}
+
+uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
+{
+	assert(plane < bo->num_planes);
+	return bo->strides[plane];
+}
+
+uint64_t drv_bo_get_plane_format_modifier(struct bo *bo, size_t plane)
+{
+	assert(plane < bo->num_planes);
+	return bo->format_modifiers[plane];
+}
+
+uint32_t drv_bo_get_format(struct bo *bo)
+{
+	return bo->format;
+}
+
+uint32_t drv_resolve_format(struct driver *drv, uint32_t format)
+{
+	if (drv->backend->resolve_format)
+		return drv->backend->resolve_format(format);
+
+	return format;
+}
+
+size_t drv_num_planes_from_format(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_ABGR4444:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ARGB4444:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_AYUV:
+	case DRM_FORMAT_BGR233:
+	case DRM_FORMAT_BGR565:
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_BGRA8888:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_BGRX4444:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_GR88:
+	case DRM_FORMAT_R8:
+	case DRM_FORMAT_RG88:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_RGBA4444:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_RGBX4444:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_XBGR4444:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+		return 1;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+		return 2;
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YVU420_ANDROID:
+		return 3;
+	}
+
+	fprintf(stderr, "drv: UNKNOWN FORMAT %d\n", format);
+	return 0;
+}
+
+uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane)
+{
+	assert(plane < drv_num_planes_from_format(format));
+	uint32_t vertical_subsampling;
+
+	switch (format) {
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YVU420_ANDROID:
+		vertical_subsampling = (plane == 0) ? 1 : 2;
+		break;
+	default:
+		vertical_subsampling = 1;
+	}
+
+	return stride * DIV_ROUND_UP(height, vertical_subsampling);
+}
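+
+/*
+ * Worked example: an NV12 buffer with height 1080 and a 1920-byte stride on
+ * both planes yields 1920 * 1080 bytes for the Y plane and
+ * 1920 * DIV_ROUND_UP(1080, 2) = 1920 * 540 bytes for the interleaved CbCr
+ * plane.
+ */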
+
+uint32_t drv_num_buffers_per_bo(struct bo *bo)
+{
+	uint32_t count = 0;
+	size_t plane, p;
+
+	for (plane = 0; plane < bo->num_planes; plane++) {
+		for (p = 0; p < plane; p++)
+			if (bo->handles[p].u32 == bo->handles[plane].u32)
+				break;
+		if (p == plane)
+			count++;
+	}
+
+	return count;
+}
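+
+/*
+ * For example, a three-plane YVU420 bo whose planes all share one GEM handle
+ * reports a single buffer, while a bo with a distinct handle per plane
+ * reports three.
+ */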
diff --git a/drv.h b/drv.h
new file mode 100644
index 0000000..f478a66
--- /dev/null
+++ b/drv.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef DRV_H_
+#define DRV_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <drm_fourcc.h>
+#include <stdint.h>
+
+#define DRV_MAX_PLANES 4
+
+// clang-format off
+/* Use flags */
+#define BO_USE_NONE			0
+#define BO_USE_SCANOUT			(1ull << 0)
+#define BO_USE_CURSOR			(1ull << 1)
+#define BO_USE_CURSOR_64X64		BO_USE_CURSOR
+#define BO_USE_RENDERING		(1ull << 2)
+#define BO_USE_LINEAR			(1ull << 3)
+#define BO_USE_SW_READ_NEVER		(1ull << 4)
+#define BO_USE_SW_READ_RARELY		(1ull << 5)
+#define BO_USE_SW_READ_OFTEN		(1ull << 6)
+#define BO_USE_SW_WRITE_NEVER		(1ull << 7)
+#define BO_USE_SW_WRITE_RARELY		(1ull << 8)
+#define BO_USE_SW_WRITE_OFTEN		(1ull << 9)
+#define BO_USE_EXTERNAL_DISP		(1ull << 10)
+#define BO_USE_PROTECTED		(1ull << 11)
+#define BO_USE_HW_VIDEO_ENCODER		(1ull << 12)
+#define BO_USE_HW_CAMERA_WRITE		(1ull << 13)
+#define BO_USE_HW_CAMERA_READ		(1ull << 14)
+#define BO_USE_HW_CAMERA_ZSL		(1ull << 15)
+#define BO_USE_RENDERSCRIPT		(1ull << 16)
+#define BO_USE_TEXTURE			(1ull << 17)
+
+/* This is our extension to <drm_fourcc.h>.  We need to make sure we don't step
+ * on the namespace of already defined formats, which can be done by using invalid
+ * fourcc codes.
+ */
+
+#define DRM_FORMAT_NONE				fourcc_code('0', '0', '0', '0')
+#define DRM_FORMAT_YVU420_ANDROID		fourcc_code('9', '9', '9', '7')
+#define DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED	fourcc_code('9', '9', '9', '8')
+#define DRM_FORMAT_FLEX_YCbCr_420_888		fourcc_code('9', '9', '9', '9')
+
+// clang-format on
+struct driver;
+struct bo;
+struct combination;
+
+union bo_handle {
+	void *ptr;
+	int32_t s32;
+	uint32_t u32;
+	int64_t s64;
+	uint64_t u64;
+};
+
+struct drv_import_fd_data {
+	int fds[DRV_MAX_PLANES];
+	uint32_t strides[DRV_MAX_PLANES];
+	uint32_t offsets[DRV_MAX_PLANES];
+	uint32_t sizes[DRV_MAX_PLANES];
+	uint64_t format_modifiers[DRV_MAX_PLANES];
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+};
+
+struct map_info {
+	void *addr;
+	size_t length;
+	uint32_t handle;
+	int32_t refcount;
+	void *priv;
+};
+
+struct driver *drv_create(int fd);
+
+void drv_destroy(struct driver *drv);
+
+int drv_get_fd(struct driver *drv);
+
+const char *drv_get_name(struct driver *drv);
+
+struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t usage);
+
+struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format);
+
+struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
+			 uint64_t flags);
+
+struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
+					uint32_t format, const uint64_t *modifiers, uint32_t count);
+
+void drv_bo_destroy(struct bo *bo);
+
+struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data);
+
+void *drv_bo_map(struct bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
+		 uint32_t flags, struct map_info **map_data, size_t plane);
+
+int drv_bo_unmap(struct bo *bo, struct map_info *map_data);
+
+uint32_t drv_bo_get_width(struct bo *bo);
+
+uint32_t drv_bo_get_height(struct bo *bo);
+
+uint32_t drv_bo_get_stride_or_tiling(struct bo *bo);
+
+size_t drv_bo_get_num_planes(struct bo *bo);
+
+union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane);
+
+int drv_bo_get_plane_fd(struct bo *bo, size_t plane);
+
+uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane);
+
+uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane);
+
+uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane);
+
+uint64_t drv_bo_get_plane_format_modifier(struct bo *bo, size_t plane);
+
+uint32_t drv_bo_get_format(struct bo *bo);
+
+uint32_t drv_bo_get_stride_in_pixels(struct bo *bo);
+
+uint32_t drv_resolve_format(struct driver *drv, uint32_t format);
+
+size_t drv_num_planes_from_format(uint32_t format);
+
+uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane);
+
+uint32_t drv_num_buffers_per_bo(struct bo *bo);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drv_priv.h b/drv_priv.h
new file mode 100644
index 0000000..df5c8e9
--- /dev/null
+++ b/drv_priv.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef DRV_PRIV_H
+#define DRV_PRIV_H
+
+#include <pthread.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include "drv.h"
+
+struct bo {
+	struct driver *drv;
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+	uint32_t tiling;
+	size_t num_planes;
+	union bo_handle handles[DRV_MAX_PLANES];
+	uint32_t offsets[DRV_MAX_PLANES];
+	uint32_t sizes[DRV_MAX_PLANES];
+	uint32_t strides[DRV_MAX_PLANES];
+	uint64_t format_modifiers[DRV_MAX_PLANES];
+	size_t total_size;
+	void *priv;
+};
+
+struct driver {
+	int fd;
+	struct backend *backend;
+	void *priv;
+	void *buffer_table;
+	void *map_table;
+	pthread_mutex_t driver_lock;
+};
+
+struct kms_item {
+	uint32_t format;
+	uint64_t modifier;
+	uint64_t usage;
+};
+
+struct format_metadata {
+	uint32_t priority;
+	uint32_t tiling;
+	uint64_t modifier;
+};
+
+struct combination {
+	uint32_t format;
+	struct format_metadata metadata;
+	uint64_t usage;
+};
+
+struct combinations {
+	struct combination *data;
+	uint32_t size;
+	uint32_t allocations;
+};
+
+struct backend {
+	char *name;
+	int (*init)(struct driver *drv);
+	void (*close)(struct driver *drv);
+	int (*bo_create)(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+			 uint32_t flags);
+	int (*bo_create_with_modifiers)(struct bo *bo, uint32_t width, uint32_t height,
+					uint32_t format, const uint64_t *modifiers, uint32_t count);
+	int (*bo_destroy)(struct bo *bo);
+	int (*bo_import)(struct bo *bo, struct drv_import_fd_data *data);
+	void *(*bo_map)(struct bo *bo, struct map_info *data, size_t plane);
+	int (*bo_unmap)(struct bo *bo, struct map_info *data);
+	uint32_t (*resolve_format)(uint32_t format);
+	struct combinations combos;
+};
+
+// clang-format off
+#define BO_USE_RENDER_MASK BO_USE_LINEAR | BO_USE_RENDERING | BO_USE_SW_READ_OFTEN | \
+			   BO_USE_SW_WRITE_OFTEN | BO_USE_SW_READ_RARELY | \
+			   BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE
+
+#define BO_USE_TEXTURE_MASK BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
+			    BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE
+
+#define LINEAR_METADATA (struct format_metadata) { 0, 1, DRM_FORMAT_MOD_NONE }
+// clang-format on
+
+#endif
diff --git a/evdi.c b/evdi.c
new file mode 100644
index 0000000..f66fb2d
--- /dev/null
+++ b/evdi.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
+
+static int evdi_init(struct driver *drv)
+{
+	int ret;
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &LINEAR_METADATA, BO_USE_RENDER_MASK);
+	if (ret)
+		return ret;
+
+	return drv_modify_linear_combinations(drv);
+}
+
+struct backend backend_evdi = {
+	.name = "evdi",
+	.init = evdi_init,
+	.bo_create = drv_dumb_bo_create,
+	.bo_destroy = drv_dumb_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = drv_dumb_bo_map,
+};
diff --git a/exynos.c b/exynos.c
new file mode 100644
index 0000000..0d935eb
--- /dev/null
+++ b/exynos.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2014 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_EXYNOS
+
+// clang-format off
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <xf86drm.h>
+#include <exynos_drm.h>
+// clang-format on
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
+
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12 };
+
+static int exynos_init(struct driver *drv)
+{
+	int ret;
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &LINEAR_METADATA, BO_USE_RENDER_MASK);
+	if (ret)
+		return ret;
+
+	ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+				   &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+	if (ret)
+		return ret;
+
+	return drv_modify_linear_combinations(drv);
+}
+
+static int exynos_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+			    uint32_t flags)
+{
+	size_t plane;
+
+	if (format == DRM_FORMAT_NV12) {
+		uint32_t chroma_height;
+		/* V4L2 s5p-mfc requires 16-aligned width and 32-aligned height. */
+		width = ALIGN(width, 16);
+		height = ALIGN(height, 32);
+		chroma_height = ALIGN(height / 2, 32);
+		bo->strides[0] = bo->strides[1] = width;
+		/* MFC v8+ requires 64 bytes of padding at the end of luma and chroma buffers. */
+		bo->sizes[0] = bo->strides[0] * height + 64;
+		bo->sizes[1] = bo->strides[1] * chroma_height + 64;
+		bo->offsets[0] = bo->offsets[1] = 0;
+		bo->total_size = bo->sizes[0] + bo->sizes[1];
+	} else if (format == DRM_FORMAT_XRGB8888 || format == DRM_FORMAT_ARGB8888) {
+		bo->strides[0] = drv_stride_from_format(format, width, 0);
+		bo->total_size = bo->sizes[0] = height * bo->strides[0];
+		bo->offsets[0] = 0;
+	} else {
+		fprintf(stderr, "drv: unsupported format %X\n", format);
+		assert(0);
+		return -EINVAL;
+	}
+
+	int ret;
+	for (plane = 0; plane < bo->num_planes; plane++) {
+		size_t size = bo->sizes[plane];
+		struct drm_exynos_gem_create gem_create;
+
+		memset(&gem_create, 0, sizeof(gem_create));
+		gem_create.size = size;
+		gem_create.flags = EXYNOS_BO_NONCONTIG;
+
+		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &gem_create);
+		if (ret) {
+			fprintf(stderr, "drv: DRM_IOCTL_EXYNOS_GEM_CREATE failed (size=%zu)\n",
+				size);
+			goto cleanup_planes;
+		}
+
+		bo->handles[plane].u32 = gem_create.handle;
+	}
+
+	return 0;
+
+cleanup_planes:
+	for (; plane != 0; plane--) {
+		struct drm_gem_close gem_close;
+		memset(&gem_close, 0, sizeof(gem_close));
+		gem_close.handle = bo->handles[plane - 1].u32;
+		int gem_close_ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+		if (gem_close_ret) {
+			fprintf(stderr, "drv: DRM_IOCTL_GEM_CLOSE failed: %d\n", gem_close_ret);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Use dumb mapping with exynos even though a GEM buffer is created.
+ * libdrm does the same thing in exynos_drm.c
+ */
+struct backend backend_exynos = {
+	.name = "exynos",
+	.init = exynos_init,
+	.bo_create = exynos_bo_create,
+	.bo_destroy = drv_gem_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = drv_dumb_bo_map,
+};
+
+#endif
diff --git a/gbm.c b/gbm.c
new file mode 100644
index 0000000..ab576cf
--- /dev/null
+++ b/gbm.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2014 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <assert.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <xf86drm.h>
+
+#include "drv.h"
+#include "gbm_helpers.h"
+#include "gbm_priv.h"
+#include "util.h"
+
+PUBLIC int gbm_device_get_fd(struct gbm_device *gbm)
+{
+
+	return drv_get_fd(gbm->drv);
+}
+
+PUBLIC const char *gbm_device_get_backend_name(struct gbm_device *gbm)
+{
+	return drv_get_name(gbm->drv);
+}
+
+PUBLIC int gbm_device_is_format_supported(struct gbm_device *gbm, uint32_t format, uint32_t usage)
+{
+	uint64_t drv_usage;
+
+	if (usage & GBM_BO_USE_CURSOR && usage & GBM_BO_USE_RENDERING)
+		return 0;
+
+	drv_usage = gbm_convert_flags(usage);
+
+	return (drv_get_combination(gbm->drv, format, drv_usage) != NULL);
+}
+
+PUBLIC struct gbm_device *gbm_create_device(int fd)
+{
+	struct gbm_device *gbm;
+
+	gbm = (struct gbm_device *)malloc(sizeof(*gbm));
+
+	if (!gbm)
+		return NULL;
+
+	gbm->drv = drv_create(fd);
+	if (!gbm->drv) {
+		free(gbm);
+		return NULL;
+	}
+
+	return gbm;
+}
+
+PUBLIC void gbm_device_destroy(struct gbm_device *gbm)
+{
+	drv_destroy(gbm->drv);
+	free(gbm);
+}
+
+PUBLIC struct gbm_surface *gbm_surface_create(struct gbm_device *gbm, uint32_t width,
+					      uint32_t height, uint32_t format, uint32_t flags)
+{
+	struct gbm_surface *surface = (struct gbm_surface *)malloc(sizeof(*surface));
+
+	if (!surface)
+		return NULL;
+
+	return surface;
+}
+
+PUBLIC void gbm_surface_destroy(struct gbm_surface *surface)
+{
+	free(surface);
+}
+
+PUBLIC struct gbm_bo *gbm_surface_lock_front_buffer(struct gbm_surface *surface)
+{
+	return NULL;
+}
+
+PUBLIC void gbm_surface_release_buffer(struct gbm_surface *surface, struct gbm_bo *bo)
+{
+}
+
+static struct gbm_bo *gbm_bo_new(struct gbm_device *gbm, uint32_t format)
+{
+	struct gbm_bo *bo;
+
+	bo = (struct gbm_bo *)calloc(1, sizeof(*bo));
+	if (!bo)
+		return NULL;
+
+	bo->gbm = gbm;
+	bo->gbm_format = format;
+
+	return bo;
+}
+
+PUBLIC struct gbm_bo *gbm_bo_create(struct gbm_device *gbm, uint32_t width, uint32_t height,
+				    uint32_t format, uint32_t flags)
+{
+	struct gbm_bo *bo;
+
+	if (!gbm_device_is_format_supported(gbm, format, flags))
+		return NULL;
+
+	bo = gbm_bo_new(gbm, format);
+
+	if (!bo)
+		return NULL;
+
+	bo->bo = drv_bo_create(gbm->drv, width, height, format, gbm_convert_flags(flags));
+
+	if (!bo->bo) {
+		free(bo);
+		return NULL;
+	}
+
+	return bo;
+}
+
+PUBLIC struct gbm_bo *gbm_bo_create_with_modifiers(struct gbm_device *gbm, uint32_t width,
+						   uint32_t height, uint32_t format,
+						   const uint64_t *modifiers, uint32_t count)
+{
+	struct gbm_bo *bo;
+
+	bo = gbm_bo_new(gbm, format);
+
+	if (!bo)
+		return NULL;
+
+	bo->bo = drv_bo_create_with_modifiers(gbm->drv, width, height, format, modifiers, count);
+
+	if (!bo->bo) {
+		free(bo);
+		return NULL;
+	}
+
+	return bo;
+}
+
+PUBLIC void gbm_bo_destroy(struct gbm_bo *bo)
+{
+	if (bo->destroy_user_data) {
+		bo->destroy_user_data(bo, bo->user_data);
+		bo->destroy_user_data = NULL;
+		bo->user_data = NULL;
+	}
+
+	drv_bo_destroy(bo->bo);
+	free(bo);
+}
+
+PUBLIC struct gbm_bo *gbm_bo_import(struct gbm_device *gbm, uint32_t type, void *buffer,
+				    uint32_t usage)
+{
+	struct gbm_bo *bo;
+	struct drv_import_fd_data drv_data;
+	struct gbm_import_fd_data *fd_data = buffer;
+	struct gbm_import_fd_planar_data *fd_planar_data = buffer;
+	uint32_t gbm_format;
+	size_t num_planes, i;
+
+	memset(&drv_data, 0, sizeof(drv_data));
+
+	switch (type) {
+	case GBM_BO_IMPORT_FD:
+		gbm_format = fd_data->format;
+		drv_data.width = fd_data->width;
+		drv_data.height = fd_data->height;
+		drv_data.format = fd_data->format;
+		drv_data.fds[0] = fd_data->fd;
+		drv_data.strides[0] = fd_data->stride;
+		drv_data.sizes[0] = fd_data->height * fd_data->stride;
+		break;
+	case GBM_BO_IMPORT_FD_PLANAR:
+		gbm_format = fd_planar_data->format;
+		drv_data.width = fd_planar_data->width;
+		drv_data.height = fd_planar_data->height;
+		drv_data.format = fd_planar_data->format;
+		num_planes = drv_num_planes_from_format(drv_data.format);
+
+		assert(num_planes);
+
+		for (i = 0; i < num_planes; i++) {
+			drv_data.fds[i] = fd_planar_data->fds[i];
+			drv_data.offsets[i] = fd_planar_data->offsets[i];
+			drv_data.strides[i] = fd_planar_data->strides[i];
+			drv_data.format_modifiers[i] = fd_planar_data->format_modifiers[i];
+
+			drv_data.sizes[i] = drv_size_from_format(
+			    drv_data.format, drv_data.strides[i], drv_data.height, i);
+		}
+
+		for (i = num_planes; i < GBM_MAX_PLANES; i++)
+			drv_data.fds[i] = -1;
+
+		break;
+	default:
+		return NULL;
+	}
+
+	if (!gbm_device_is_format_supported(gbm, gbm_format, usage))
+		return NULL;
+
+	bo = gbm_bo_new(gbm, gbm_format);
+
+	if (!bo)
+		return NULL;
+
+	bo->bo = drv_bo_import(gbm->drv, &drv_data);
+
+	if (!bo->bo) {
+		free(bo);
+		return NULL;
+	}
+
+	return bo;
+}
+
+PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
+			uint32_t flags, uint32_t *stride, void **map_data, size_t plane)
+{
+	if (!bo || width == 0 || height == 0 || !stride || !map_data)
+		return NULL;
+
+	*stride = gbm_bo_get_plane_stride(bo, plane);
+	return drv_bo_map(bo->bo, x, y, width, height, 0, (struct map_info **)map_data, plane);
+}
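+
+/*
+ * Example (sketch): map plane 0 of a bo for CPU access, assuming "bo" came
+ * from gbm_bo_create() above:
+ *
+ *   uint32_t stride;
+ *   void *map_data;
+ *   void *addr = gbm_bo_map(bo, 0, 0, gbm_bo_get_width(bo),
+ *                           gbm_bo_get_height(bo), 0, &stride, &map_data, 0);
+ *   ...
+ *   gbm_bo_unmap(bo, map_data);
+ */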
+
+PUBLIC void gbm_bo_unmap(struct gbm_bo *bo, void *map_data)
+{
+	assert(bo);
+	drv_bo_unmap(bo->bo, map_data);
+}
+
+PUBLIC uint32_t gbm_bo_get_width(struct gbm_bo *bo)
+{
+	return drv_bo_get_width(bo->bo);
+}
+
+PUBLIC uint32_t gbm_bo_get_height(struct gbm_bo *bo)
+{
+	return drv_bo_get_height(bo->bo);
+}
+
+PUBLIC uint32_t gbm_bo_get_stride(struct gbm_bo *bo)
+{
+	return gbm_bo_get_plane_stride(bo, 0);
+}
+
+PUBLIC uint32_t gbm_bo_get_stride_or_tiling(struct gbm_bo *bo)
+{
+	return drv_bo_get_stride_or_tiling(bo->bo);
+}
+
+PUBLIC uint32_t gbm_bo_get_format(struct gbm_bo *bo)
+{
+	return bo->gbm_format;
+}
+
+PUBLIC uint64_t gbm_bo_get_format_modifier(struct gbm_bo *bo)
+{
+	return gbm_bo_get_plane_format_modifier(bo, 0);
+}
+
+PUBLIC struct gbm_device *gbm_bo_get_device(struct gbm_bo *bo)
+{
+	return bo->gbm;
+}
+
+PUBLIC union gbm_bo_handle gbm_bo_get_handle(struct gbm_bo *bo)
+{
+	return gbm_bo_get_plane_handle(bo, 0);
+}
+
+PUBLIC int gbm_bo_get_fd(struct gbm_bo *bo)
+{
+	return gbm_bo_get_plane_fd(bo, 0);
+}
+
+PUBLIC size_t gbm_bo_get_num_planes(struct gbm_bo *bo)
+{
+	return drv_bo_get_num_planes(bo->bo);
+}
+
+PUBLIC union gbm_bo_handle gbm_bo_get_plane_handle(struct gbm_bo *bo, size_t plane)
+{
+	return (union gbm_bo_handle)drv_bo_get_plane_handle(bo->bo, plane).u64;
+}
+
+PUBLIC int gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane)
+{
+	return drv_bo_get_plane_fd(bo->bo, plane);
+}
+
+PUBLIC uint32_t gbm_bo_get_plane_offset(struct gbm_bo *bo, size_t plane)
+{
+	return drv_bo_get_plane_offset(bo->bo, plane);
+}
+
+PUBLIC uint32_t gbm_bo_get_plane_size(struct gbm_bo *bo, size_t plane)
+{
+	return drv_bo_get_plane_size(bo->bo, plane);
+}
+
+PUBLIC uint32_t gbm_bo_get_plane_stride(struct gbm_bo *bo, size_t plane)
+{
+	return drv_bo_get_plane_stride(bo->bo, plane);
+}
+
+PUBLIC uint64_t gbm_bo_get_plane_format_modifier(struct gbm_bo *bo, size_t plane)
+{
+	return drv_bo_get_plane_format_modifier(bo->bo, plane);
+}
+
+PUBLIC void gbm_bo_set_user_data(struct gbm_bo *bo, void *data,
+				 void (*destroy_user_data)(struct gbm_bo *, void *))
+{
+	bo->user_data = data;
+	bo->destroy_user_data = destroy_user_data;
+}
+
+PUBLIC void *gbm_bo_get_user_data(struct gbm_bo *bo)
+{
+	return bo->user_data;
+}
diff --git a/gbm.h b/gbm.h
new file mode 100644
index 0000000..1fb338f
--- /dev/null
+++ b/gbm.h
@@ -0,0 +1,413 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Benjamin Franzke <benjaminfranzke@googlemail.com>
+ */
+
+#ifndef _GBM_H_
+#define _GBM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define __GBM__ 1
+
+#include <stddef.h>
+#include <stdint.h>
+
+/**
+ * \file gbm.h
+ * \brief Generic Buffer Manager
+ */
+
+struct gbm_device;
+struct gbm_bo;
+struct gbm_surface;
+
+/**
+ * \mainpage The Generic Buffer Manager
+ *
+ * This module provides an abstraction that the caller can use to request a
+ * buffer from the underlying memory management system for the platform.
+ *
+ * This allows the creation of portable code whilst still allowing access to
+ * the underlying memory manager.
+ */
+
+/**
+ * Abstraction representing the handle to a buffer allocated by the
+ * manager
+ */
+union gbm_bo_handle {
+   void *ptr;
+   int32_t s32;
+   uint32_t u32;
+   int64_t s64;
+   uint64_t u64;
+};
+
+#define GBM_MAX_PLANES 4
+
+#define __gbm_fourcc_code(a,b,c,d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
+			      ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
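A worked example of the packing: GBM_FORMAT_XRGB8888 below expands to __gbm_fourcc_code('X', 'R', '2', '4') = 0x58 | (0x52 << 8) | (0x32 << 16) | (0x34 << 24) = 0x34325258, the same value drm_fourcc.h assigns to DRM_FORMAT_XRGB8888.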
+
+#define GBM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+
+/* color index */
+#define GBM_FORMAT_C8		__gbm_fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
+
+/* 8 bpp Red */
+#define GBM_FORMAT_R8		__gbm_fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
+
+/* 16 bpp RG */
+#define GBM_FORMAT_RG88		__gbm_fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
+#define GBM_FORMAT_GR88		__gbm_fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
+
+/* 8 bpp RGB */
+#define GBM_FORMAT_RGB332	__gbm_fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
+#define GBM_FORMAT_BGR233	__gbm_fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
+
+/* 16 bpp RGB */
+#define GBM_FORMAT_XRGB4444	__gbm_fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
+#define GBM_FORMAT_XBGR4444	__gbm_fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
+#define GBM_FORMAT_RGBX4444	__gbm_fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
+#define GBM_FORMAT_BGRX4444	__gbm_fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
+
+#define GBM_FORMAT_ARGB4444	__gbm_fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
+#define GBM_FORMAT_ABGR4444	__gbm_fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
+#define GBM_FORMAT_RGBA4444	__gbm_fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
+#define GBM_FORMAT_BGRA4444	__gbm_fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
+
+#define GBM_FORMAT_XRGB1555	__gbm_fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
+#define GBM_FORMAT_XBGR1555	__gbm_fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
+#define GBM_FORMAT_RGBX5551	__gbm_fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
+#define GBM_FORMAT_BGRX5551	__gbm_fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
+
+#define GBM_FORMAT_ARGB1555	__gbm_fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
+#define GBM_FORMAT_ABGR1555	__gbm_fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
+#define GBM_FORMAT_RGBA5551	__gbm_fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
+#define GBM_FORMAT_BGRA5551	__gbm_fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
+
+#define GBM_FORMAT_RGB565	__gbm_fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
+#define GBM_FORMAT_BGR565	__gbm_fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
+
+/* 24 bpp RGB */
+#define GBM_FORMAT_RGB888	__gbm_fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
+#define GBM_FORMAT_BGR888	__gbm_fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
+
+/* 32 bpp RGB */
+#define GBM_FORMAT_XRGB8888	__gbm_fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
+#define GBM_FORMAT_XBGR8888	__gbm_fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
+#define GBM_FORMAT_RGBX8888	__gbm_fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
+#define GBM_FORMAT_BGRX8888	__gbm_fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
+
+#define GBM_FORMAT_ARGB8888	__gbm_fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
+#define GBM_FORMAT_ABGR8888	__gbm_fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
+#define GBM_FORMAT_RGBA8888	__gbm_fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
+#define GBM_FORMAT_BGRA8888	__gbm_fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
+
+#define GBM_FORMAT_XRGB2101010	__gbm_fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
+#define GBM_FORMAT_XBGR2101010	__gbm_fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
+#define GBM_FORMAT_RGBX1010102	__gbm_fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
+#define GBM_FORMAT_BGRX1010102	__gbm_fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
+
+#define GBM_FORMAT_ARGB2101010	__gbm_fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
+#define GBM_FORMAT_ABGR2101010	__gbm_fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
+#define GBM_FORMAT_RGBA1010102	__gbm_fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
+#define GBM_FORMAT_BGRA1010102	__gbm_fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+
+/* packed YCbCr */
+#define GBM_FORMAT_YUYV		__gbm_fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
+#define GBM_FORMAT_YVYU		__gbm_fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
+#define GBM_FORMAT_UYVY		__gbm_fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
+#define GBM_FORMAT_VYUY		__gbm_fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
+
+#define GBM_FORMAT_AYUV		__gbm_fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+#define GBM_FORMAT_NV12		__gbm_fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define GBM_FORMAT_NV21		__gbm_fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
+#define GBM_FORMAT_NV16		__gbm_fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
+#define GBM_FORMAT_NV61		__gbm_fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+
+/*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+#define GBM_FORMAT_YUV410	__gbm_fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
+#define GBM_FORMAT_YVU410	__gbm_fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
+#define GBM_FORMAT_YUV411	__gbm_fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
+#define GBM_FORMAT_YVU411	__gbm_fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
+#define GBM_FORMAT_YUV420	__gbm_fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+#define GBM_FORMAT_YVU420	__gbm_fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
+#define GBM_FORMAT_YUV422	__gbm_fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
+#define GBM_FORMAT_YVU422	__gbm_fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
+#define GBM_FORMAT_YUV444	__gbm_fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
+#define GBM_FORMAT_YVU444	__gbm_fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+/*
+ * Format Modifiers:
+ *
+ * Format modifiers describe, typically, a re-ordering or modification
+ * of the data in a plane of an FB.  This can be used to express tiled/
+ * swizzled formats, or compression, or a combination of the two.
+ *
+ * The upper 8 bits of the format modifier are a vendor-id as assigned
+ * below.  The lower 56 bits are assigned as vendor sees fit.
+ */
+
+/* Vendor Ids: */
+#define GBM_FORMAT_MOD_NONE           0
+#define GBM_FORMAT_MOD_VENDOR_INTEL   0x01
+#define GBM_FORMAT_MOD_VENDOR_AMD     0x02
+#define GBM_FORMAT_MOD_VENDOR_NV      0x03
+#define GBM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
+#define GBM_FORMAT_MOD_VENDOR_QCOM    0x05
+/* add more to the end as needed */
+
+#define gbm_fourcc_mod_code(vendor, val) \
+	((((__u64)GBM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
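A minimal sketch of how the vendor/value packing composes (illustrative only; note that gbm_fourcc_mod_code() uses the kernel __u64 type, so a standalone caller also needs <linux/types.h>):

#include <inttypes.h>
#include <linux/types.h> /* __u64, used by gbm_fourcc_mod_code() */
#include <stdio.h>
#include "gbm.h"

int main(void)
{
	/* Vendor 0x01 (INTEL) goes into bits 63:56, the value 1 into bits 55:0. */
	uint64_t modifier = gbm_fourcc_mod_code(INTEL, 1);

	/* Prints 0x0100000000000001, the value drm_fourcc.h uses for I915_FORMAT_MOD_X_TILED. */
	printf("modifier = 0x%016" PRIx64 "\n", modifier);
	return 0;
}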
+
+/**
+ * Flags to indicate the intended use for the buffer - these are passed into
+ * gbm_bo_create(). The caller must set the union of all the flags that are
+ * appropriate.
+ *
+ * \sa Use gbm_device_is_format_supported() to check whether the combination of
+ * format and use flags is supported.
+ */
+enum gbm_bo_flags {
+   /**
+    * Buffer is going to be presented to the screen using an API such as KMS
+    */
+   GBM_BO_USE_SCANOUT      = (1 << 0),
+   /**
+    * Buffer is going to be used as cursor
+    */
+   GBM_BO_USE_CURSOR       = (1 << 1),
+   /**
+    * Deprecated
+    */
+   GBM_BO_USE_CURSOR_64X64 = GBM_BO_USE_CURSOR,
+   /**
+    * Buffer is to be used for rendering - for example it is going to be used
+    * as the storage for a color buffer
+    */
+   GBM_BO_USE_RENDERING    = (1 << 2),
+   /**
+    * Deprecated
+    */
+   GBM_BO_USE_WRITE    = (1 << 3),
+   /**
+    * Buffer is guaranteed to be laid out linearly in memory. That is, the
+    * buffer is laid out as an array of 'height' blocks, each 'stride' bytes
+    * long, with the blocks stored in the same order as the rows of the
+    * buffer.
+    */
+   GBM_BO_USE_LINEAR    = (1 << 4),
+};
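A minimal usage sketch for these flags, assuming a /dev/dri/card0 node and a 1920x1080 XRGB8888 scanout buffer (the node path and dimensions are illustrative, not required by the API):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include "gbm.h"

int main(void)
{
	struct gbm_device *gbm;
	struct gbm_bo *bo;
	uint32_t flags = GBM_BO_USE_SCANOUT | GBM_BO_USE_LINEAR;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	gbm = gbm_create_device(fd);
	if (gbm && gbm_device_is_format_supported(gbm, GBM_FORMAT_XRGB8888, flags)) {
		bo = gbm_bo_create(gbm, 1920, 1080, GBM_FORMAT_XRGB8888, flags);
		if (bo) {
			printf("stride = %u bytes\n", gbm_bo_get_stride(bo));
			gbm_bo_destroy(bo);
		}
	}

	if (gbm)
		gbm_device_destroy(gbm);

	close(fd);
	return 0;
}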
+
+int
+gbm_device_get_fd(struct gbm_device *gbm);
+
+const char *
+gbm_device_get_backend_name(struct gbm_device *gbm);
+
+int
+gbm_device_is_format_supported(struct gbm_device *gbm,
+                               uint32_t format, uint32_t usage);
+
+void
+gbm_device_destroy(struct gbm_device *gbm);
+
+struct gbm_device *
+gbm_create_device(int fd);
+
+struct gbm_bo *
+gbm_bo_create(struct gbm_device *gbm,
+              uint32_t width, uint32_t height,
+              uint32_t format, uint32_t flags);
+
+struct gbm_bo *
+gbm_bo_create_with_modifiers(struct gbm_device *gbm,
+                             uint32_t width, uint32_t height,
+                             uint32_t format,
+                             const uint64_t *modifiers, uint32_t count);
+
+#define GBM_BO_IMPORT_WL_BUFFER         0x5501
+#define GBM_BO_IMPORT_EGL_IMAGE         0x5502
+#define GBM_BO_IMPORT_FD                0x5503
+#define GBM_BO_IMPORT_FD_PLANAR         0x5504
+
+struct gbm_import_fd_data {
+   int fd;
+   uint32_t width;
+   uint32_t height;
+   uint32_t stride;
+   uint32_t format;
+};
+
+struct gbm_import_fd_planar_data {
+   int fds[GBM_MAX_PLANES];
+   uint32_t width;
+   uint32_t height;
+   uint32_t format;
+   uint32_t strides[GBM_MAX_PLANES];
+   uint32_t offsets[GBM_MAX_PLANES];
+   uint64_t format_modifiers[GBM_MAX_PLANES];
+};
+
+struct gbm_bo *
+gbm_bo_import(struct gbm_device *gbm, uint32_t type,
+              void *buffer, uint32_t usage);
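A sketch of the planar import path for an NV12 dma-buf; the single-fd layout with the CbCr plane at offset stride * height is an assumption about the producer, not something the API mandates:

#include <string.h>
#include "gbm.h"

static struct gbm_bo *import_nv12(struct gbm_device *gbm, int dmabuf_fd, uint32_t width,
				  uint32_t height, uint32_t stride)
{
	struct gbm_import_fd_planar_data data;

	memset(&data, 0, sizeof(data));
	data.width = width;
	data.height = height;
	data.format = GBM_FORMAT_NV12;

	data.fds[0] = dmabuf_fd; /* Y plane */
	data.strides[0] = stride;
	data.offsets[0] = 0;
	data.format_modifiers[0] = GBM_FORMAT_MOD_NONE;

	data.fds[1] = dmabuf_fd; /* interleaved CbCr plane in the same buffer */
	data.strides[1] = stride;
	data.offsets[1] = stride * height;
	data.format_modifiers[1] = GBM_FORMAT_MOD_NONE;

	return gbm_bo_import(gbm, GBM_BO_IMPORT_FD_PLANAR, &data, GBM_BO_USE_RENDERING);
}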
+
+/**
+ * Flags to indicate the type of mapping for the buffer - these are
+ * passed into gbm_bo_map(). The caller must set the union of all the
+ * flags that are appropriate.
+ *
+ * These flags are independent of the GBM_BO_USE_* creation flags. However,
+ * mapping the buffer may require copying to/from a staging buffer.
+ *
+ * See also: pipe_transfer_usage
+ */
+enum gbm_bo_transfer_flags {
+   /**
+    * Buffer contents read back (or accessed directly) at transfer
+    * create time.
+    */
+   GBM_BO_TRANSFER_READ       = (1 << 0),
+   /**
+    * Buffer contents will be written back at unmap time
+    * (or modified as a result of being accessed directly).
+    */
+   GBM_BO_TRANSFER_WRITE      = (1 << 1),
+   /**
+    * Read/modify/write
+    */
+   GBM_BO_TRANSFER_READ_WRITE = (GBM_BO_TRANSFER_READ | GBM_BO_TRANSFER_WRITE),
+};
+
+void *
+gbm_bo_map(struct gbm_bo *bo,
+           uint32_t x, uint32_t y, uint32_t width, uint32_t height,
+           uint32_t flags, uint32_t *stride, void **map_data, size_t plane);
+
+void
+gbm_bo_unmap(struct gbm_bo *bo, void *map_data);
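A sketch of the transfer flags in use; since some error paths in this implementation may return MAP_FAILED rather than NULL, both are checked:

#include <string.h>
#include <sys/mman.h> /* MAP_FAILED */
#include "gbm.h"

/* Clear plane 0 of a single-plane bo through a CPU mapping. */
static int clear_bo(struct gbm_bo *bo)
{
	uint32_t stride = 0;
	void *map_data = NULL;
	void *ptr = gbm_bo_map(bo, 0, 0, gbm_bo_get_width(bo), gbm_bo_get_height(bo),
			       GBM_BO_TRANSFER_WRITE, &stride, &map_data, 0);

	if (!ptr || ptr == MAP_FAILED)
		return -1;

	memset(ptr, 0, (size_t)stride * gbm_bo_get_height(bo));
	gbm_bo_unmap(bo, map_data);
	return 0;
}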
+
+uint32_t
+gbm_bo_get_width(struct gbm_bo *bo);
+
+uint32_t
+gbm_bo_get_height(struct gbm_bo *bo);
+
+uint32_t
+gbm_bo_get_stride(struct gbm_bo *bo);
+
+/* Tegra bringup hack to pass tiling parameters at EGLImage creation. */
+uint32_t
+gbm_bo_get_stride_or_tiling(struct gbm_bo *bo);
+
+uint32_t
+gbm_bo_get_format(struct gbm_bo *bo);
+
+uint64_t
+gbm_bo_get_format_modifier(struct gbm_bo *bo);
+
+struct gbm_device *
+gbm_bo_get_device(struct gbm_bo *bo);
+
+union gbm_bo_handle
+gbm_bo_get_handle(struct gbm_bo *bo);
+
+int
+gbm_bo_get_fd(struct gbm_bo *bo);
+
+size_t
+gbm_bo_get_num_planes(struct gbm_bo *bo);
+
+union gbm_bo_handle
+gbm_bo_get_plane_handle(struct gbm_bo *bo, size_t plane);
+
+int
+gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane);
+
+uint32_t
+gbm_bo_get_plane_offset(struct gbm_bo *bo, size_t plane);
+
+uint32_t
+gbm_bo_get_plane_size(struct gbm_bo *bo, size_t plane);
+
+uint32_t
+gbm_bo_get_plane_stride(struct gbm_bo *bo, size_t plane);
+
+uint64_t
+gbm_bo_get_plane_format_modifier(struct gbm_bo *bo, size_t plane);
+
+void
+gbm_bo_set_user_data(struct gbm_bo *bo, void *data,
+		     void (*destroy_user_data)(struct gbm_bo *, void *));
+
+void *
+gbm_bo_get_user_data(struct gbm_bo *bo);
+
+void
+gbm_bo_destroy(struct gbm_bo *bo);
+
+struct gbm_surface *
+gbm_surface_create(struct gbm_device *gbm,
+                   uint32_t width, uint32_t height,
+		   uint32_t format, uint32_t flags);
+
+struct gbm_bo *
+gbm_surface_lock_front_buffer(struct gbm_surface *surface);
+
+void
+gbm_surface_release_buffer(struct gbm_surface *surface, struct gbm_bo *bo);
+
+int
+gbm_surface_has_free_buffers(struct gbm_surface *surface);
+
+void
+gbm_surface_destroy(struct gbm_surface *surface);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/gbm.pc b/gbm.pc
new file mode 100644
index 0000000..e99ac83
--- /dev/null
+++ b/gbm.pc
@@ -0,0 +1,10 @@
+prefix=/usr
+exec_prefix=${prefix}
+includedir=${prefix}/include
+libdir=${exec_prefix}/lib
+
+Name: libgbm
+Description: A small gbm implementation
+Version: 0
+Cflags: -I${includedir}
+Libs: -L${libdir} -lgbm
diff --git a/gbm_helpers.c b/gbm_helpers.c
new file mode 100644
index 0000000..c60a315
--- /dev/null
+++ b/gbm_helpers.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stddef.h>
+#include <stdio.h>
+
+#include "drv.h"
+#include "gbm.h"
+
+uint64_t gbm_convert_flags(uint32_t flags)
+{
+	uint64_t usage = BO_USE_NONE;
+
+	if (flags & GBM_BO_USE_SCANOUT)
+		usage |= BO_USE_SCANOUT;
+	if (flags & GBM_BO_USE_CURSOR)
+		usage |= BO_USE_CURSOR;
+	if (flags & GBM_BO_USE_CURSOR_64X64)
+		usage |= BO_USE_CURSOR_64X64;
+	if (flags & GBM_BO_USE_RENDERING)
+		usage |= BO_USE_RENDERING;
+	if (flags & GBM_BO_USE_LINEAR)
+		usage |= BO_USE_LINEAR;
+
+	return usage;
+}
diff --git a/gbm_helpers.h b/gbm_helpers.h
new file mode 100644
index 0000000..48ff391
--- /dev/null
+++ b/gbm_helpers.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GBM_HELPERS_H
+#define GBM_HELPERS_H
+
+#include <stdint.h>
+
+uint64_t gbm_convert_flags(uint32_t flags);
+
+#endif
diff --git a/gbm_priv.h b/gbm_priv.h
new file mode 100644
index 0000000..3dc1e59
--- /dev/null
+++ b/gbm_priv.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2014 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GBM_PRIV_H
+#define GBM_PRIV_H
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include "drv.h"
+#include "gbm.h"
+
+struct gbm_device {
+	struct driver *drv;
+};
+
+struct gbm_surface {
+};
+
+struct gbm_bo {
+	struct gbm_device *gbm;
+	struct bo *bo;
+	uint32_t gbm_format;
+	void *user_data;
+	void (*destroy_user_data)(struct gbm_bo *, void *);
+};
+
+#endif
diff --git a/gma500.c b/gma500.c
new file mode 100644
index 0000000..5b08bc3
--- /dev/null
+++ b/gma500.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_RGBX8888 };
+
+static int gma500_init(struct driver *drv)
+{
+	int ret;
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &LINEAR_METADATA, BO_USE_RENDER_MASK);
+	if (ret)
+		return ret;
+
+	return drv_modify_linear_combinations(drv);
+}
+
+struct backend backend_gma500 = {
+	.name = "gma500",
+	.init = gma500_init,
+	.bo_create = drv_dumb_bo_create,
+	.bo_destroy = drv_dumb_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = drv_dumb_bo_map,
+};
diff --git a/helpers.c b/helpers.c
new file mode 100644
index 0000000..19ee646
--- /dev/null
+++ b/helpers.c
@@ -0,0 +1,537 @@
+/*
+ * Copyright 2014 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <xf86drm.h>
+#include <xf86drmMode.h>
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane)
+{
+
+	if (plane != 0) {
+		switch (format) {
+		case DRM_FORMAT_YVU420:
+		case DRM_FORMAT_YVU420_ANDROID:
+			stride = DIV_ROUND_UP(stride, 2);
+			break;
+		}
+	}
+
+	return stride;
+}
+
+static uint32_t bpp_from_format(uint32_t format, size_t plane)
+{
+	assert(plane < drv_num_planes_from_format(format));
+
+	switch (format) {
+	case DRM_FORMAT_BGR233:
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_R8:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YVU420_ANDROID:
+		return 8;
+
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+		return (plane == 0) ? 8 : 4;
+
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_ABGR4444:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ARGB4444:
+	case DRM_FORMAT_BGR565:
+	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_BGRX4444:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_GR88:
+	case DRM_FORMAT_RG88:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_RGBA4444:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_RGBX4444:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_XBGR4444:
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+		return 16;
+
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_RGB888:
+		return 24;
+
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_AYUV:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_BGRA8888:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XRGB8888:
+		return 32;
+	}
+
+	fprintf(stderr, "drv: UNKNOWN FORMAT %d\n", format);
+	return 0;
+}
+
+uint32_t drv_bo_get_stride_in_pixels(struct bo *bo)
+{
+	uint32_t bytes_per_pixel = DIV_ROUND_UP(bpp_from_format(bo->format, 0), 8);
+	return DIV_ROUND_UP(bo->strides[0], bytes_per_pixel);
+}
+
+/*
+ * This function returns the stride for a given format, width and plane.
+ */
+uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane)
+{
+	uint32_t stride = DIV_ROUND_UP(width * bpp_from_format(format, plane), 8);
+
+	/*
+	 * Android YV12 buffers require 16-byte aligned strides (see
+	 * <system/graphics.h>); align the luma stride to 32 bytes so that the
+	 * half-width chroma strides remain 16-byte aligned.
+	 */
+	if (format == DRM_FORMAT_YVU420_ANDROID)
+		stride = (plane == 0) ? ALIGN(stride, 32) : ALIGN(stride, 16);
+
+	return stride;
+}
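A worked example of the formula: drv_stride_from_format(DRM_FORMAT_XRGB8888, 1366, 0) yields DIV_ROUND_UP(1366 * 32, 8) = 5464 bytes, while a 33-pixel-wide DRM_FORMAT_YVU420_ANDROID luma plane yields DIV_ROUND_UP(33 * 8, 8) = 33, which the rule above rounds up to ALIGN(33, 32) = 64 bytes.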
+
+/*
+ * This function fills in the buffer object given the driver aligned stride of
+ * the first plane, height and a format. This function assumes there is just
+ * one kernel buffer per buffer object.
+ */
+int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format)
+{
+
+	size_t p, num_planes;
+	uint32_t offset = 0;
+
+	num_planes = drv_num_planes_from_format(format);
+	assert(num_planes);
+	bo->total_size = 0;
+
+	for (p = 0; p < num_planes; p++) {
+		bo->strides[p] = subsample_stride(stride, format, p);
+		bo->sizes[p] = drv_size_from_format(format, bo->strides[p], bo->height, p);
+		bo->offsets[p] = offset;
+		offset += bo->sizes[p];
+		bo->total_size += drv_size_from_format(format, bo->strides[p], aligned_height, p);
+	}
+
+	return 0;
+}
+
+int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+		       uint32_t flags)
+{
+	int ret;
+	size_t plane;
+	uint32_t aligned_width, aligned_height;
+	struct drm_mode_create_dumb create_dumb;
+
+	aligned_width = width;
+	aligned_height = height;
+	if (format == DRM_FORMAT_YVU420_ANDROID) {
+		/*
+		 * Align the width to 32 pixels so that the chroma strides are
+		 * 16-byte aligned, as Android requires.
+		 */
+		aligned_width = ALIGN(width, 32);
+		aligned_height = 3 * DIV_ROUND_UP(height, 2);
+	}
+
+	memset(&create_dumb, 0, sizeof(create_dumb));
+	create_dumb.height = aligned_height;
+	create_dumb.width = aligned_width;
+	create_dumb.bpp = bpp_from_format(format, 0);
+	create_dumb.flags = 0;
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_MODE_CREATE_DUMB failed\n");
+		return ret;
+	}
+
+	drv_bo_from_format(bo, create_dumb.pitch, height, format);
+
+	for (plane = 0; plane < bo->num_planes; plane++)
+		bo->handles[plane].u32 = create_dumb.handle;
+
+	bo->total_size = create_dumb.size;
+	return 0;
+}
+
+int drv_dumb_bo_destroy(struct bo *bo)
+{
+	struct drm_mode_destroy_dumb destroy_dumb;
+	int ret;
+
+	memset(&destroy_dumb, 0, sizeof(destroy_dumb));
+	destroy_dumb.handle = bo->handles[0].u32;
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n",
+			bo->handles[0].u32);
+		return ret;
+	}
+
+	return 0;
+}
+
+int drv_gem_bo_destroy(struct bo *bo)
+{
+	struct drm_gem_close gem_close;
+	int ret, error = 0;
+	size_t plane, i;
+
+	for (plane = 0; plane < bo->num_planes; plane++) {
+		for (i = 0; i < plane; i++)
+			if (bo->handles[i].u32 == bo->handles[plane].u32)
+				break;
+		/* Make sure close hasn't already been called on this handle */
+		if (i != plane)
+			continue;
+
+		memset(&gem_close, 0, sizeof(gem_close));
+		gem_close.handle = bo->handles[plane].u32;
+
+		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+		if (ret) {
+			fprintf(stderr, "drv: DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
+				bo->handles[plane].u32, ret);
+			error = ret;
+		}
+	}
+
+	return error;
+}
+
+int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
+{
+	int ret;
+	size_t plane;
+	struct drm_prime_handle prime_handle;
+
+	for (plane = 0; plane < bo->num_planes; plane++) {
+		memset(&prime_handle, 0, sizeof(prime_handle));
+		prime_handle.fd = data->fds[plane];
+
+		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime_handle);
+
+		if (ret) {
+			fprintf(stderr, "drv: DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n",
+				prime_handle.fd);
+
+			/*
+			 * Need to call GEM close on planes that were opened,
+			 * if any. Adjust the num_planes variable to be the
+			 * plane that failed, so GEM close will be called on
+			 * planes before that plane.
+			 */
+			bo->num_planes = plane;
+			drv_gem_bo_destroy(bo);
+			return ret;
+		}
+
+		bo->handles[plane].u32 = prime_handle.handle;
+	}
+
+	for (plane = 0; plane < bo->num_planes; plane++) {
+		pthread_mutex_lock(&bo->drv->driver_lock);
+		drv_increment_reference_count(bo->drv, bo, plane);
+		pthread_mutex_unlock(&bo->drv->driver_lock);
+	}
+
+	return 0;
+}
+
+void *drv_dumb_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+{
+	int ret;
+	size_t i;
+	struct drm_mode_map_dumb map_dumb;
+
+	memset(&map_dumb, 0, sizeof(map_dumb));
+	map_dumb.handle = bo->handles[plane].u32;
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb);
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_MODE_MAP_DUMB failed\n");
+		return MAP_FAILED;
+	}
+
+	for (i = 0; i < bo->num_planes; i++)
+		if (bo->handles[i].u32 == bo->handles[plane].u32)
+			data->length += bo->sizes[i];
+
+	return mmap(0, data->length, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+		    map_dumb.offset);
+}
+
+uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo, size_t plane)
+{
+	void *count;
+	uintptr_t num = 0;
+
+	if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, &count))
+		num = (uintptr_t)(count);
+
+	return num;
+}
+
+void drv_increment_reference_count(struct driver *drv, struct bo *bo, size_t plane)
+{
+	uintptr_t num = drv_get_reference_count(drv, bo, plane);
+
+	/* If a value isn't in the table, drmHashDelete is a no-op */
+	drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
+	drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num + 1));
+}
+
+void drv_decrement_reference_count(struct driver *drv, struct bo *bo, size_t plane)
+{
+	uintptr_t num = drv_get_reference_count(drv, bo, plane);
+
+	drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
+
+	if (num > 0)
+		drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num - 1));
+}
+
+uint32_t drv_log_base2(uint32_t value)
+{
+	int ret = 0;
+
+	while (value >>= 1)
+		++ret;
+
+	return ret;
+}
+
+int drv_add_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
+			uint64_t usage)
+{
+	struct combinations *combos = &drv->backend->combos;
+	if (combos->size >= combos->allocations) {
+		struct combination *new_data;
+		combos->allocations *= 2;
+		new_data = realloc(combos->data, combos->allocations * sizeof(*combos->data));
+		if (!new_data)
+			return -ENOMEM;
+
+		combos->data = new_data;
+	}
+
+	combos->data[combos->size].format = format;
+	combos->data[combos->size].metadata.priority = metadata->priority;
+	combos->data[combos->size].metadata.tiling = metadata->tiling;
+	combos->data[combos->size].metadata.modifier = metadata->modifier;
+	combos->data[combos->size].usage = usage;
+	combos->size++;
+	return 0;
+}
+
+int drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats,
+			 struct format_metadata *metadata, uint64_t usage)
+{
+	int ret;
+	uint32_t i;
+	for (i = 0; i < num_formats; i++) {
+		ret = drv_add_combination(drv, formats[i], metadata, usage);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
+			    uint64_t usage)
+{
+	uint32_t i;
+	struct combination *combo;
+	/* Attempts to add the specified usage to an existing combination. */
+	for (i = 0; i < drv->backend->combos.size; i++) {
+		combo = &drv->backend->combos.data[i];
+		if (combo->format == format && combo->metadata.tiling == metadata->tiling &&
+		    combo->metadata.modifier == metadata->modifier)
+			combo->usage |= usage;
+	}
+}
+
+struct kms_item *drv_query_kms(struct driver *drv, uint32_t *num_items)
+{
+	uint64_t flag, usage;
+	struct kms_item *items;
+	uint32_t i, j, k, allocations, item_size;
+
+	drmModePlanePtr plane;
+	drmModePropertyPtr prop;
+	drmModePlaneResPtr resources;
+	drmModeObjectPropertiesPtr props;
+
+	/* Start with a power of 2 number of allocations. */
+	allocations = 2;
+	item_size = 0;
+	items = calloc(allocations, sizeof(*items));
+	if (!items)
+		goto out;
+
+	/*
+	 * The ability to return universal planes is only complete on
+	 * ChromeOS kernel versions >= v3.18.  The SET_CLIENT_CAP ioctl
+	 * therefore might return an error code, so don't check it.  If it
+	 * fails, it'll just return the plane list as overlay planes, which is
+	 * fine in our case (our drivers already have cursor bits set).
+	 * modetest in libdrm does the same thing.
+	 */
+	drmSetClientCap(drv->fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
+
+	resources = drmModeGetPlaneResources(drv->fd);
+	if (!resources)
+		goto out;
+
+	for (i = 0; i < resources->count_planes; i++) {
+		plane = drmModeGetPlane(drv->fd, resources->planes[i]);
+		if (!plane)
+			goto out;
+
+		props = drmModeObjectGetProperties(drv->fd, plane->plane_id, DRM_MODE_OBJECT_PLANE);
+		if (!props)
+			goto out;
+
+		for (j = 0; j < props->count_props; j++) {
+			prop = drmModeGetProperty(drv->fd, props->props[j]);
+			if (prop) {
+				if (strcmp(prop->name, "type") == 0) {
+					flag = props->prop_values[j];
+				}
+
+				drmModeFreeProperty(prop);
+			}
+		}
+
+		switch (flag) {
+		case DRM_PLANE_TYPE_OVERLAY:
+		case DRM_PLANE_TYPE_PRIMARY:
+			usage = BO_USE_SCANOUT;
+			break;
+		case DRM_PLANE_TYPE_CURSOR:
+			usage = BO_USE_CURSOR;
+			break;
+		default:
+			assert(0);
+		}
+
+		for (j = 0; j < plane->count_formats; j++) {
+			bool found = false;
+			for (k = 0; k < item_size; k++) {
+				if (items[k].format == plane->formats[j] &&
+				    items[k].modifier == DRM_FORMAT_MOD_NONE) {
+					items[k].usage |= usage;
+					found = true;
+					break;
+				}
+			}
+
+			if (!found && item_size >= allocations) {
+				struct kms_item *new_data = NULL;
+				allocations *= 2;
+				new_data = realloc(items, allocations * sizeof(*items));
+				if (!new_data) {
+					item_size = 0;
+					goto out;
+				}
+
+				items = new_data;
+			}
+
+			if (!found) {
+				items[item_size].format = plane->formats[j];
+				items[item_size].modifier = DRM_FORMAT_MOD_NONE;
+				items[item_size].usage = usage;
+				item_size++;
+			}
+		}
+
+		drmModeFreeObjectProperties(props);
+		drmModeFreePlane(plane);
+	}
+
+	drmModeFreePlaneResources(resources);
+out:
+	if (items && item_size == 0) {
+		free(items);
+		items = NULL;
+	}
+
+	*num_items = item_size;
+	return items;
+}
+
+int drv_modify_linear_combinations(struct driver *drv)
+{
+	uint32_t i, j, num_items;
+	struct kms_item *items;
+	struct combination *combo;
+
+	/*
+	 * All current drivers can scanout linear XRGB8888/ARGB8888 as a primary
+	 * plane and as a cursor. Some drivers don't support
+	 * drmModeGetPlaneResources, so add the combination here. Note that the
+	 * kernel disregards the alpha component of ARGB unless it's an overlay
+	 * plane.
+	 */
+	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
+			       BO_USE_CURSOR | BO_USE_SCANOUT);
+	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
+			       BO_USE_CURSOR | BO_USE_SCANOUT);
+
+	items = drv_query_kms(drv, &num_items);
+	if (!items || !num_items)
+		return 0;
+
+	for (i = 0; i < num_items; i++) {
+		for (j = 0; j < drv->backend->combos.size; j++) {
+			combo = &drv->backend->combos.data[j];
+			if (items[i].format == combo->format)
+				combo->usage |= BO_USE_SCANOUT;
+		}
+	}
+
+	free(items);
+	return 0;
+}
diff --git a/helpers.h b/helpers.h
new file mode 100644
index 0000000..b42a3c2
--- /dev/null
+++ b/helpers.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2014 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef HELPERS_H
+#define HELPERS_H
+
+#include "drv.h"
+
+uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane);
+int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format);
+int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+		       uint32_t flags);
+int drv_dumb_bo_destroy(struct bo *bo);
+int drv_gem_bo_destroy(struct bo *bo);
+int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data);
+void *drv_dumb_bo_map(struct bo *bo, struct map_info *data, size_t plane);
+uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo, size_t plane);
+void drv_increment_reference_count(struct driver *drv, struct bo *bo, size_t plane);
+void drv_decrement_reference_count(struct driver *drv, struct bo *bo, size_t plane);
+uint32_t drv_log_base2(uint32_t value);
+int drv_add_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
+			uint64_t usage);
+int drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats,
+			 struct format_metadata *metadata, uint64_t usage);
+void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
+			    uint64_t usage);
+struct kms_item *drv_query_kms(struct driver *drv, uint32_t *num_items);
+int drv_modify_linear_combinations(struct driver *drv);
+#endif
diff --git a/i915.c b/i915.c
new file mode 100644
index 0000000..aee3055
--- /dev/null
+++ b/i915.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright 2014 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_I915
+
+#include <errno.h>
+#include <i915_drm.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <xf86drm.h>
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+#define I915_CACHELINE_SIZE 64
+#define I915_CACHELINE_MASK (I915_CACHELINE_SIZE - 1)
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR8888,
+						  DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565,
+						  DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB1555,
+						  DRM_FORMAT_XRGB8888 };
+
+static const uint32_t tileable_texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8,
+							    DRM_FORMAT_UYVY, DRM_FORMAT_YUYV };
+
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
+
+struct i915_device {
+	uint32_t gen;
+	int32_t has_llc;
+};
+
+static uint32_t i915_get_gen(int device_id)
+{
+	const uint16_t gen3_ids[] = { 0x2582, 0x2592, 0x2772, 0x27A2, 0x27AE,
+				      0x29C2, 0x29B2, 0x29D2, 0xA001, 0xA011 };
+	unsigned i;
+	for (i = 0; i < ARRAY_SIZE(gen3_ids); i++)
+		if (gen3_ids[i] == device_id)
+			return 3;
+
+	return 4;
+}
+
+static int i915_add_kms_item(struct driver *drv, const struct kms_item *item)
+{
+	uint32_t i;
+	struct combination *combo;
+
+	/*
+	 * Older hardware can't scan out Y-tiled formats. Newer devices can, and
+	 * report this capability via format modifiers.
+	 */
+	for (i = 0; i < drv->backend->combos.size; i++) {
+		combo = &drv->backend->combos.data[i];
+		if (combo->format == item->format) {
+			if ((combo->metadata.tiling == I915_TILING_Y &&
+			     item->modifier == I915_FORMAT_MOD_Y_TILED) ||
+			    (combo->metadata.tiling == I915_TILING_X &&
+			     item->modifier == I915_FORMAT_MOD_X_TILED)) {
+				combo->metadata.modifier = item->modifier;
+				combo->usage |= item->usage;
+			} else if (combo->metadata.tiling != I915_TILING_Y) {
+				combo->usage |= item->usage;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int i915_add_combinations(struct driver *drv)
+{
+	int ret;
+	uint32_t i, num_items;
+	struct kms_item *items;
+	struct format_metadata metadata;
+	uint64_t render_flags, texture_flags;
+
+	render_flags = BO_USE_RENDER_MASK;
+	texture_flags = BO_USE_TEXTURE_MASK;
+
+	metadata.tiling = I915_TILING_NONE;
+	metadata.priority = 1;
+	metadata.modifier = DRM_FORMAT_MOD_NONE;
+
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &metadata, render_flags);
+	if (ret)
+		return ret;
+
+	ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+				   &metadata, texture_flags);
+	if (ret)
+		return ret;
+
+	ret = drv_add_combinations(drv, tileable_texture_source_formats,
+				   ARRAY_SIZE(tileable_texture_source_formats), &metadata,
+				   texture_flags);
+	if (ret)
+		return ret;
+
+	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+
+	render_flags &= ~BO_USE_SW_WRITE_OFTEN;
+	render_flags &= ~BO_USE_SW_READ_OFTEN;
+	render_flags &= ~BO_USE_LINEAR;
+
+	texture_flags &= ~BO_USE_SW_WRITE_OFTEN;
+	texture_flags &= ~BO_USE_SW_READ_OFTEN;
+	texture_flags &= ~BO_USE_LINEAR;
+
+	metadata.tiling = I915_TILING_X;
+	metadata.priority = 2;
+
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &metadata, render_flags);
+	if (ret)
+		return ret;
+
+	ret = drv_add_combinations(drv, tileable_texture_source_formats,
+				   ARRAY_SIZE(tileable_texture_source_formats), &metadata,
+				   texture_flags);
+	if (ret)
+		return ret;
+
+	metadata.tiling = I915_TILING_Y;
+	metadata.priority = 3;
+
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &metadata, render_flags);
+	if (ret)
+		return ret;
+
+	ret = drv_add_combinations(drv, tileable_texture_source_formats,
+				   ARRAY_SIZE(tileable_texture_source_formats), &metadata,
+				   texture_flags);
+	if (ret)
+		return ret;
+
+	items = drv_query_kms(drv, &num_items);
+	if (!items || !num_items)
+		return 0;
+
+	for (i = 0; i < num_items; i++) {
+		ret = i915_add_kms_item(drv, &items[i]);
+		if (ret) {
+			free(items);
+			return ret;
+		}
+	}
+
+	free(items);
+	return 0;
+}
+
+static int i915_align_dimensions(struct bo *bo, uint32_t tiling, uint32_t *stride,
+				 uint32_t *aligned_height)
+{
+	struct i915_device *i915 = bo->drv->priv;
+	uint32_t horizontal_alignment = 4;
+	uint32_t vertical_alignment = 4;
+
+	switch (tiling) {
+	default:
+	case I915_TILING_NONE:
+		horizontal_alignment = 64;
+		break;
+
+	case I915_TILING_X:
+		horizontal_alignment = 512;
+		vertical_alignment = 8;
+		break;
+
+	case I915_TILING_Y:
+		if (i915->gen == 3) {
+			horizontal_alignment = 512;
+			vertical_alignment = 8;
+		} else {
+			horizontal_alignment = 128;
+			vertical_alignment = 32;
+		}
+		break;
+	}
+
+	*aligned_height = ALIGN(bo->height, vertical_alignment);
+	if (i915->gen > 3) {
+		*stride = ALIGN(*stride, horizontal_alignment);
+	} else {
+		while (*stride > horizontal_alignment)
+			horizontal_alignment <<= 1;
+
+		*stride = horizontal_alignment;
+	}
+
+	if (i915->gen <= 3 && *stride > 8192)
+		return -EINVAL;
+
+	return 0;
+}
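A worked example of the two paths: an X-tiled XRGB8888 buffer 1366 pixels wide has a raw stride of 1366 * 4 = 5464 bytes; on gen4+ this becomes ALIGN(5464, 512) = 5632, whereas the gen3 path doubles the 512-byte alignment until it covers the stride and settles on 8192, which the gen3 limit above still accepts.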
+
+static void i915_clflush(void *start, size_t size)
+{
+	void *p = (void *)(((uintptr_t)start) & ~I915_CACHELINE_MASK);
+	void *end = (void *)((uintptr_t)start + size);
+
+	__builtin_ia32_mfence();
+	while (p < end) {
+		__builtin_ia32_clflush(p);
+		p = (void *)((uintptr_t)p + I915_CACHELINE_SIZE);
+	}
+}
+
+static int i915_init(struct driver *drv)
+{
+	int ret;
+	int device_id;
+	struct i915_device *i915;
+	drm_i915_getparam_t get_param;
+
+	i915 = calloc(1, sizeof(*i915));
+	if (!i915)
+		return -ENOMEM;
+
+	memset(&get_param, 0, sizeof(get_param));
+	get_param.param = I915_PARAM_CHIPSET_ID;
+	get_param.value = &device_id;
+	ret = drmIoctl(drv->fd, DRM_IOCTL_I915_GETPARAM, &get_param);
+	if (ret) {
+		fprintf(stderr, "drv: Failed to get I915_PARAM_CHIPSET_ID\n");
+		free(i915);
+		return -EINVAL;
+	}
+
+	i915->gen = i915_get_gen(device_id);
+
+	memset(&get_param, 0, sizeof(get_param));
+	get_param.param = I915_PARAM_HAS_LLC;
+	get_param.value = &i915->has_llc;
+	ret = drmIoctl(drv->fd, DRM_IOCTL_I915_GETPARAM, &get_param);
+	if (ret) {
+		fprintf(stderr, "drv: Failed to get I915_PARAM_HAS_LLC\n");
+		free(i915);
+		return -EINVAL;
+	}
+
+	drv->priv = i915;
+
+	return i915_add_combinations(drv);
+}
+
+static int i915_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+			  uint32_t flags)
+{
+	int ret;
+	size_t plane;
+	uint32_t stride;
+	struct drm_i915_gem_create gem_create;
+	struct drm_i915_gem_set_tiling gem_set_tiling;
+
+	if (flags & (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
+		bo->tiling = I915_TILING_NONE;
+	else if (flags & BO_USE_SCANOUT)
+		bo->tiling = I915_TILING_X;
+	else
+		bo->tiling = I915_TILING_Y;
+
+	stride = drv_stride_from_format(format, width, 0);
+	/*
+	 * Align the Y plane to 128 bytes so the chroma planes would be aligned
+	 * to 64 byte boundaries. This is an Intel HW requirement.
+	 */
+	if (format == DRM_FORMAT_YVU420 || format == DRM_FORMAT_YVU420_ANDROID) {
+		stride = ALIGN(stride, 128);
+		bo->tiling = I915_TILING_NONE;
+	}
+
+	ret = i915_align_dimensions(bo, bo->tiling, &stride, &height);
+	if (ret)
+		return ret;
+
+	drv_bo_from_format(bo, stride, height, format);
+
+	memset(&gem_create, 0, sizeof(gem_create));
+	gem_create.size = bo->total_size;
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_CREATE failed (size=%llu)\n",
+			gem_create.size);
+		return ret;
+	}
+
+	for (plane = 0; plane < bo->num_planes; plane++)
+		bo->handles[plane].u32 = gem_create.handle;
+
+	memset(&gem_set_tiling, 0, sizeof(gem_set_tiling));
+	gem_set_tiling.handle = bo->handles[0].u32;
+	gem_set_tiling.tiling_mode = bo->tiling;
+	gem_set_tiling.stride = bo->strides[0];
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_SET_TILING, &gem_set_tiling);
+	if (ret) {
+		struct drm_gem_close gem_close;
+		memset(&gem_close, 0, sizeof(gem_close));
+		gem_close.handle = bo->handles[0].u32;
+		drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+
+		fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_SET_TILING failed with %d\n", errno);
+		return -errno;
+	}
+
+	return 0;
+}
+
+static void i915_close(struct driver *drv)
+{
+	free(drv->priv);
+	drv->priv = NULL;
+}
+
+static int i915_bo_import(struct bo *bo, struct drv_import_fd_data *data)
+{
+	int ret;
+	struct drm_i915_gem_get_tiling gem_get_tiling;
+
+	ret = drv_prime_bo_import(bo, data);
+	if (ret)
+		return ret;
+
+	/* TODO(gsingh): export modifiers and get rid of backdoor tiling. */
+	memset(&gem_get_tiling, 0, sizeof(gem_get_tiling));
+	gem_get_tiling.handle = bo->handles[0].u32;
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_GET_TILING, &gem_get_tiling);
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_GET_TILING failed.\n");
+		return ret;
+	}
+
+	bo->tiling = gem_get_tiling.tiling_mode;
+	return 0;
+}
+
+static void *i915_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+{
+	int ret;
+	void *addr;
+	struct drm_i915_gem_set_domain set_domain;
+
+	memset(&set_domain, 0, sizeof(set_domain));
+	set_domain.handle = bo->handles[0].u32;
+	if (bo->tiling == I915_TILING_NONE) {
+		struct drm_i915_gem_mmap gem_map;
+		memset(&gem_map, 0, sizeof(gem_map));
+
+		gem_map.handle = bo->handles[0].u32;
+		gem_map.offset = 0;
+		gem_map.size = bo->total_size;
+
+		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_map);
+		if (ret) {
+			fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_MMAP failed\n");
+			return MAP_FAILED;
+		}
+
+		addr = (void *)(uintptr_t)gem_map.addr_ptr;
+		set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
+
+	} else {
+		struct drm_i915_gem_mmap_gtt gem_map;
+		memset(&gem_map, 0, sizeof(gem_map));
+
+		gem_map.handle = bo->handles[0].u32;
+
+		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gem_map);
+		if (ret) {
+			fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_MMAP_GTT failed\n");
+			return MAP_FAILED;
+		}
+
+		addr = mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+			    gem_map.offset);
+
+		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+	}
+
+	if (addr == MAP_FAILED) {
+		fprintf(stderr, "drv: i915 GEM mmap failed\n");
+		return addr;
+	}
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_SET_DOMAIN failed\n");
+		return MAP_FAILED;
+	}
+
+	data->length = bo->total_size;
+	return addr;
+}
+
+static int i915_bo_unmap(struct bo *bo, struct map_info *data)
+{
+	struct i915_device *i915 = bo->drv->priv;
+	if (!i915->has_llc && bo->tiling == I915_TILING_NONE)
+		i915_clflush(data->addr, data->length);
+
+	return munmap(data->addr, data->length);
+}
+
+static uint32_t i915_resolve_format(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+		/* HACK: See b/28671744 */
+		return DRM_FORMAT_XBGR8888;
+	case DRM_FORMAT_FLEX_YCbCr_420_888:
+		return DRM_FORMAT_YVU420_ANDROID;
+	default:
+		return format;
+	}
+}
+
+struct backend backend_i915 = {
+	.name = "i915",
+	.init = i915_init,
+	.close = i915_close,
+	.bo_create = i915_bo_create,
+	.bo_destroy = drv_gem_bo_destroy,
+	.bo_import = i915_bo_import,
+	.bo_map = i915_bo_map,
+	.bo_unmap = i915_bo_unmap,
+	.resolve_format = i915_resolve_format,
+};
+
+#endif
diff --git a/marvell.c b/marvell.c
new file mode 100644
index 0000000..8114ac7
--- /dev/null
+++ b/marvell.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2015 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_MARVELL
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
+
+static int marvell_init(struct driver *drv)
+{
+	int ret;
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &LINEAR_METADATA, BO_USE_RENDER_MASK);
+	if (ret)
+		return ret;
+
+	return drv_modify_linear_combinations(drv);
+}
+
+struct backend backend_marvell = {
+	.name = "marvell",
+	.init = marvell_init,
+	.bo_create = drv_dumb_bo_create,
+	.bo_destroy = drv_dumb_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = drv_dumb_bo_map,
+};
+
+#endif
diff --git a/mediatek.c b/mediatek.c
new file mode 100644
index 0000000..0078cf0
--- /dev/null
+++ b/mediatek.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2015 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_MEDIATEK
+
+// clang-format off
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <xf86drm.h>
+#include <mediatek_drm.h>
+// clang-format on
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
+						  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
+						  DRM_FORMAT_XRGB8888 };
+
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
+						   DRM_FORMAT_YVU420_ANDROID };
+
+static int mediatek_init(struct driver *drv)
+{
+	int ret;
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &LINEAR_METADATA, BO_USE_RENDER_MASK);
+	if (ret)
+		return ret;
+
+	ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+				   &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+	if (ret)
+		return ret;
+
+	return drv_modify_linear_combinations(drv);
+}
+
+static int mediatek_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+			      uint32_t flags)
+{
+	int ret;
+	size_t plane;
+	uint32_t stride;
+	struct drm_mtk_gem_create gem_create;
+
+	/*
+	 * Since the ARM L1 cache line size is 64 bytes, align to that as a
+	 * performance optimization.
+	 */
+	stride = drv_stride_from_format(format, width, 0);
+	stride = ALIGN(stride, 64);
+	drv_bo_from_format(bo, stride, height, format);
+
+	memset(&gem_create, 0, sizeof(gem_create));
+	gem_create.size = bo->total_size;
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_CREATE, &gem_create);
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_MTK_GEM_CREATE failed (size=%llu)\n",
+			gem_create.size);
+		return ret;
+	}
+
+	for (plane = 0; plane < bo->num_planes; plane++)
+		bo->handles[plane].u32 = gem_create.handle;
+
+	return 0;
+}
+
+static void *mediatek_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+{
+	int ret;
+	struct drm_mtk_gem_map_off gem_map;
+
+	memset(&gem_map, 0, sizeof(gem_map));
+	gem_map.handle = bo->handles[0].u32;
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_MAP_OFFSET, &gem_map);
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_MTK_GEM_MAP_OFFSET failed\n");
+		return MAP_FAILED;
+	}
+
+	data->length = bo->total_size;
+
+	return mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+		    gem_map.offset);
+}
+
+static uint32_t mediatek_resolve_format(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+		/* HACK: See b/28671744 */
+		return DRM_FORMAT_XBGR8888;
+	case DRM_FORMAT_FLEX_YCbCr_420_888:
+		return DRM_FORMAT_YVU420_ANDROID;
+	default:
+		return format;
+	}
+}
+
+struct backend backend_mediatek = {
+	.name = "mediatek",
+	.init = mediatek_init,
+	.bo_create = mediatek_bo_create,
+	.bo_destroy = drv_gem_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = mediatek_bo_map,
+	.resolve_format = mediatek_resolve_format,
+};
+
+#endif
diff --git a/nouveau.c b/nouveau.c
new file mode 100644
index 0000000..7cdab3a
--- /dev/null
+++ b/nouveau.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
+
+static int nouveau_init(struct driver *drv)
+{
+	int ret;
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &LINEAR_METADATA, BO_USE_RENDER_MASK);
+	if (ret)
+		return ret;
+
+	return drv_modify_linear_combinations(drv);
+}
+
+struct backend backend_nouveau = {
+	.name = "nouveau",
+	.init = nouveau_init,
+	.bo_create = drv_dumb_bo_create,
+	.bo_destroy = drv_dumb_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = drv_dumb_bo_map,
+};
diff --git a/presubmit.sh b/presubmit.sh
new file mode 100755
index 0000000..6d55f2a
--- /dev/null
+++ b/presubmit.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+find \
+	'(' -name '*.[ch]' -or -name '*.cc' ')' \
+	-not -name 'gbm.h' \
+	-exec clang-format -style=file -i {} +
diff --git a/rockchip.c b/rockchip.c
new file mode 100644
index 0000000..50ea4ef
--- /dev/null
+++ b/rockchip.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright 2014 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_ROCKCHIP
+
+#include <errno.h>
+#include <rockchip_drm.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <xf86drm.h>
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
+						  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
+						  DRM_FORMAT_XRGB8888 };
+
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12,
+						   DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
+
+static int afbc_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, uint32_t format)
+{
+	/* We've restricted ourselves to four bytes per pixel. */
+	const uint32_t pixel_size = 4;
+
+	const uint32_t clump_width = 4;
+	const uint32_t clump_height = 4;
+
+#define AFBC_NARROW 1
+#if AFBC_NARROW == 1
+	const uint32_t block_width = 4 * clump_width;
+	const uint32_t block_height = 4 * clump_height;
+#else
+	const uint32_t block_width = 8 * clump_width;
+	const uint32_t block_height = 2 * clump_height;
+#endif
+
+	const uint32_t header_block_size = 16;
+	const uint32_t body_block_size = block_width * block_height * pixel_size;
+	const uint32_t width_in_blocks = DIV_ROUND_UP(width, block_width);
+	const uint32_t height_in_blocks = DIV_ROUND_UP(height, block_height);
+	const uint32_t total_blocks = width_in_blocks * height_in_blocks;
+
+	const uint32_t header_plane_size = total_blocks * header_block_size;
+	const uint32_t body_plane_size = total_blocks * body_block_size;
+
+	/* The GPU requires 64-byte alignment, but the EGL import code expects
+	 * 1024-byte alignment for the body plane. */
+	const uint32_t body_plane_alignment = 1024;
+
+	const uint32_t body_plane_offset = ALIGN(header_plane_size, body_plane_alignment);
+	const uint32_t total_size = body_plane_offset + body_plane_size;
+
+	bo->strides[0] = width_in_blocks * block_width * pixel_size;
+	bo->sizes[0] = total_size;
+	bo->offsets[0] = 0;
+
+	bo->total_size = total_size;
+
+	bo->format_modifiers[0] = DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC;
+
+	return 0;
+}
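A worked example with AFBC_NARROW (16x16-pixel blocks): a 1920x1080 buffer has 120 * 68 = 8160 blocks, so the header plane is 8160 * 16 = 130560 bytes, the body plane starts at ALIGN(130560, 1024) = 131072 and spans 8160 * 1024 = 8355840 bytes, giving total_size = 8486912 bytes and strides[0] = 120 * 16 * 4 = 7680 bytes.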
+
+static int rockchip_add_kms_item(struct driver *drv, const struct kms_item *item)
+{
+	int ret;
+	uint32_t i, j;
+	uint64_t flags;
+	struct combination *combo;
+	struct format_metadata metadata;
+
+	for (i = 0; i < drv->backend->combos.size; i++) {
+		combo = &drv->backend->combos.data[i];
+		if (combo->format == item->format) {
+			if (item->modifier == DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC) {
+				flags = BO_USE_RENDERING | BO_USE_SCANOUT | BO_USE_TEXTURE;
+				metadata.modifier = item->modifier;
+				metadata.tiling = 0;
+				metadata.priority = 2;
+
+				for (j = 0; j < ARRAY_SIZE(texture_source_formats); j++) {
+					if (item->format == texture_source_formats[j])
+						flags &= ~BO_USE_RENDERING;
+				}
+
+				ret = drv_add_combination(drv, item->format, &metadata, flags);
+				if (ret)
+					return ret;
+			} else {
+				combo->usage |= item->usage;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int rockchip_init(struct driver *drv)
+{
+	int ret;
+	uint32_t i, num_items;
+	struct kms_item *items;
+	struct format_metadata metadata;
+
+	metadata.tiling = 0;
+	metadata.priority = 1;
+	metadata.modifier = DRM_FORMAT_MOD_NONE;
+
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &metadata, BO_USE_RENDER_MASK);
+	if (ret)
+		return ret;
+
+	ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+				   &metadata, BO_USE_TEXTURE_MASK);
+	if (ret)
+		return ret;
+
+	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+
+	items = drv_query_kms(drv, &num_items);
+	if (!items || !num_items)
+		return 0;
+
+	for (i = 0; i < num_items; i++) {
+		ret = rockchip_add_kms_item(drv, &items[i]);
+		if (ret) {
+			free(items);
+			return ret;
+		}
+	}
+
+	free(items);
+	return 0;
+}
+
+static bool has_modifier(const uint64_t *list, uint32_t count, uint64_t modifier)
+{
+	uint32_t i;
+
+	for (i = 0; i < count; i++)
+		if (list[i] == modifier)
+			return true;
+
+	return false;
+}
+
+static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
+					     uint32_t format, const uint64_t *modifiers,
+					     uint32_t count)
+{
+	int ret;
+	size_t plane;
+	struct drm_rockchip_gem_create gem_create;
+
+	if (format == DRM_FORMAT_NV12) {
+		uint32_t w_mbs = DIV_ROUND_UP(ALIGN(width, 16), 16);
+		uint32_t h_mbs = DIV_ROUND_UP(ALIGN(height, 16), 16);
+
+		uint32_t aligned_width = w_mbs * 16;
+		uint32_t aligned_height = DIV_ROUND_UP(h_mbs * 16 * 3, 2);
+
+		drv_bo_from_format(bo, aligned_width, height, format);
+		bo->total_size = bo->strides[0] * aligned_height + w_mbs * h_mbs * 128;
+	} else if (width <= 2560 &&
+		   has_modifier(modifiers, count, DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC)) {
+		/* If the caller has decided they can use AFBC, always
+		 * pick that */
+		afbc_bo_from_format(bo, width, height, format);
+	} else {
+		if (!has_modifier(modifiers, count, DRM_FORMAT_MOD_NONE)) {
+			errno = EINVAL;
+			fprintf(stderr, "no usable modifier found\n");
+			return -1;
+		}
+
+		uint32_t stride;
+		/*
+		 * Since the ARM L1 cache line size is 64 bytes, align to that
+		 * as a performance optimization. For YV12, the Mali cmem allocator
+		 * requires that chroma planes are aligned to 64-bytes, so align the
+		 * luma plane to 128 bytes.
+		 */
+		stride = drv_stride_from_format(format, width, 0);
+		if (format == DRM_FORMAT_YVU420 || format == DRM_FORMAT_YVU420_ANDROID)
+			stride = ALIGN(stride, 128);
+		else
+			stride = ALIGN(stride, 64);
+
+		drv_bo_from_format(bo, stride, height, format);
+	}
+
+	memset(&gem_create, 0, sizeof(gem_create));
+	gem_create.size = bo->total_size;
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_ROCKCHIP_GEM_CREATE, &gem_create);
+
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_ROCKCHIP_GEM_CREATE failed (size=%llu)\n",
+			gem_create.size);
+		return ret;
+	}
+
+	for (plane = 0; plane < bo->num_planes; plane++)
+		bo->handles[plane].u32 = gem_create.handle;
+
+	return 0;
+}
+
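+/*
+ * Worked example of the NV12 branch above, with illustrative numbers and
+ * assuming drv_bo_from_format() produces a 1280-byte luma stride for a
+ * 1280-pixel-wide NV12 buffer: for 1280x720, w_mbs = 80 and h_mbs = 45, so
+ * aligned_width = 1280 and aligned_height = DIV_ROUND_UP(45 * 16 * 3, 2) =
+ * 1080. total_size then becomes 1280 * 1080 + 80 * 45 * 128 = 1843200 bytes,
+ * i.e. the plane data plus 128 extra bytes per macroblock.
+ */
+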
+static int rockchip_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+			      uint32_t flags)
+{
+	uint64_t modifiers[] = { DRM_FORMAT_MOD_NONE };
+
+	return rockchip_bo_create_with_modifiers(bo, width, height, format, modifiers,
+						 ARRAY_SIZE(modifiers));
+}
+
+static void *rockchip_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+{
+	int ret;
+	struct drm_rockchip_gem_map_off gem_map;
+
+	/* We can only map buffers created with SW access flags, which should
+	 * have no modifiers (i.e., not AFBC). */
+	if (bo->format_modifiers[0] == DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC)
+		return MAP_FAILED;
+
+	memset(&gem_map, 0, sizeof(gem_map));
+	gem_map.handle = bo->handles[0].u32;
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET, &gem_map);
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET failed\n");
+		return MAP_FAILED;
+	}
+
+	data->length = bo->total_size;
+
+	return mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+		    gem_map.offset);
+}
+
+static uint32_t rockchip_resolve_format(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+		/*HACK: See b/28671744 */
+		return DRM_FORMAT_XBGR8888;
+	case DRM_FORMAT_FLEX_YCbCr_420_888:
+		return DRM_FORMAT_NV12;
+	default:
+		return format;
+	}
+}
+
+struct backend backend_rockchip = {
+	.name = "rockchip",
+	.init = rockchip_init,
+	.bo_create = rockchip_bo_create,
+	.bo_create_with_modifiers = rockchip_bo_create_with_modifiers,
+	.bo_destroy = drv_gem_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = rockchip_bo_map,
+	.resolve_format = rockchip_resolve_format,
+};
+
+#endif
diff --git a/tegra.c b/tegra.c
new file mode 100644
index 0000000..9fca689
--- /dev/null
+++ b/tegra.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright 2014 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_TEGRA
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <tegra_drm.h>
+#include <xf86drm.h>
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+/*
+ * GOB (Group Of Bytes) is the basic unit of the blocklinear layout.
+ * GOBs are arranged into blocks, where the height of the block (measured
+ * in GOBs) is configurable.
+ */
+#define NV_BLOCKLINEAR_GOB_HEIGHT 8
+#define NV_BLOCKLINEAR_GOB_WIDTH 64
+#define NV_DEFAULT_BLOCK_HEIGHT_LOG2 4
+#define NV_PREFERRED_PAGE_SIZE (128 * 1024)
+
+// clang-format off
+enum nv_mem_kind
+{
+	NV_MEM_KIND_PITCH = 0,
+	NV_MEM_KIND_C32_2CRA = 0xdb,
+	NV_MEM_KIND_GENERIC_16Bx2 = 0xfe,
+};
+
+enum tegra_map_type {
+	TEGRA_READ_TILED_BUFFER = 0,
+	TEGRA_WRITE_TILED_BUFFER = 1,
+};
+// clang-format on
+
+struct tegra_private_map_data {
+	void *tiled;
+	void *untiled;
+};
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
+
+static int compute_block_height_log2(int height)
+{
+	int block_height_log2 = NV_DEFAULT_BLOCK_HEIGHT_LOG2;
+
+	if (block_height_log2 > 0) {
+		/* Shrink, if a smaller block height could cover the whole
+		 * surface height. */
+		int proposed = NV_BLOCKLINEAR_GOB_HEIGHT << (block_height_log2 - 1);
+		while (proposed >= height) {
+			block_height_log2--;
+			if (block_height_log2 == 0)
+				break;
+			proposed /= 2;
+		}
+	}
+	return block_height_log2;
+}
+
+static void compute_layout_blocklinear(int width, int height, int format, enum nv_mem_kind *kind,
+				       uint32_t *block_height_log2, uint32_t *stride,
+				       uint32_t *size)
+{
+	int pitch = drv_stride_from_format(format, width, 0);
+
+	/* Align to blocklinear blocks. */
+	pitch = ALIGN(pitch, NV_BLOCKLINEAR_GOB_WIDTH);
+
+	/* Compute padded height. */
+	*block_height_log2 = compute_block_height_log2(height);
+	int block_height = 1 << *block_height_log2;
+	int padded_height = ALIGN(height, NV_BLOCKLINEAR_GOB_HEIGHT * block_height);
+
+	int bytes = pitch * padded_height;
+
+	/* Pad the allocation to the preferred page size.
+	 * This will reduce the required page table size (see discussion in NV
+	 * bug 1321091), and also acts as a WAR for NV bug 1325421.
+	 */
+	bytes = ALIGN(bytes, NV_PREFERRED_PAGE_SIZE);
+
+	*kind = NV_MEM_KIND_C32_2CRA;
+	*stride = pitch;
+	*size = bytes;
+}
+
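+/*
+ * Worked example of the blocklinear layout above, with illustrative numbers
+ * and assuming a four-byte-per-pixel format: for 1920x1080, the pitch is
+ * ALIGN(1920 * 4, 64) = 7680 bytes. compute_block_height_log2(1080) keeps
+ * the default of 4 (blocks of 16 GOBs), so padded_height = ALIGN(1080, 128) =
+ * 1152 and the raw size is 7680 * 1152 = 8847360 bytes, which pads up to
+ * ALIGN(8847360, 128 * 1024) = 8912896 bytes.
+ */
+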
+static void compute_layout_linear(int width, int height, int format, uint32_t *stride,
+				  uint32_t *size)
+{
+	*stride = ALIGN(drv_stride_from_format(format, width, 0), 64);
+	*size = *stride * height;
+}
+
+static void transfer_tile(struct bo *bo, uint8_t *tiled, uint8_t *untiled, enum tegra_map_type type,
+			  uint32_t bytes_per_pixel, uint32_t gob_top, uint32_t gob_left,
+			  uint32_t gob_size_pixels)
+{
+	uint8_t *tmp;
+	uint32_t x, y, k;
+	for (k = 0; k < gob_size_pixels; k++) {
+		/*
+		 * Given the kth pixel starting from the tile specified by
+		 * gob_top and gob_left, unswizzle to get the standard (x, y)
+		 * representation.
+		 */
+		x = gob_left + (((k >> 3) & 8) | ((k >> 1) & 4) | (k & 3));
+		y = gob_top + ((k >> 7 << 3) | ((k >> 3) & 6) | ((k >> 2) & 1));
+
+		tmp = untiled + (y * bo->strides[0]) + (x * bytes_per_pixel);
+
+		if (type == TEGRA_READ_TILED_BUFFER)
+			memcpy(tmp, tiled, bytes_per_pixel);
+		else if (type == TEGRA_WRITE_TILED_BUFFER)
+			memcpy(tiled, tmp, bytes_per_pixel);
+
+		/* Move on to next pixel. */
+		tiled += bytes_per_pixel;
+	}
+}
+
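+/*
+ * Illustration of the unswizzle above, derived from the bit masks: pixels
+ * k = 0..3 land at (gob_left + k, gob_top), k = 4 lands at
+ * (gob_left, gob_top + 1) and k = 8 lands at (gob_left + 4, gob_top), so
+ * consecutive tiled bytes cover a small 2D footprint rather than a full row.
+ */
+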
+static void transfer_tiled_memory(struct bo *bo, uint8_t *tiled, uint8_t *untiled,
+				  enum tegra_map_type type)
+{
+	uint32_t gob_width, gob_height, gob_size_bytes, gob_size_pixels, gob_count_x, gob_count_y,
+	    gob_top, gob_left;
+	uint32_t i, j, offset;
+	uint8_t *tmp;
+	uint32_t bytes_per_pixel = drv_stride_from_format(bo->format, 1, 0);
+
+	/*
+	 * The blocklinear format consists of 8*(2^n) x 64 byte sized tiles,
+	 * where 0 <= n <= 4.
+	 */
+	gob_width = DIV_ROUND_UP(NV_BLOCKLINEAR_GOB_WIDTH, bytes_per_pixel);
+	gob_height = NV_BLOCKLINEAR_GOB_HEIGHT * (1 << NV_DEFAULT_BLOCK_HEIGHT_LOG2);
+	/* Calculate the height from the maximum possible GOB height. */
+	while (gob_height > NV_BLOCKLINEAR_GOB_HEIGHT && gob_height >= 2 * bo->height)
+		gob_height /= 2;
+
+	gob_size_bytes = gob_height * NV_BLOCKLINEAR_GOB_WIDTH;
+	gob_size_pixels = gob_height * gob_width;
+
+	gob_count_x = DIV_ROUND_UP(bo->strides[0], NV_BLOCKLINEAR_GOB_WIDTH);
+	gob_count_y = DIV_ROUND_UP(bo->height, gob_height);
+
+	offset = 0;
+	for (j = 0; j < gob_count_y; j++) {
+		gob_top = j * gob_height;
+		for (i = 0; i < gob_count_x; i++) {
+			tmp = tiled + offset;
+			gob_left = i * gob_width;
+
+			transfer_tile(bo, tmp, untiled, type, bytes_per_pixel, gob_top, gob_left,
+				      gob_size_pixels);
+
+			offset += gob_size_bytes;
+		}
+	}
+}
+
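+/*
+ * Worked example of the GOB walk above, with illustrative numbers and
+ * assuming a four-byte-per-pixel format, a 7680-byte stride and a
+ * 64-pixel-high buffer: gob_width = 64 / 4 = 16 pixels, and gob_height
+ * starts at 128 and halves once to 64 because 128 >= 2 * 64. That yields
+ * gob_size_bytes = 4096, gob_size_pixels = 1024, gob_count_x = 120 and
+ * gob_count_y = 1.
+ */
+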
+static int tegra_init(struct driver *drv)
+{
+	int ret;
+	struct format_metadata metadata;
+	uint64_t flags = BO_USE_RENDER_MASK;
+
+	metadata.tiling = NV_MEM_KIND_PITCH;
+	metadata.priority = 1;
+	metadata.modifier = DRM_FORMAT_MOD_NONE;
+
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &metadata, flags);
+	if (ret)
+		return ret;
+
+	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+
+	flags &= ~BO_USE_SW_WRITE_OFTEN;
+	flags &= ~BO_USE_SW_READ_OFTEN;
+	flags &= ~BO_USE_LINEAR;
+
+	metadata.tiling = NV_MEM_KIND_C32_2CRA;
+	metadata.priority = 2;
+
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &metadata, flags);
+	if (ret)
+		return ret;
+
+	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_SCANOUT);
+	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_SCANOUT);
+	return 0;
+}
+
+static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+			   uint32_t flags)
+{
+	uint32_t size, stride, block_height_log2 = 0;
+	enum nv_mem_kind kind = NV_MEM_KIND_PITCH;
+	struct drm_tegra_gem_create gem_create;
+	int ret;
+
+	if (flags & (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
+		compute_layout_linear(width, height, format, &stride, &size);
+	else
+		compute_layout_blocklinear(width, height, format, &kind, &block_height_log2,
+					   &stride, &size);
+
+	memset(&gem_create, 0, sizeof(gem_create));
+	gem_create.size = size;
+	gem_create.flags = 0;
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_TEGRA_GEM_CREATE, &gem_create);
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_TEGRA_GEM_CREATE failed (size=%u)\n", size);
+		return ret;
+	}
+
+	bo->handles[0].u32 = gem_create.handle;
+	bo->offsets[0] = 0;
+	bo->total_size = bo->sizes[0] = size;
+	bo->strides[0] = stride;
+
+	if (kind != NV_MEM_KIND_PITCH) {
+		struct drm_tegra_gem_set_tiling gem_tile;
+
+		memset(&gem_tile, 0, sizeof(gem_tile));
+		gem_tile.handle = bo->handles[0].u32;
+		gem_tile.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
+		gem_tile.value = block_height_log2;
+
+		ret = drmCommandWriteRead(bo->drv->fd, DRM_TEGRA_GEM_SET_TILING, &gem_tile,
+					  sizeof(gem_tile));
+		if (ret < 0) {
+			drv_gem_bo_destroy(bo);
+			return ret;
+		}
+
+		/* Encode blocklinear parameters for EGLImage creation. */
+		bo->tiling = (kind & 0xff) | ((block_height_log2 & 0xf) << 8);
+		bo->format_modifiers[0] = fourcc_mod_code(NV, bo->tiling);
+	}
+
+	return 0;
+}
+
+static void *tegra_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+{
+	int ret;
+	struct drm_tegra_gem_mmap gem_map;
+	struct tegra_private_map_data *priv;
+
+	memset(&gem_map, 0, sizeof(gem_map));
+	gem_map.handle = bo->handles[0].u32;
+
+	ret = drmCommandWriteRead(bo->drv->fd, DRM_TEGRA_GEM_MMAP, &gem_map, sizeof(gem_map));
+	if (ret < 0) {
+		fprintf(stderr, "drv: DRM_TEGRA_GEM_MMAP failed\n");
+		return MAP_FAILED;
+	}
+
+	void *addr = mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+			  gem_map.offset);
+
+	data->length = bo->total_size;
+
+	if ((bo->tiling & 0xFF) == NV_MEM_KIND_C32_2CRA && addr != MAP_FAILED) {
+		priv = calloc(1, sizeof(*priv));
+		priv->untiled = calloc(1, bo->total_size);
+		priv->tiled = addr;
+		data->priv = priv;
+		transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_READ_TILED_BUFFER);
+		addr = priv->untiled;
+	}
+
+	return addr;
+}
+
+static int tegra_bo_unmap(struct bo *bo, struct map_info *data)
+{
+	if (data->priv) {
+		struct tegra_private_map_data *priv = data->priv;
+		transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_WRITE_TILED_BUFFER);
+		data->addr = priv->tiled;
+		free(priv->untiled);
+		free(priv);
+		data->priv = NULL;
+	}
+
+	return munmap(data->addr, data->length);
+}
+
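+/*
+ * For blocklinear buffers, tegra_bo_map() mmaps the tiled BO, allocates a
+ * linear shadow copy, detiles into it and hands the shadow to the caller;
+ * tegra_bo_unmap() swizzles the shadow back into the tiled mapping, frees it
+ * and then unmaps the real mapping. Pitch-linear buffers skip the shadow
+ * copy entirely.
+ */
+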
+struct backend backend_tegra = {
+	.name = "tegra",
+	.init = tegra_init,
+	.bo_create = tegra_bo_create,
+	.bo_destroy = drv_gem_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = tegra_bo_map,
+	.bo_unmap = tegra_bo_unmap,
+};
+
+#endif
diff --git a/udl.c b/udl.c
new file mode 100644
index 0000000..eb15fbe
--- /dev/null
+++ b/udl.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
+
+static int udl_init(struct driver *drv)
+{
+	int ret;
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &LINEAR_METADATA, BO_USE_RENDER_MASK);
+	if (ret)
+		return ret;
+
+	return drv_modify_linear_combinations(drv);
+}
+
+struct backend backend_udl = {
+	.name = "udl",
+	.init = udl_init,
+	.bo_create = drv_dumb_bo_create,
+	.bo_destroy = drv_dumb_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = drv_dumb_bo_map,
+};
diff --git a/util.h b/util.h
new file mode 100644
index 0000000..fd61d9b
--- /dev/null
+++ b/util.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2014 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef UTIL_H
+#define UTIL_H
+
+#define MAX(A, B) ((A) > (B) ? (A) : (B))
+#define ARRAY_SIZE(A) (sizeof(A) / sizeof(*(A)))
+#define PUBLIC __attribute__((visibility("default")))
+#define ALIGN(A, B) (((A) + (B)-1) / (B) * (B))
+#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
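+
+/*
+ * Example values, for illustration only: ALIGN(100, 64) == 128,
+ * DIV_ROUND_UP(100, 64) == 2 and MAX(3, 5) == 5.
+ */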
+
+#endif
diff --git a/vc4.c b/vc4.c
new file mode 100644
index 0000000..99896b9
--- /dev/null
+++ b/vc4.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2017 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_VC4
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <vc4_drm.h>
+#include <xf86drm.h>
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565,
+						  DRM_FORMAT_XRGB8888 };
+
+static int vc4_init(struct driver *drv)
+{
+	int ret;
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &LINEAR_METADATA, BO_USE_RENDER_MASK);
+	if (ret)
+		return ret;
+
+	return drv_modify_linear_combinations(drv);
+}
+
+static int vc4_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+			 uint32_t flags)
+{
+	int ret;
+	size_t plane;
+	uint32_t stride;
+	struct drm_vc4_create_bo bo_create;
+
+	/*
+	 * Since the ARM L1 cache line size is 64 bytes, align to that as a
+	 * performance optimization.
+	 */
+	stride = drv_stride_from_format(format, width, 0);
+	stride = ALIGN(stride, 64);
+	drv_bo_from_format(bo, stride, height, format);
+
+	memset(&bo_create, 0, sizeof(bo_create));
+	bo_create.size = bo->total_size;
+
+	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VC4_CREATE_BO, &bo_create);
+	if (ret) {
+		fprintf(stderr, "drv: DRM_IOCTL_VC4_CREATE_BO failed (size=%zu)\n",
+			bo->total_size);
+		return ret;
+	}
+
+	for (plane = 0; plane < bo->num_planes; plane++)
+		bo->handles[plane].u32 = bo_create.handle;
+
+	return 0;
+}
+
+static void *vc4_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+{
+	int ret;
+	struct drm_vc4_mmap_bo bo_map;
+
+	memset(&bo_map, 0, sizeof(bo_map));
+	bo_map.handle = bo->handles[0].u32;
+
+	ret = drmCommandWriteRead(bo->drv->fd, DRM_VC4_MMAP_BO, &bo_map, sizeof(bo_map));
+	if (ret) {
+		fprintf(stderr, "drv: DRM_VC4_MMAP_BO failed\n");
+		return MAP_FAILED;
+	}
+
+	data->length = bo->total_size;
+
+	return mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+		    bo_map.offset);
+}
+
+struct backend backend_vc4 = {
+	.name = "vc4",
+	.init = vc4_init,
+	.bo_create = vc4_bo_create,
+	.bo_import = drv_prime_bo_import,
+	.bo_destroy = drv_gem_bo_destroy,
+	.bo_map = vc4_bo_map,
+};
+
+#endif
diff --git a/vgem.c b/vgem.c
new file mode 100644
index 0000000..95aee0a
--- /dev/null
+++ b/vgem.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+#define MESA_LLVMPIPE_TILE_ORDER 6
+#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
+
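+/*
+ * Example of how the tile size is used by vgem_bo_create() below
+ * (illustrative numbers): a 1366x768 request is padded to
+ * ALIGN(1366, 64) x ALIGN(768, 64) = 1408x768, presumably so that llvmpipe's
+ * 64x64 render tiles never touch memory outside the allocation.
+ */
+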
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
+						  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
+						  DRM_FORMAT_XRGB8888 };
+
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
+						   DRM_FORMAT_YVU420_ANDROID };
+
+static int vgem_init(struct driver *drv)
+{
+	int ret;
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &LINEAR_METADATA, BO_USE_RENDER_MASK);
+	if (ret)
+		return ret;
+
+	ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+				   &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+	if (ret)
+		return ret;
+
+	return drv_modify_linear_combinations(drv);
+}
+
+static int vgem_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+			  uint32_t flags)
+{
+	int ret = drv_dumb_bo_create(bo, ALIGN(width, MESA_LLVMPIPE_TILE_SIZE),
+				     ALIGN(height, MESA_LLVMPIPE_TILE_SIZE), format, flags);
+	return ret;
+}
+
+static uint32_t vgem_resolve_format(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+		/*HACK: See b/28671744 */
+		return DRM_FORMAT_XBGR8888;
+	case DRM_FORMAT_FLEX_YCbCr_420_888:
+		return DRM_FORMAT_YVU420_ANDROID;
+	default:
+		return format;
+	}
+}
+
+struct backend backend_vgem = {
+	.name = "vgem",
+	.init = vgem_init,
+	.bo_create = vgem_bo_create,
+	.bo_destroy = drv_dumb_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = drv_dumb_bo_map,
+	.resolve_format = vgem_resolve_format,
+};
diff --git a/virtio_gpu.c b/virtio_gpu.c
new file mode 100644
index 0000000..5780600
--- /dev/null
+++ b/virtio_gpu.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+#define MESA_LLVMPIPE_TILE_ORDER 6
+#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
+						  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
+						  DRM_FORMAT_XRGB8888 };
+
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
+						   DRM_FORMAT_YVU420_ANDROID };
+
+static int virtio_gpu_init(struct driver *drv)
+{
+	int ret;
+	ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+				   &LINEAR_METADATA, BO_USE_RENDER_MASK);
+	if (ret)
+		return ret;
+
+	ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+				   &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+	if (ret)
+		return ret;
+
+	return drv_modify_linear_combinations(drv);
+}
+
+static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+				uint32_t flags)
+{
+	int ret = drv_dumb_bo_create(bo, ALIGN(width, MESA_LLVMPIPE_TILE_SIZE),
+				     ALIGN(height, MESA_LLVMPIPE_TILE_SIZE), format, flags);
+	return ret;
+}
+
+static uint32_t virtio_gpu_resolve_format(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+		/*HACK: See b/28671744 */
+		return DRM_FORMAT_XBGR8888;
+	case DRM_FORMAT_FLEX_YCbCr_420_888:
+		return DRM_FORMAT_YVU420_ANDROID;
+	default:
+		return format;
+	}
+}
+
+struct backend backend_virtio_gpu = {
+	.name = "virtio_gpu",
+	.init = virtio_gpu_init,
+	.bo_create = virtio_gpu_bo_create,
+	.bo_destroy = drv_dumb_bo_destroy,
+	.bo_import = drv_prime_bo_import,
+	.bo_map = drv_dumb_bo_map,
+	.resolve_format = virtio_gpu_resolve_format,
+};