release-request-7301ee7e-9b79-4370-be3e-0da223029442-for-git_oc-mr1-release-4153141 snap-temp-L98100000079520883

Change-Id: Ifa91640fa2e47913c9f3636d519724a8eddabb8c
diff --git a/vda/Android.mk b/vda/Android.mk
index fcb4082..08a88ea 100644
--- a/vda/Android.mk
+++ b/vda/Android.mk
@@ -12,7 +12,9 @@
         h264_parser.cc      \
         picture.cc          \
         ranges.cc           \
+        shared_memory_region.cc \
         v4l2_device.cc      \
+        v4l2_slice_video_decode_accelerator.cc \
         video_codecs.cc     \
         video_decode_accelerator.cc \
         vp8_bool_decoder.cc \
@@ -27,8 +29,10 @@
         vp9_raw_bits_reader.cc \
         vp9_uncompressed_header_parser.cc \
 
+# gtest/include is needed for the header included by libchrome/base/gtest_prod_util.h
 LOCAL_C_INCLUDES += \
         $(TOP)/external/libchrome \
+        $(TOP)/external/gtest/include \
 
 LOCAL_MODULE:= libv4l2_codec2_vda
 
diff --git a/vda/h264_dpb.cc b/vda/h264_dpb.cc
index fb4a98d..0e1b411 100644
--- a/vda/h264_dpb.cc
+++ b/vda/h264_dpb.cc
@@ -46,6 +46,10 @@
 
 H264Picture::~H264Picture() {}
 
+V4L2H264Picture* H264Picture::AsV4L2H264Picture() {
+  return nullptr;
+}
+
 H264DPB::H264DPB() : max_num_pics_(0) {}
 H264DPB::~H264DPB() {}
 
diff --git a/vda/h264_dpb.h b/vda/h264_dpb.h
index 703f5e3..6be9f21 100644
--- a/vda/h264_dpb.h
+++ b/vda/h264_dpb.h
@@ -18,6 +18,8 @@
 
 namespace media {
 
+class V4L2H264Picture;
+
 // A picture (a frame or a field) in the H.264 spec sense.
 // See spec at http://www.itu.int/rec/T-REC-H.264
 class H264Picture : public base::RefCounted<H264Picture> {
@@ -32,6 +34,8 @@
 
   H264Picture();
 
+  // Returns this picture as a V4L2H264Picture if it is one; this base class
+  // implementation returns nullptr.
+  virtual V4L2H264Picture* AsV4L2H264Picture();
+
   // Values calculated per H.264 specification or taken from slice header.
   // See spec for more details on each (some names have been converted from
   // CamelCase in spec to Chromium-style names).
diff --git a/vda/shared_memory_region.cc b/vda/shared_memory_region.cc
new file mode 100644
index 0000000..ed56559
--- /dev/null
+++ b/vda/shared_memory_region.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+#include "shared_memory_region.h"
+
+namespace media {
+
+SharedMemoryRegion::SharedMemoryRegion(const base::SharedMemoryHandle& handle,
+                                       off_t offset,
+                                       size_t size,
+                                       bool read_only)
+    : shm_(handle, read_only),
+      offset_(offset),
+      size_(size),
+      alignment_size_(offset % base::SysInfo::VMAllocationGranularity()) {
+  DCHECK_GE(offset_, 0) << "Invalid offset: " << offset_;
+}
+
+SharedMemoryRegion::SharedMemoryRegion(const BitstreamBuffer& bitstream_buffer,
+                                       bool read_only)
+    : SharedMemoryRegion(bitstream_buffer.handle(),
+                         bitstream_buffer.offset(),
+                         bitstream_buffer.size(),
+                         read_only) {}
+
+bool SharedMemoryRegion::Map() {
+  if (offset_ < 0) {
+    DVLOG(1) << "Invalid offset: " << offset_;
+    return false;
+  }
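+  // Map from the nearest VMAllocationGranularity-aligned address at or below
+  // |offset_|, extending the length so the requested range is fully covered;
+  // memory() then skips the leading |alignment_size_| bytes so callers see
+  // the memory at the requested offset.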
+  return shm_.MapAt(offset_ - alignment_size_, size_ + alignment_size_);
+}
+
+void* SharedMemoryRegion::memory() {
+  int8_t* addr = reinterpret_cast<int8_t*>(shm_.memory());
+  return addr ? addr + alignment_size_ : nullptr;
+}
+
+}  // namespace media
diff --git a/vda/shared_memory_region.h b/vda/shared_memory_region.h
new file mode 100644
index 0000000..ce9a322
--- /dev/null
+++ b/vda/shared_memory_region.h
@@ -0,0 +1,56 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SHARED_MEMORY_REGION_H_
+#define SHARED_MEMORY_REGION_H_
+
+#include "base/memory/shared_memory.h"
+#include "bitstream_buffer.h"
+
+namespace media {
+
+// Helper class to access a region of a SharedMemory. Unlike SharedMemory,
+// whose MapAt() requires |offset| to be aligned to the value of
+// |SysInfo::VMAllocationGranularity()|, the |offset| of a SharedMemoryRegion
+// need not be aligned; this class hides the details and returns the mapped
+// address of the given offset.
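+//
+// Example usage (a sketch; Parse() stands in for a hypothetical consumer):
+//   SharedMemoryRegion shm(bitstream_buffer, true /* read_only */);
+//   if (shm.Map())
+//     Parse(shm.memory(), shm.size());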
+class SharedMemoryRegion {
+ public:
+  // Creates a SharedMemoryRegion.
+  // The mapped memory region begins at |offset| bytes from the start of the
+  // shared memory and the length is |size|. It takes ownership of |handle|
+  // and releases the resource when destroyed. Unlike SharedMemory, |offset|
+  // need not be aligned to the value of
+  // |SysInfo::VMAllocationGranularity()|.
+  SharedMemoryRegion(const base::SharedMemoryHandle& handle,
+                     off_t offset,
+                     size_t size,
+                     bool read_only);
+
+  // Creates a SharedMemoryRegion from the given |bitstream_buffer|.
+  SharedMemoryRegion(const BitstreamBuffer& bitstream_buffer, bool read_only);
+
+  // Maps the shared memory into the caller's address space.
+  // Returns true on success, false otherwise.
+  bool Map();
+
+  // Gets a pointer to the mapped region if it has been mapped via Map().
+  // Returns |nullptr| if it is not mapped. The returned pointer points
+  // to the memory at the offset previously passed to the constructor.
+  void* memory();
+
+  size_t size() const { return size_; }
+
+ private:
+  base::SharedMemory shm_;
+  off_t offset_;
+  size_t size_;
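+  // |offset_| modulo |SysInfo::VMAllocationGranularity()|; Map() maps from
+  // the aligned address |offset_ - alignment_size_|.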
+  size_t alignment_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(SharedMemoryRegion);
+};
+
+}  // namespace media
+
+#endif  // SHARED_MEMORY_REGION_H_
diff --git a/vda/v4l2-controls.h b/vda/v4l2-controls.h
new file mode 100644
index 0000000..a8ccf58
--- /dev/null
+++ b/vda/v4l2-controls.h
@@ -0,0 +1,1368 @@
+/*
+ *  Video for Linux Two controls header file
+ *
+ *  Copyright (C) 1999-2012 the contributors
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  Alternatively you can redistribute this file under the terms of the
+ *  BSD license as stated below:
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *  3. The names of its contributors may not be used to endorse or promote
+ *     products derived from this software without specific prior written
+ *     permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ *  TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ *  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  The contents of this header were split off from videodev2.h. All control
+ *  definitions should be added to this header, which is included by
+ *  videodev2.h.
+ */
+
+// Note:
+// This header file was obtained from ChromeOS and has not been upstreamed to
+// mainline Linux.
+// TODO(johnylin): remove this file once it is upstreamed.
+
+#ifndef __LINUX_V4L2_CONTROLS_H
+#define __LINUX_V4L2_CONTROLS_H
+
+#include <linux/types.h>
+
+/* Control classes */
+#define V4L2_CTRL_CLASS_USER		0x00980000	/* Old-style 'user' controls */
+#define V4L2_CTRL_CLASS_MPEG		0x00990000	/* MPEG-compression controls */
+#define V4L2_CTRL_CLASS_CAMERA		0x009a0000	/* Camera class controls */
+#define V4L2_CTRL_CLASS_FM_TX		0x009b0000	/* FM Modulator controls */
+#define V4L2_CTRL_CLASS_FLASH		0x009c0000	/* Camera flash controls */
+#define V4L2_CTRL_CLASS_JPEG		0x009d0000	/* JPEG-compression controls */
+#define V4L2_CTRL_CLASS_IMAGE_SOURCE	0x009e0000	/* Image source controls */
+#define V4L2_CTRL_CLASS_IMAGE_PROC	0x009f0000	/* Image processing controls */
+#define V4L2_CTRL_CLASS_DV		0x00a00000	/* Digital Video controls */
+#define V4L2_CTRL_CLASS_FM_RX		0x00a10000	/* FM Receiver controls */
+#define V4L2_CTRL_CLASS_RF_TUNER	0x00a20000	/* RF tuner controls */
+#define V4L2_CTRL_CLASS_DETECT		0x00a30000	/* Detection controls */
+
+/* User-class control IDs */
+
+#define V4L2_CID_BASE			(V4L2_CTRL_CLASS_USER | 0x900)
+#define V4L2_CID_USER_BASE 		V4L2_CID_BASE
+#define V4L2_CID_USER_CLASS 		(V4L2_CTRL_CLASS_USER | 1)
+#define V4L2_CID_BRIGHTNESS		(V4L2_CID_BASE+0)
+#define V4L2_CID_CONTRAST		(V4L2_CID_BASE+1)
+#define V4L2_CID_SATURATION		(V4L2_CID_BASE+2)
+#define V4L2_CID_HUE			(V4L2_CID_BASE+3)
+#define V4L2_CID_AUDIO_VOLUME		(V4L2_CID_BASE+5)
+#define V4L2_CID_AUDIO_BALANCE		(V4L2_CID_BASE+6)
+#define V4L2_CID_AUDIO_BASS		(V4L2_CID_BASE+7)
+#define V4L2_CID_AUDIO_TREBLE		(V4L2_CID_BASE+8)
+#define V4L2_CID_AUDIO_MUTE		(V4L2_CID_BASE+9)
+#define V4L2_CID_AUDIO_LOUDNESS		(V4L2_CID_BASE+10)
+#define V4L2_CID_BLACK_LEVEL		(V4L2_CID_BASE+11) /* Deprecated */
+#define V4L2_CID_AUTO_WHITE_BALANCE	(V4L2_CID_BASE+12)
+#define V4L2_CID_DO_WHITE_BALANCE	(V4L2_CID_BASE+13)
+#define V4L2_CID_RED_BALANCE		(V4L2_CID_BASE+14)
+#define V4L2_CID_BLUE_BALANCE		(V4L2_CID_BASE+15)
+#define V4L2_CID_GAMMA			(V4L2_CID_BASE+16)
+#define V4L2_CID_WHITENESS		(V4L2_CID_GAMMA) /* Deprecated */
+#define V4L2_CID_EXPOSURE		(V4L2_CID_BASE+17)
+#define V4L2_CID_AUTOGAIN		(V4L2_CID_BASE+18)
+#define V4L2_CID_GAIN			(V4L2_CID_BASE+19)
+#define V4L2_CID_HFLIP			(V4L2_CID_BASE+20)
+#define V4L2_CID_VFLIP			(V4L2_CID_BASE+21)
+
+#define V4L2_CID_POWER_LINE_FREQUENCY	(V4L2_CID_BASE+24)
+enum v4l2_power_line_frequency {
+	V4L2_CID_POWER_LINE_FREQUENCY_DISABLED	= 0,
+	V4L2_CID_POWER_LINE_FREQUENCY_50HZ	= 1,
+	V4L2_CID_POWER_LINE_FREQUENCY_60HZ	= 2,
+	V4L2_CID_POWER_LINE_FREQUENCY_AUTO	= 3,
+};
+#define V4L2_CID_HUE_AUTO			(V4L2_CID_BASE+25)
+#define V4L2_CID_WHITE_BALANCE_TEMPERATURE	(V4L2_CID_BASE+26)
+#define V4L2_CID_SHARPNESS			(V4L2_CID_BASE+27)
+#define V4L2_CID_BACKLIGHT_COMPENSATION 	(V4L2_CID_BASE+28)
+#define V4L2_CID_CHROMA_AGC                     (V4L2_CID_BASE+29)
+#define V4L2_CID_COLOR_KILLER                   (V4L2_CID_BASE+30)
+#define V4L2_CID_COLORFX			(V4L2_CID_BASE+31)
+enum v4l2_colorfx {
+	V4L2_COLORFX_NONE			= 0,
+	V4L2_COLORFX_BW				= 1,
+	V4L2_COLORFX_SEPIA			= 2,
+	V4L2_COLORFX_NEGATIVE			= 3,
+	V4L2_COLORFX_EMBOSS			= 4,
+	V4L2_COLORFX_SKETCH			= 5,
+	V4L2_COLORFX_SKY_BLUE			= 6,
+	V4L2_COLORFX_GRASS_GREEN		= 7,
+	V4L2_COLORFX_SKIN_WHITEN		= 8,
+	V4L2_COLORFX_VIVID			= 9,
+	V4L2_COLORFX_AQUA			= 10,
+	V4L2_COLORFX_ART_FREEZE			= 11,
+	V4L2_COLORFX_SILHOUETTE			= 12,
+	V4L2_COLORFX_SOLARIZATION		= 13,
+	V4L2_COLORFX_ANTIQUE			= 14,
+	V4L2_COLORFX_SET_CBCR			= 15,
+};
+#define V4L2_CID_AUTOBRIGHTNESS			(V4L2_CID_BASE+32)
+#define V4L2_CID_BAND_STOP_FILTER		(V4L2_CID_BASE+33)
+
+#define V4L2_CID_ROTATE				(V4L2_CID_BASE+34)
+#define V4L2_CID_BG_COLOR			(V4L2_CID_BASE+35)
+
+#define V4L2_CID_CHROMA_GAIN                    (V4L2_CID_BASE+36)
+
+#define V4L2_CID_ILLUMINATORS_1			(V4L2_CID_BASE+37)
+#define V4L2_CID_ILLUMINATORS_2			(V4L2_CID_BASE+38)
+
+#define V4L2_CID_MIN_BUFFERS_FOR_CAPTURE	(V4L2_CID_BASE+39)
+#define V4L2_CID_MIN_BUFFERS_FOR_OUTPUT		(V4L2_CID_BASE+40)
+
+#define V4L2_CID_ALPHA_COMPONENT		(V4L2_CID_BASE+41)
+#define V4L2_CID_COLORFX_CBCR			(V4L2_CID_BASE+42)
+
+/* last CID + 1 */
+#define V4L2_CID_LASTP1                         (V4L2_CID_BASE+43)
+
+/* USER-class private control IDs */
+
+/* The base for the meye driver controls. See linux/meye.h for the list
+ * of controls. We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_MEYE_BASE			(V4L2_CID_USER_BASE + 0x1000)
+
+/* The base for the bttv driver controls.
+ * We reserve 32 controls for this driver. */
+#define V4L2_CID_USER_BTTV_BASE			(V4L2_CID_USER_BASE + 0x1010)
+
+
+/* The base for the s2255 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_S2255_BASE		(V4L2_CID_USER_BASE + 0x1030)
+
+/* The base for the si476x driver controls. See include/media/si476x.h for the list
+ * of controls. Total of 16 controls is reserved for this driver */
+#define V4L2_CID_USER_SI476X_BASE		(V4L2_CID_USER_BASE + 0x1040)
+
+/* The base for the TI VPE driver controls. Total of 16 controls is reserved for
+ * this driver */
+#define V4L2_CID_USER_TI_VPE_BASE		(V4L2_CID_USER_BASE + 0x1050)
+
+/* The base for the saa7134 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_SAA7134_BASE		(V4L2_CID_USER_BASE + 0x1060)
+
+/* The base for the adv7180 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_ADV7180_BASE		(V4L2_CID_USER_BASE + 0x1070)
+
+/* The base for the tc358743 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_TC358743_BASE		(V4L2_CID_USER_BASE + 0x1080)
+
+/* MPEG-class control IDs */
+/* The MPEG controls are applicable to all codec controls
+ * and the 'MPEG' part of the define is historical */
+
+#define V4L2_CID_MPEG_BASE 			(V4L2_CTRL_CLASS_MPEG | 0x900)
+#define V4L2_CID_MPEG_CLASS 			(V4L2_CTRL_CLASS_MPEG | 1)
+
+/*  MPEG streams, specific to multiplexed streams */
+#define V4L2_CID_MPEG_STREAM_TYPE 		(V4L2_CID_MPEG_BASE+0)
+enum v4l2_mpeg_stream_type {
+	V4L2_MPEG_STREAM_TYPE_MPEG2_PS   = 0, /* MPEG-2 program stream */
+	V4L2_MPEG_STREAM_TYPE_MPEG2_TS   = 1, /* MPEG-2 transport stream */
+	V4L2_MPEG_STREAM_TYPE_MPEG1_SS   = 2, /* MPEG-1 system stream */
+	V4L2_MPEG_STREAM_TYPE_MPEG2_DVD  = 3, /* MPEG-2 DVD-compatible stream */
+	V4L2_MPEG_STREAM_TYPE_MPEG1_VCD  = 4, /* MPEG-1 VCD-compatible stream */
+	V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD = 5, /* MPEG-2 SVCD-compatible stream */
+};
+#define V4L2_CID_MPEG_STREAM_PID_PMT 		(V4L2_CID_MPEG_BASE+1)
+#define V4L2_CID_MPEG_STREAM_PID_AUDIO 		(V4L2_CID_MPEG_BASE+2)
+#define V4L2_CID_MPEG_STREAM_PID_VIDEO 		(V4L2_CID_MPEG_BASE+3)
+#define V4L2_CID_MPEG_STREAM_PID_PCR 		(V4L2_CID_MPEG_BASE+4)
+#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO 	(V4L2_CID_MPEG_BASE+5)
+#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO 	(V4L2_CID_MPEG_BASE+6)
+#define V4L2_CID_MPEG_STREAM_VBI_FMT 		(V4L2_CID_MPEG_BASE+7)
+enum v4l2_mpeg_stream_vbi_fmt {
+	V4L2_MPEG_STREAM_VBI_FMT_NONE = 0,  /* No VBI in the MPEG stream */
+	V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1,  /* VBI in private packets, IVTV format */
+};
+
+/*  MPEG audio controls specific to multiplexed streams  */
+#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ 	(V4L2_CID_MPEG_BASE+100)
+enum v4l2_mpeg_audio_sampling_freq {
+	V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100 = 0,
+	V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000 = 1,
+	V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000 = 2,
+};
+#define V4L2_CID_MPEG_AUDIO_ENCODING 		(V4L2_CID_MPEG_BASE+101)
+enum v4l2_mpeg_audio_encoding {
+	V4L2_MPEG_AUDIO_ENCODING_LAYER_1 = 0,
+	V4L2_MPEG_AUDIO_ENCODING_LAYER_2 = 1,
+	V4L2_MPEG_AUDIO_ENCODING_LAYER_3 = 2,
+	V4L2_MPEG_AUDIO_ENCODING_AAC     = 3,
+	V4L2_MPEG_AUDIO_ENCODING_AC3     = 4,
+};
+#define V4L2_CID_MPEG_AUDIO_L1_BITRATE 		(V4L2_CID_MPEG_BASE+102)
+enum v4l2_mpeg_audio_l1_bitrate {
+	V4L2_MPEG_AUDIO_L1_BITRATE_32K  = 0,
+	V4L2_MPEG_AUDIO_L1_BITRATE_64K  = 1,
+	V4L2_MPEG_AUDIO_L1_BITRATE_96K  = 2,
+	V4L2_MPEG_AUDIO_L1_BITRATE_128K = 3,
+	V4L2_MPEG_AUDIO_L1_BITRATE_160K = 4,
+	V4L2_MPEG_AUDIO_L1_BITRATE_192K = 5,
+	V4L2_MPEG_AUDIO_L1_BITRATE_224K = 6,
+	V4L2_MPEG_AUDIO_L1_BITRATE_256K = 7,
+	V4L2_MPEG_AUDIO_L1_BITRATE_288K = 8,
+	V4L2_MPEG_AUDIO_L1_BITRATE_320K = 9,
+	V4L2_MPEG_AUDIO_L1_BITRATE_352K = 10,
+	V4L2_MPEG_AUDIO_L1_BITRATE_384K = 11,
+	V4L2_MPEG_AUDIO_L1_BITRATE_416K = 12,
+	V4L2_MPEG_AUDIO_L1_BITRATE_448K = 13,
+};
+#define V4L2_CID_MPEG_AUDIO_L2_BITRATE 		(V4L2_CID_MPEG_BASE+103)
+enum v4l2_mpeg_audio_l2_bitrate {
+	V4L2_MPEG_AUDIO_L2_BITRATE_32K  = 0,
+	V4L2_MPEG_AUDIO_L2_BITRATE_48K  = 1,
+	V4L2_MPEG_AUDIO_L2_BITRATE_56K  = 2,
+	V4L2_MPEG_AUDIO_L2_BITRATE_64K  = 3,
+	V4L2_MPEG_AUDIO_L2_BITRATE_80K  = 4,
+	V4L2_MPEG_AUDIO_L2_BITRATE_96K  = 5,
+	V4L2_MPEG_AUDIO_L2_BITRATE_112K = 6,
+	V4L2_MPEG_AUDIO_L2_BITRATE_128K = 7,
+	V4L2_MPEG_AUDIO_L2_BITRATE_160K = 8,
+	V4L2_MPEG_AUDIO_L2_BITRATE_192K = 9,
+	V4L2_MPEG_AUDIO_L2_BITRATE_224K = 10,
+	V4L2_MPEG_AUDIO_L2_BITRATE_256K = 11,
+	V4L2_MPEG_AUDIO_L2_BITRATE_320K = 12,
+	V4L2_MPEG_AUDIO_L2_BITRATE_384K = 13,
+};
+#define V4L2_CID_MPEG_AUDIO_L3_BITRATE 		(V4L2_CID_MPEG_BASE+104)
+enum v4l2_mpeg_audio_l3_bitrate {
+	V4L2_MPEG_AUDIO_L3_BITRATE_32K  = 0,
+	V4L2_MPEG_AUDIO_L3_BITRATE_40K  = 1,
+	V4L2_MPEG_AUDIO_L3_BITRATE_48K  = 2,
+	V4L2_MPEG_AUDIO_L3_BITRATE_56K  = 3,
+	V4L2_MPEG_AUDIO_L3_BITRATE_64K  = 4,
+	V4L2_MPEG_AUDIO_L3_BITRATE_80K  = 5,
+	V4L2_MPEG_AUDIO_L3_BITRATE_96K  = 6,
+	V4L2_MPEG_AUDIO_L3_BITRATE_112K = 7,
+	V4L2_MPEG_AUDIO_L3_BITRATE_128K = 8,
+	V4L2_MPEG_AUDIO_L3_BITRATE_160K = 9,
+	V4L2_MPEG_AUDIO_L3_BITRATE_192K = 10,
+	V4L2_MPEG_AUDIO_L3_BITRATE_224K = 11,
+	V4L2_MPEG_AUDIO_L3_BITRATE_256K = 12,
+	V4L2_MPEG_AUDIO_L3_BITRATE_320K = 13,
+};
+#define V4L2_CID_MPEG_AUDIO_MODE 		(V4L2_CID_MPEG_BASE+105)
+enum v4l2_mpeg_audio_mode {
+	V4L2_MPEG_AUDIO_MODE_STEREO       = 0,
+	V4L2_MPEG_AUDIO_MODE_JOINT_STEREO = 1,
+	V4L2_MPEG_AUDIO_MODE_DUAL         = 2,
+	V4L2_MPEG_AUDIO_MODE_MONO         = 3,
+};
+#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION 	(V4L2_CID_MPEG_BASE+106)
+enum v4l2_mpeg_audio_mode_extension {
+	V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4  = 0,
+	V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8  = 1,
+	V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12 = 2,
+	V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16 = 3,
+};
+#define V4L2_CID_MPEG_AUDIO_EMPHASIS 		(V4L2_CID_MPEG_BASE+107)
+enum v4l2_mpeg_audio_emphasis {
+	V4L2_MPEG_AUDIO_EMPHASIS_NONE         = 0,
+	V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS = 1,
+	V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17    = 2,
+};
+#define V4L2_CID_MPEG_AUDIO_CRC 		(V4L2_CID_MPEG_BASE+108)
+enum v4l2_mpeg_audio_crc {
+	V4L2_MPEG_AUDIO_CRC_NONE  = 0,
+	V4L2_MPEG_AUDIO_CRC_CRC16 = 1,
+};
+#define V4L2_CID_MPEG_AUDIO_MUTE 		(V4L2_CID_MPEG_BASE+109)
+#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE		(V4L2_CID_MPEG_BASE+110)
+#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE		(V4L2_CID_MPEG_BASE+111)
+enum v4l2_mpeg_audio_ac3_bitrate {
+	V4L2_MPEG_AUDIO_AC3_BITRATE_32K  = 0,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_40K  = 1,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_48K  = 2,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_56K  = 3,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_64K  = 4,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_80K  = 5,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_96K  = 6,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_112K = 7,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_128K = 8,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_160K = 9,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_192K = 10,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_224K = 11,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_256K = 12,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_320K = 13,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_384K = 14,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_448K = 15,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_512K = 16,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_576K = 17,
+	V4L2_MPEG_AUDIO_AC3_BITRATE_640K = 18,
+};
+#define V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK	(V4L2_CID_MPEG_BASE+112)
+enum v4l2_mpeg_audio_dec_playback {
+	V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO	    = 0,
+	V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO	    = 1,
+	V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT	    = 2,
+	V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT	    = 3,
+	V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO	    = 4,
+	V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO = 5,
+};
+#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_MPEG_BASE+113)
+
+/*  MPEG video controls specific to multiplexed streams */
+#define V4L2_CID_MPEG_VIDEO_ENCODING 		(V4L2_CID_MPEG_BASE+200)
+enum v4l2_mpeg_video_encoding {
+	V4L2_MPEG_VIDEO_ENCODING_MPEG_1     = 0,
+	V4L2_MPEG_VIDEO_ENCODING_MPEG_2     = 1,
+	V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC = 2,
+};
+#define V4L2_CID_MPEG_VIDEO_ASPECT 		(V4L2_CID_MPEG_BASE+201)
+enum v4l2_mpeg_video_aspect {
+	V4L2_MPEG_VIDEO_ASPECT_1x1     = 0,
+	V4L2_MPEG_VIDEO_ASPECT_4x3     = 1,
+	V4L2_MPEG_VIDEO_ASPECT_16x9    = 2,
+	V4L2_MPEG_VIDEO_ASPECT_221x100 = 3,
+};
+#define V4L2_CID_MPEG_VIDEO_B_FRAMES 		(V4L2_CID_MPEG_BASE+202)
+#define V4L2_CID_MPEG_VIDEO_GOP_SIZE 		(V4L2_CID_MPEG_BASE+203)
+#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE 	(V4L2_CID_MPEG_BASE+204)
+#define V4L2_CID_MPEG_VIDEO_PULLDOWN 		(V4L2_CID_MPEG_BASE+205)
+#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE 	(V4L2_CID_MPEG_BASE+206)
+enum v4l2_mpeg_video_bitrate_mode {
+	V4L2_MPEG_VIDEO_BITRATE_MODE_VBR = 0,
+	V4L2_MPEG_VIDEO_BITRATE_MODE_CBR = 1,
+};
+#define V4L2_CID_MPEG_VIDEO_BITRATE 		(V4L2_CID_MPEG_BASE+207)
+#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK 	(V4L2_CID_MPEG_BASE+208)
+#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_MPEG_BASE+209)
+#define V4L2_CID_MPEG_VIDEO_MUTE 		(V4L2_CID_MPEG_BASE+210)
+#define V4L2_CID_MPEG_VIDEO_MUTE_YUV 		(V4L2_CID_MPEG_BASE+211)
+#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE		(V4L2_CID_MPEG_BASE+212)
+#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER	(V4L2_CID_MPEG_BASE+213)
+#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB		(V4L2_CID_MPEG_BASE+214)
+#define V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE			(V4L2_CID_MPEG_BASE+215)
+#define V4L2_CID_MPEG_VIDEO_HEADER_MODE				(V4L2_CID_MPEG_BASE+216)
+enum v4l2_mpeg_video_header_mode {
+	V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE			= 0,
+	V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME	= 1,
+};
+#define V4L2_CID_MPEG_VIDEO_MAX_REF_PIC			(V4L2_CID_MPEG_BASE+217)
+#define V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE		(V4L2_CID_MPEG_BASE+218)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES	(V4L2_CID_MPEG_BASE+219)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB		(V4L2_CID_MPEG_BASE+220)
+#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE		(V4L2_CID_MPEG_BASE+221)
+enum v4l2_mpeg_video_multi_slice_mode {
+	V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE		= 0,
+	V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB		= 1,
+	V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES	= 2,
+};
+#define V4L2_CID_MPEG_VIDEO_VBV_SIZE			(V4L2_CID_MPEG_BASE+222)
+#define V4L2_CID_MPEG_VIDEO_DEC_PTS			(V4L2_CID_MPEG_BASE+223)
+#define V4L2_CID_MPEG_VIDEO_DEC_FRAME			(V4L2_CID_MPEG_BASE+224)
+#define V4L2_CID_MPEG_VIDEO_VBV_DELAY			(V4L2_CID_MPEG_BASE+225)
+#define V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER		(V4L2_CID_MPEG_BASE+226)
+#define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE		(V4L2_CID_MPEG_BASE+227)
+#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE		(V4L2_CID_MPEG_BASE+228)
+#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME		(V4L2_CID_MPEG_BASE+229)
+
+#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP		(V4L2_CID_MPEG_BASE+300)
+#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP		(V4L2_CID_MPEG_BASE+301)
+#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP		(V4L2_CID_MPEG_BASE+302)
+#define V4L2_CID_MPEG_VIDEO_H263_MIN_QP			(V4L2_CID_MPEG_BASE+303)
+#define V4L2_CID_MPEG_VIDEO_H263_MAX_QP			(V4L2_CID_MPEG_BASE+304)
+#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP		(V4L2_CID_MPEG_BASE+350)
+#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP		(V4L2_CID_MPEG_BASE+351)
+#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP		(V4L2_CID_MPEG_BASE+352)
+#define V4L2_CID_MPEG_VIDEO_H264_MIN_QP			(V4L2_CID_MPEG_BASE+353)
+#define V4L2_CID_MPEG_VIDEO_H264_MAX_QP			(V4L2_CID_MPEG_BASE+354)
+#define V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM		(V4L2_CID_MPEG_BASE+355)
+#define V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE		(V4L2_CID_MPEG_BASE+356)
+#define V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE		(V4L2_CID_MPEG_BASE+357)
+enum v4l2_mpeg_video_h264_entropy_mode {
+	V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC	= 0,
+	V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC	= 1,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_I_PERIOD		(V4L2_CID_MPEG_BASE+358)
+#define V4L2_CID_MPEG_VIDEO_H264_LEVEL			(V4L2_CID_MPEG_BASE+359)
+enum v4l2_mpeg_video_h264_level {
+	V4L2_MPEG_VIDEO_H264_LEVEL_1_0	= 0,
+	V4L2_MPEG_VIDEO_H264_LEVEL_1B	= 1,
+	V4L2_MPEG_VIDEO_H264_LEVEL_1_1	= 2,
+	V4L2_MPEG_VIDEO_H264_LEVEL_1_2	= 3,
+	V4L2_MPEG_VIDEO_H264_LEVEL_1_3	= 4,
+	V4L2_MPEG_VIDEO_H264_LEVEL_2_0	= 5,
+	V4L2_MPEG_VIDEO_H264_LEVEL_2_1	= 6,
+	V4L2_MPEG_VIDEO_H264_LEVEL_2_2	= 7,
+	V4L2_MPEG_VIDEO_H264_LEVEL_3_0	= 8,
+	V4L2_MPEG_VIDEO_H264_LEVEL_3_1	= 9,
+	V4L2_MPEG_VIDEO_H264_LEVEL_3_2	= 10,
+	V4L2_MPEG_VIDEO_H264_LEVEL_4_0	= 11,
+	V4L2_MPEG_VIDEO_H264_LEVEL_4_1	= 12,
+	V4L2_MPEG_VIDEO_H264_LEVEL_4_2	= 13,
+	V4L2_MPEG_VIDEO_H264_LEVEL_5_0	= 14,
+	V4L2_MPEG_VIDEO_H264_LEVEL_5_1	= 15,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA	(V4L2_CID_MPEG_BASE+360)
+#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA	(V4L2_CID_MPEG_BASE+361)
+#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE	(V4L2_CID_MPEG_BASE+362)
+enum v4l2_mpeg_video_h264_loop_filter_mode {
+	V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED				= 0,
+	V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED				= 1,
+	V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY	= 2,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_PROFILE		(V4L2_CID_MPEG_BASE+363)
+enum v4l2_mpeg_video_h264_profile {
+	V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE			= 0,
+	V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE	= 1,
+	V4L2_MPEG_VIDEO_H264_PROFILE_MAIN			= 2,
+	V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED			= 3,
+	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH			= 4,
+	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10			= 5,
+	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422			= 6,
+	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE	= 7,
+	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA		= 8,
+	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA		= 9,
+	V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA		= 10,
+	V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA		= 11,
+	V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE		= 12,
+	V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH		= 13,
+	V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA	= 14,
+	V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH		= 15,
+	V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH		= 16,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT	(V4L2_CID_MPEG_BASE+364)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH	(V4L2_CID_MPEG_BASE+365)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE		(V4L2_CID_MPEG_BASE+366)
+#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC		(V4L2_CID_MPEG_BASE+367)
+enum v4l2_mpeg_video_h264_vui_sar_idc {
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED	= 0,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1		= 1,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11		= 2,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11		= 3,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11		= 4,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33		= 5,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11		= 6,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11		= 7,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11		= 8,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33		= 9,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11		= 10,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11		= 11,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33		= 12,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99		= 13,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3		= 14,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2		= 15,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1		= 16,
+	V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED	= 17,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING		(V4L2_CID_MPEG_BASE+368)
+#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0		(V4L2_CID_MPEG_BASE+369)
+#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE	(V4L2_CID_MPEG_BASE+370)
+enum v4l2_mpeg_video_h264_sei_fp_arrangement_type {
+	V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHECKERBOARD	= 0,
+	V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN		= 1,
+	V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_ROW		= 2,
+	V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE	= 3,
+	V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM		= 4,
+	V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL		= 5,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_FMO			(V4L2_CID_MPEG_BASE+371)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE		(V4L2_CID_MPEG_BASE+372)
+enum v4l2_mpeg_video_h264_fmo_map_type {
+	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES		= 0,
+	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES		= 1,
+	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_FOREGROUND_WITH_LEFT_OVER	= 2,
+	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_BOX_OUT			= 3,
+	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN			= 4,
+	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN			= 5,
+	V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT			= 6,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP	(V4L2_CID_MPEG_BASE+373)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION	(V4L2_CID_MPEG_BASE+374)
+enum v4l2_mpeg_video_h264_fmo_change_dir {
+	V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT	= 0,
+	V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT	= 1,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE	(V4L2_CID_MPEG_BASE+375)
+#define V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH		(V4L2_CID_MPEG_BASE+376)
+#define V4L2_CID_MPEG_VIDEO_H264_ASO			(V4L2_CID_MPEG_BASE+377)
+#define V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER	(V4L2_CID_MPEG_BASE+378)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING		(V4L2_CID_MPEG_BASE+379)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE	(V4L2_CID_MPEG_BASE+380)
+enum v4l2_mpeg_video_h264_hierarchical_coding_type {
+	V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B	= 0,
+	V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P	= 1,
+};
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER	(V4L2_CID_MPEG_BASE+381)
+#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP	(V4L2_CID_MPEG_BASE+382)
+#define V4L2_CID_MPEG_VIDEO_H264_SPS		(V4L2_CID_MPEG_BASE+383)
+#define V4L2_CID_MPEG_VIDEO_H264_PPS		(V4L2_CID_MPEG_BASE+384)
+#define V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX	(V4L2_CID_MPEG_BASE+385)
+#define V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM	(V4L2_CID_MPEG_BASE+386)
+#define V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAM	(V4L2_CID_MPEG_BASE+387)
+#define V4L2_CID_MPEG_VIDEO_H264_SPS_PPS_BEFORE_IDR		(V4L2_CID_MPEG_BASE+388)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP	(V4L2_CID_MPEG_BASE+400)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP	(V4L2_CID_MPEG_BASE+401)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP	(V4L2_CID_MPEG_BASE+402)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP	(V4L2_CID_MPEG_BASE+403)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP	(V4L2_CID_MPEG_BASE+404)
+#define V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL		(V4L2_CID_MPEG_BASE+405)
+enum v4l2_mpeg_video_mpeg4_level {
+	V4L2_MPEG_VIDEO_MPEG4_LEVEL_0	= 0,
+	V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B	= 1,
+	V4L2_MPEG_VIDEO_MPEG4_LEVEL_1	= 2,
+	V4L2_MPEG_VIDEO_MPEG4_LEVEL_2	= 3,
+	V4L2_MPEG_VIDEO_MPEG4_LEVEL_3	= 4,
+	V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B	= 5,
+	V4L2_MPEG_VIDEO_MPEG4_LEVEL_4	= 6,
+	V4L2_MPEG_VIDEO_MPEG4_LEVEL_5	= 7,
+};
+#define V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE	(V4L2_CID_MPEG_BASE+406)
+enum v4l2_mpeg_video_mpeg4_profile {
+	V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE				= 0,
+	V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE			= 1,
+	V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE				= 2,
+	V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE			= 3,
+	V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY	= 4,
+};
+#define V4L2_CID_MPEG_VIDEO_MPEG4_QPEL		(V4L2_CID_MPEG_BASE+407)
+
+/*  Control IDs for VP8 streams
+ *  Although VP8 is not part of MPEG we add these controls to the MPEG class
+ *  as that class is already handling other video compression standards
+ */
+#define V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS		(V4L2_CID_MPEG_BASE+500)
+enum v4l2_vp8_num_partitions {
+	V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION	= 0,
+	V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS	= 1,
+	V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS	= 2,
+	V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS	= 3,
+};
+#define V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4		(V4L2_CID_MPEG_BASE+501)
+#define V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES		(V4L2_CID_MPEG_BASE+502)
+enum v4l2_vp8_num_ref_frames {
+	V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME	= 0,
+	V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME	= 1,
+	V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME	= 2,
+};
+#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL		(V4L2_CID_MPEG_BASE+503)
+#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS	(V4L2_CID_MPEG_BASE+504)
+#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD	(V4L2_CID_MPEG_BASE+505)
+#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL	(V4L2_CID_MPEG_BASE+506)
+enum v4l2_vp8_golden_frame_sel {
+	V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV		= 0,
+	V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD	= 1,
+};
+#define V4L2_CID_MPEG_VIDEO_VPX_MIN_QP			(V4L2_CID_MPEG_BASE+507)
+#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP			(V4L2_CID_MPEG_BASE+508)
+#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP		(V4L2_CID_MPEG_BASE+509)
+#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP		(V4L2_CID_MPEG_BASE+510)
+#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE			(V4L2_CID_MPEG_BASE+511)
+
+#define V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR		(V4L2_CID_MPEG_BASE+512)
+
+#define V4L2_CID_MPEG_VIDEO_VP9_FRAME_HDR		(V4L2_CID_MPEG_BASE+513)
+#define V4L2_CID_MPEG_VIDEO_VP9_DECODE_PARAM		(V4L2_CID_MPEG_BASE+514)
+#define V4L2_CID_MPEG_VIDEO_VP9_ENTROPY			(V4L2_CID_MPEG_BASE+515)
+
+/*  MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
+#define V4L2_CID_MPEG_CX2341X_BASE 				(V4L2_CTRL_CLASS_MPEG | 0x1000)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE 	(V4L2_CID_MPEG_CX2341X_BASE+0)
+enum v4l2_mpeg_cx2341x_video_spatial_filter_mode {
+	V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL = 0,
+	V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO   = 1,
+};
+#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER 		(V4L2_CID_MPEG_CX2341X_BASE+1)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE 	(V4L2_CID_MPEG_CX2341X_BASE+2)
+enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type {
+	V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF                  = 0,
+	V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR               = 1,
+	V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT              = 2,
+	V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE      = 3,
+	V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE = 4,
+};
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE 	(V4L2_CID_MPEG_CX2341X_BASE+3)
+enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type {
+	V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF    = 0,
+	V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
+};
+#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE 	(V4L2_CID_MPEG_CX2341X_BASE+4)
+enum v4l2_mpeg_cx2341x_video_temporal_filter_mode {
+	V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL = 0,
+	V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO   = 1,
+};
+#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER 		(V4L2_CID_MPEG_CX2341X_BASE+5)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE 		(V4L2_CID_MPEG_CX2341X_BASE+6)
+enum v4l2_mpeg_cx2341x_video_median_filter_type {
+	V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF      = 0,
+	V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR      = 1,
+	V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT     = 2,
+	V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT = 3,
+	V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG     = 4,
+};
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM 	(V4L2_CID_MPEG_CX2341X_BASE+7)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP 	(V4L2_CID_MPEG_CX2341X_BASE+8)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM	(V4L2_CID_MPEG_CX2341X_BASE+9)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP 	(V4L2_CID_MPEG_CX2341X_BASE+10)
+#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS 	(V4L2_CID_MPEG_CX2341X_BASE+11)
+
+/*  MPEG-class control IDs specific to the Samsung MFC 5.1 driver as defined by V4L2 */
+#define V4L2_CID_MPEG_MFC51_BASE				(V4L2_CTRL_CLASS_MPEG | 0x1100)
+
+#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY		(V4L2_CID_MPEG_MFC51_BASE+0)
+#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE	(V4L2_CID_MPEG_MFC51_BASE+1)
+#define V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE			(V4L2_CID_MPEG_MFC51_BASE+2)
+enum v4l2_mpeg_mfc51_video_frame_skip_mode {
+	V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED		= 0,
+	V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT	= 1,
+	V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT		= 2,
+};
+#define V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE			(V4L2_CID_MPEG_MFC51_BASE+3)
+enum v4l2_mpeg_mfc51_video_force_frame_type {
+	V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED		= 0,
+	V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME		= 1,
+	V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED	= 2,
+};
+#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING				(V4L2_CID_MPEG_MFC51_BASE+4)
+#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV				(V4L2_CID_MPEG_MFC51_BASE+5)
+#define V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT			(V4L2_CID_MPEG_MFC51_BASE+6)
+#define V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF			(V4L2_CID_MPEG_MFC51_BASE+7)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY		(V4L2_CID_MPEG_MFC51_BASE+50)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK			(V4L2_CID_MPEG_MFC51_BASE+51)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH		(V4L2_CID_MPEG_MFC51_BASE+52)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC		(V4L2_CID_MPEG_MFC51_BASE+53)
+#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P		(V4L2_CID_MPEG_MFC51_BASE+54)
+
+
+/*  Camera class control IDs */
+
+#define V4L2_CID_CAMERA_CLASS_BASE 	(V4L2_CTRL_CLASS_CAMERA | 0x900)
+#define V4L2_CID_CAMERA_CLASS 		(V4L2_CTRL_CLASS_CAMERA | 1)
+
+#define V4L2_CID_EXPOSURE_AUTO			(V4L2_CID_CAMERA_CLASS_BASE+1)
+enum  v4l2_exposure_auto_type {
+	V4L2_EXPOSURE_AUTO = 0,
+	V4L2_EXPOSURE_MANUAL = 1,
+	V4L2_EXPOSURE_SHUTTER_PRIORITY = 2,
+	V4L2_EXPOSURE_APERTURE_PRIORITY = 3
+};
+#define V4L2_CID_EXPOSURE_ABSOLUTE		(V4L2_CID_CAMERA_CLASS_BASE+2)
+#define V4L2_CID_EXPOSURE_AUTO_PRIORITY		(V4L2_CID_CAMERA_CLASS_BASE+3)
+
+#define V4L2_CID_PAN_RELATIVE			(V4L2_CID_CAMERA_CLASS_BASE+4)
+#define V4L2_CID_TILT_RELATIVE			(V4L2_CID_CAMERA_CLASS_BASE+5)
+#define V4L2_CID_PAN_RESET			(V4L2_CID_CAMERA_CLASS_BASE+6)
+#define V4L2_CID_TILT_RESET			(V4L2_CID_CAMERA_CLASS_BASE+7)
+
+#define V4L2_CID_PAN_ABSOLUTE			(V4L2_CID_CAMERA_CLASS_BASE+8)
+#define V4L2_CID_TILT_ABSOLUTE			(V4L2_CID_CAMERA_CLASS_BASE+9)
+
+#define V4L2_CID_FOCUS_ABSOLUTE			(V4L2_CID_CAMERA_CLASS_BASE+10)
+#define V4L2_CID_FOCUS_RELATIVE			(V4L2_CID_CAMERA_CLASS_BASE+11)
+#define V4L2_CID_FOCUS_AUTO			(V4L2_CID_CAMERA_CLASS_BASE+12)
+
+#define V4L2_CID_ZOOM_ABSOLUTE			(V4L2_CID_CAMERA_CLASS_BASE+13)
+#define V4L2_CID_ZOOM_RELATIVE			(V4L2_CID_CAMERA_CLASS_BASE+14)
+#define V4L2_CID_ZOOM_CONTINUOUS		(V4L2_CID_CAMERA_CLASS_BASE+15)
+
+#define V4L2_CID_PRIVACY			(V4L2_CID_CAMERA_CLASS_BASE+16)
+
+#define V4L2_CID_IRIS_ABSOLUTE			(V4L2_CID_CAMERA_CLASS_BASE+17)
+#define V4L2_CID_IRIS_RELATIVE			(V4L2_CID_CAMERA_CLASS_BASE+18)
+
+#define V4L2_CID_AUTO_EXPOSURE_BIAS		(V4L2_CID_CAMERA_CLASS_BASE+19)
+
+#define V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE	(V4L2_CID_CAMERA_CLASS_BASE+20)
+enum v4l2_auto_n_preset_white_balance {
+	V4L2_WHITE_BALANCE_MANUAL		= 0,
+	V4L2_WHITE_BALANCE_AUTO			= 1,
+	V4L2_WHITE_BALANCE_INCANDESCENT		= 2,
+	V4L2_WHITE_BALANCE_FLUORESCENT		= 3,
+	V4L2_WHITE_BALANCE_FLUORESCENT_H	= 4,
+	V4L2_WHITE_BALANCE_HORIZON		= 5,
+	V4L2_WHITE_BALANCE_DAYLIGHT		= 6,
+	V4L2_WHITE_BALANCE_FLASH		= 7,
+	V4L2_WHITE_BALANCE_CLOUDY		= 8,
+	V4L2_WHITE_BALANCE_SHADE		= 9,
+};
+
+#define V4L2_CID_WIDE_DYNAMIC_RANGE		(V4L2_CID_CAMERA_CLASS_BASE+21)
+#define V4L2_CID_IMAGE_STABILIZATION		(V4L2_CID_CAMERA_CLASS_BASE+22)
+
+#define V4L2_CID_ISO_SENSITIVITY		(V4L2_CID_CAMERA_CLASS_BASE+23)
+#define V4L2_CID_ISO_SENSITIVITY_AUTO		(V4L2_CID_CAMERA_CLASS_BASE+24)
+enum v4l2_iso_sensitivity_auto_type {
+	V4L2_ISO_SENSITIVITY_MANUAL		= 0,
+	V4L2_ISO_SENSITIVITY_AUTO		= 1,
+};
+
+#define V4L2_CID_EXPOSURE_METERING		(V4L2_CID_CAMERA_CLASS_BASE+25)
+enum v4l2_exposure_metering {
+	V4L2_EXPOSURE_METERING_AVERAGE		= 0,
+	V4L2_EXPOSURE_METERING_CENTER_WEIGHTED	= 1,
+	V4L2_EXPOSURE_METERING_SPOT		= 2,
+	V4L2_EXPOSURE_METERING_MATRIX		= 3,
+};
+
+#define V4L2_CID_SCENE_MODE			(V4L2_CID_CAMERA_CLASS_BASE+26)
+enum v4l2_scene_mode {
+	V4L2_SCENE_MODE_NONE			= 0,
+	V4L2_SCENE_MODE_BACKLIGHT		= 1,
+	V4L2_SCENE_MODE_BEACH_SNOW		= 2,
+	V4L2_SCENE_MODE_CANDLE_LIGHT		= 3,
+	V4L2_SCENE_MODE_DAWN_DUSK		= 4,
+	V4L2_SCENE_MODE_FALL_COLORS		= 5,
+	V4L2_SCENE_MODE_FIREWORKS		= 6,
+	V4L2_SCENE_MODE_LANDSCAPE		= 7,
+	V4L2_SCENE_MODE_NIGHT			= 8,
+	V4L2_SCENE_MODE_PARTY_INDOOR		= 9,
+	V4L2_SCENE_MODE_PORTRAIT		= 10,
+	V4L2_SCENE_MODE_SPORTS			= 11,
+	V4L2_SCENE_MODE_SUNSET			= 12,
+	V4L2_SCENE_MODE_TEXT			= 13,
+};
+
+#define V4L2_CID_3A_LOCK			(V4L2_CID_CAMERA_CLASS_BASE+27)
+#define V4L2_LOCK_EXPOSURE			(1 << 0)
+#define V4L2_LOCK_WHITE_BALANCE			(1 << 1)
+#define V4L2_LOCK_FOCUS				(1 << 2)
+
+#define V4L2_CID_AUTO_FOCUS_START		(V4L2_CID_CAMERA_CLASS_BASE+28)
+#define V4L2_CID_AUTO_FOCUS_STOP		(V4L2_CID_CAMERA_CLASS_BASE+29)
+#define V4L2_CID_AUTO_FOCUS_STATUS		(V4L2_CID_CAMERA_CLASS_BASE+30)
+#define V4L2_AUTO_FOCUS_STATUS_IDLE		(0 << 0)
+#define V4L2_AUTO_FOCUS_STATUS_BUSY		(1 << 0)
+#define V4L2_AUTO_FOCUS_STATUS_REACHED		(1 << 1)
+#define V4L2_AUTO_FOCUS_STATUS_FAILED		(1 << 2)
+
+#define V4L2_CID_AUTO_FOCUS_RANGE		(V4L2_CID_CAMERA_CLASS_BASE+31)
+enum v4l2_auto_focus_range {
+	V4L2_AUTO_FOCUS_RANGE_AUTO		= 0,
+	V4L2_AUTO_FOCUS_RANGE_NORMAL		= 1,
+	V4L2_AUTO_FOCUS_RANGE_MACRO		= 2,
+	V4L2_AUTO_FOCUS_RANGE_INFINITY		= 3,
+};
+
+#define V4L2_CID_PAN_SPEED			(V4L2_CID_CAMERA_CLASS_BASE+32)
+#define V4L2_CID_TILT_SPEED			(V4L2_CID_CAMERA_CLASS_BASE+33)
+
+/* FM Modulator class control IDs */
+
+#define V4L2_CID_FM_TX_CLASS_BASE		(V4L2_CTRL_CLASS_FM_TX | 0x900)
+#define V4L2_CID_FM_TX_CLASS			(V4L2_CTRL_CLASS_FM_TX | 1)
+
+#define V4L2_CID_RDS_TX_DEVIATION		(V4L2_CID_FM_TX_CLASS_BASE + 1)
+#define V4L2_CID_RDS_TX_PI			(V4L2_CID_FM_TX_CLASS_BASE + 2)
+#define V4L2_CID_RDS_TX_PTY			(V4L2_CID_FM_TX_CLASS_BASE + 3)
+#define V4L2_CID_RDS_TX_PS_NAME			(V4L2_CID_FM_TX_CLASS_BASE + 5)
+#define V4L2_CID_RDS_TX_RADIO_TEXT		(V4L2_CID_FM_TX_CLASS_BASE + 6)
+#define V4L2_CID_RDS_TX_MONO_STEREO		(V4L2_CID_FM_TX_CLASS_BASE + 7)
+#define V4L2_CID_RDS_TX_ARTIFICIAL_HEAD		(V4L2_CID_FM_TX_CLASS_BASE + 8)
+#define V4L2_CID_RDS_TX_COMPRESSED		(V4L2_CID_FM_TX_CLASS_BASE + 9)
+#define V4L2_CID_RDS_TX_DYNAMIC_PTY		(V4L2_CID_FM_TX_CLASS_BASE + 10)
+#define V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT	(V4L2_CID_FM_TX_CLASS_BASE + 11)
+#define V4L2_CID_RDS_TX_TRAFFIC_PROGRAM		(V4L2_CID_FM_TX_CLASS_BASE + 12)
+#define V4L2_CID_RDS_TX_MUSIC_SPEECH		(V4L2_CID_FM_TX_CLASS_BASE + 13)
+#define V4L2_CID_RDS_TX_ALT_FREQS_ENABLE	(V4L2_CID_FM_TX_CLASS_BASE + 14)
+#define V4L2_CID_RDS_TX_ALT_FREQS		(V4L2_CID_FM_TX_CLASS_BASE + 15)
+
+#define V4L2_CID_AUDIO_LIMITER_ENABLED		(V4L2_CID_FM_TX_CLASS_BASE + 64)
+#define V4L2_CID_AUDIO_LIMITER_RELEASE_TIME	(V4L2_CID_FM_TX_CLASS_BASE + 65)
+#define V4L2_CID_AUDIO_LIMITER_DEVIATION	(V4L2_CID_FM_TX_CLASS_BASE + 66)
+
+#define V4L2_CID_AUDIO_COMPRESSION_ENABLED	(V4L2_CID_FM_TX_CLASS_BASE + 80)
+#define V4L2_CID_AUDIO_COMPRESSION_GAIN		(V4L2_CID_FM_TX_CLASS_BASE + 81)
+#define V4L2_CID_AUDIO_COMPRESSION_THRESHOLD	(V4L2_CID_FM_TX_CLASS_BASE + 82)
+#define V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME	(V4L2_CID_FM_TX_CLASS_BASE + 83)
+#define V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME	(V4L2_CID_FM_TX_CLASS_BASE + 84)
+
+#define V4L2_CID_PILOT_TONE_ENABLED		(V4L2_CID_FM_TX_CLASS_BASE + 96)
+#define V4L2_CID_PILOT_TONE_DEVIATION		(V4L2_CID_FM_TX_CLASS_BASE + 97)
+#define V4L2_CID_PILOT_TONE_FREQUENCY		(V4L2_CID_FM_TX_CLASS_BASE + 98)
+
+#define V4L2_CID_TUNE_PREEMPHASIS		(V4L2_CID_FM_TX_CLASS_BASE + 112)
+enum v4l2_preemphasis {
+	V4L2_PREEMPHASIS_DISABLED	= 0,
+	V4L2_PREEMPHASIS_50_uS		= 1,
+	V4L2_PREEMPHASIS_75_uS		= 2,
+};
+#define V4L2_CID_TUNE_POWER_LEVEL		(V4L2_CID_FM_TX_CLASS_BASE + 113)
+#define V4L2_CID_TUNE_ANTENNA_CAPACITOR		(V4L2_CID_FM_TX_CLASS_BASE + 114)
+
+
+/* Flash and privacy (indicator) light controls */
+
+#define V4L2_CID_FLASH_CLASS_BASE		(V4L2_CTRL_CLASS_FLASH | 0x900)
+#define V4L2_CID_FLASH_CLASS			(V4L2_CTRL_CLASS_FLASH | 1)
+
+#define V4L2_CID_FLASH_LED_MODE			(V4L2_CID_FLASH_CLASS_BASE + 1)
+enum v4l2_flash_led_mode {
+	V4L2_FLASH_LED_MODE_NONE,
+	V4L2_FLASH_LED_MODE_FLASH,
+	V4L2_FLASH_LED_MODE_TORCH,
+};
+
+#define V4L2_CID_FLASH_STROBE_SOURCE		(V4L2_CID_FLASH_CLASS_BASE + 2)
+enum v4l2_flash_strobe_source {
+	V4L2_FLASH_STROBE_SOURCE_SOFTWARE,
+	V4L2_FLASH_STROBE_SOURCE_EXTERNAL,
+};
+
+#define V4L2_CID_FLASH_STROBE			(V4L2_CID_FLASH_CLASS_BASE + 3)
+#define V4L2_CID_FLASH_STROBE_STOP		(V4L2_CID_FLASH_CLASS_BASE + 4)
+#define V4L2_CID_FLASH_STROBE_STATUS		(V4L2_CID_FLASH_CLASS_BASE + 5)
+
+#define V4L2_CID_FLASH_TIMEOUT			(V4L2_CID_FLASH_CLASS_BASE + 6)
+#define V4L2_CID_FLASH_INTENSITY		(V4L2_CID_FLASH_CLASS_BASE + 7)
+#define V4L2_CID_FLASH_TORCH_INTENSITY		(V4L2_CID_FLASH_CLASS_BASE + 8)
+#define V4L2_CID_FLASH_INDICATOR_INTENSITY	(V4L2_CID_FLASH_CLASS_BASE + 9)
+
+#define V4L2_CID_FLASH_FAULT			(V4L2_CID_FLASH_CLASS_BASE + 10)
+#define V4L2_FLASH_FAULT_OVER_VOLTAGE		(1 << 0)
+#define V4L2_FLASH_FAULT_TIMEOUT		(1 << 1)
+#define V4L2_FLASH_FAULT_OVER_TEMPERATURE	(1 << 2)
+#define V4L2_FLASH_FAULT_SHORT_CIRCUIT		(1 << 3)
+#define V4L2_FLASH_FAULT_OVER_CURRENT		(1 << 4)
+#define V4L2_FLASH_FAULT_INDICATOR		(1 << 5)
+#define V4L2_FLASH_FAULT_UNDER_VOLTAGE		(1 << 6)
+#define V4L2_FLASH_FAULT_INPUT_VOLTAGE		(1 << 7)
+#define V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE	(1 << 8)
+
+#define V4L2_CID_FLASH_CHARGE			(V4L2_CID_FLASH_CLASS_BASE + 11)
+#define V4L2_CID_FLASH_READY			(V4L2_CID_FLASH_CLASS_BASE + 12)
+
+
+/* JPEG-class control IDs */
+
+#define V4L2_CID_JPEG_CLASS_BASE		(V4L2_CTRL_CLASS_JPEG | 0x900)
+#define V4L2_CID_JPEG_CLASS			(V4L2_CTRL_CLASS_JPEG | 1)
+
+#define	V4L2_CID_JPEG_CHROMA_SUBSAMPLING	(V4L2_CID_JPEG_CLASS_BASE + 1)
+enum v4l2_jpeg_chroma_subsampling {
+	V4L2_JPEG_CHROMA_SUBSAMPLING_444	= 0,
+	V4L2_JPEG_CHROMA_SUBSAMPLING_422	= 1,
+	V4L2_JPEG_CHROMA_SUBSAMPLING_420	= 2,
+	V4L2_JPEG_CHROMA_SUBSAMPLING_411	= 3,
+	V4L2_JPEG_CHROMA_SUBSAMPLING_410	= 4,
+	V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY	= 5,
+};
+#define	V4L2_CID_JPEG_RESTART_INTERVAL		(V4L2_CID_JPEG_CLASS_BASE + 2)
+#define	V4L2_CID_JPEG_COMPRESSION_QUALITY	(V4L2_CID_JPEG_CLASS_BASE + 3)
+
+#define	V4L2_CID_JPEG_ACTIVE_MARKER		(V4L2_CID_JPEG_CLASS_BASE + 4)
+#define	V4L2_JPEG_ACTIVE_MARKER_APP0		(1 << 0)
+#define	V4L2_JPEG_ACTIVE_MARKER_APP1		(1 << 1)
+#define	V4L2_JPEG_ACTIVE_MARKER_COM		(1 << 16)
+#define	V4L2_JPEG_ACTIVE_MARKER_DQT		(1 << 17)
+#define	V4L2_JPEG_ACTIVE_MARKER_DHT		(1 << 18)
+
+
+/* Image source controls */
+#define V4L2_CID_IMAGE_SOURCE_CLASS_BASE	(V4L2_CTRL_CLASS_IMAGE_SOURCE | 0x900)
+#define V4L2_CID_IMAGE_SOURCE_CLASS		(V4L2_CTRL_CLASS_IMAGE_SOURCE | 1)
+
+#define V4L2_CID_VBLANK				(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 1)
+#define V4L2_CID_HBLANK				(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 2)
+#define V4L2_CID_ANALOGUE_GAIN			(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 3)
+#define V4L2_CID_TEST_PATTERN_RED		(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 4)
+#define V4L2_CID_TEST_PATTERN_GREENR		(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5)
+#define V4L2_CID_TEST_PATTERN_BLUE		(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
+#define V4L2_CID_TEST_PATTERN_GREENB		(V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
+
+
+/* Image processing controls */
+
+#define V4L2_CID_IMAGE_PROC_CLASS_BASE		(V4L2_CTRL_CLASS_IMAGE_PROC | 0x900)
+#define V4L2_CID_IMAGE_PROC_CLASS		(V4L2_CTRL_CLASS_IMAGE_PROC | 1)
+
+#define V4L2_CID_LINK_FREQ			(V4L2_CID_IMAGE_PROC_CLASS_BASE + 1)
+#define V4L2_CID_PIXEL_RATE			(V4L2_CID_IMAGE_PROC_CLASS_BASE + 2)
+#define V4L2_CID_TEST_PATTERN			(V4L2_CID_IMAGE_PROC_CLASS_BASE + 3)
+
+
+/*  DV-class control IDs defined by V4L2 */
+#define V4L2_CID_DV_CLASS_BASE			(V4L2_CTRL_CLASS_DV | 0x900)
+#define V4L2_CID_DV_CLASS			(V4L2_CTRL_CLASS_DV | 1)
+
+#define	V4L2_CID_DV_TX_HOTPLUG			(V4L2_CID_DV_CLASS_BASE + 1)
+#define	V4L2_CID_DV_TX_RXSENSE			(V4L2_CID_DV_CLASS_BASE + 2)
+#define	V4L2_CID_DV_TX_EDID_PRESENT		(V4L2_CID_DV_CLASS_BASE + 3)
+#define	V4L2_CID_DV_TX_MODE			(V4L2_CID_DV_CLASS_BASE + 4)
+enum v4l2_dv_tx_mode {
+	V4L2_DV_TX_MODE_DVI_D	= 0,
+	V4L2_DV_TX_MODE_HDMI	= 1,
+};
+#define V4L2_CID_DV_TX_RGB_RANGE		(V4L2_CID_DV_CLASS_BASE + 5)
+enum v4l2_dv_rgb_range {
+	V4L2_DV_RGB_RANGE_AUTO	  = 0,
+	V4L2_DV_RGB_RANGE_LIMITED = 1,
+	V4L2_DV_RGB_RANGE_FULL	  = 2,
+};
+
+#define	V4L2_CID_DV_RX_POWER_PRESENT		(V4L2_CID_DV_CLASS_BASE + 100)
+#define V4L2_CID_DV_RX_RGB_RANGE		(V4L2_CID_DV_CLASS_BASE + 101)
+
+#define V4L2_CID_FM_RX_CLASS_BASE		(V4L2_CTRL_CLASS_FM_RX | 0x900)
+#define V4L2_CID_FM_RX_CLASS			(V4L2_CTRL_CLASS_FM_RX | 1)
+
+#define V4L2_CID_TUNE_DEEMPHASIS		(V4L2_CID_FM_RX_CLASS_BASE + 1)
+enum v4l2_deemphasis {
+	V4L2_DEEMPHASIS_DISABLED	= V4L2_PREEMPHASIS_DISABLED,
+	V4L2_DEEMPHASIS_50_uS		= V4L2_PREEMPHASIS_50_uS,
+	V4L2_DEEMPHASIS_75_uS		= V4L2_PREEMPHASIS_75_uS,
+};
+
+#define V4L2_CID_RDS_RECEPTION			(V4L2_CID_FM_RX_CLASS_BASE + 2)
+#define V4L2_CID_RDS_RX_PTY			(V4L2_CID_FM_RX_CLASS_BASE + 3)
+#define V4L2_CID_RDS_RX_PS_NAME			(V4L2_CID_FM_RX_CLASS_BASE + 4)
+#define V4L2_CID_RDS_RX_RADIO_TEXT		(V4L2_CID_FM_RX_CLASS_BASE + 5)
+#define V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT	(V4L2_CID_FM_RX_CLASS_BASE + 6)
+#define V4L2_CID_RDS_RX_TRAFFIC_PROGRAM		(V4L2_CID_FM_RX_CLASS_BASE + 7)
+#define V4L2_CID_RDS_RX_MUSIC_SPEECH		(V4L2_CID_FM_RX_CLASS_BASE + 8)
+
+#define V4L2_CID_RF_TUNER_CLASS_BASE		(V4L2_CTRL_CLASS_RF_TUNER | 0x900)
+#define V4L2_CID_RF_TUNER_CLASS			(V4L2_CTRL_CLASS_RF_TUNER | 1)
+
+#define V4L2_CID_RF_TUNER_BANDWIDTH_AUTO	(V4L2_CID_RF_TUNER_CLASS_BASE + 11)
+#define V4L2_CID_RF_TUNER_BANDWIDTH		(V4L2_CID_RF_TUNER_CLASS_BASE + 12)
+#define V4L2_CID_RF_TUNER_RF_GAIN		(V4L2_CID_RF_TUNER_CLASS_BASE + 32)
+#define V4L2_CID_RF_TUNER_LNA_GAIN_AUTO		(V4L2_CID_RF_TUNER_CLASS_BASE + 41)
+#define V4L2_CID_RF_TUNER_LNA_GAIN		(V4L2_CID_RF_TUNER_CLASS_BASE + 42)
+#define V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO	(V4L2_CID_RF_TUNER_CLASS_BASE + 51)
+#define V4L2_CID_RF_TUNER_MIXER_GAIN		(V4L2_CID_RF_TUNER_CLASS_BASE + 52)
+#define V4L2_CID_RF_TUNER_IF_GAIN_AUTO		(V4L2_CID_RF_TUNER_CLASS_BASE + 61)
+#define V4L2_CID_RF_TUNER_IF_GAIN		(V4L2_CID_RF_TUNER_CLASS_BASE + 62)
+#define V4L2_CID_RF_TUNER_PLL_LOCK			(V4L2_CID_RF_TUNER_CLASS_BASE + 91)
+
+
+/*  Detection-class control IDs defined by V4L2 */
+#define V4L2_CID_DETECT_CLASS_BASE		(V4L2_CTRL_CLASS_DETECT | 0x900)
+#define V4L2_CID_DETECT_CLASS			(V4L2_CTRL_CLASS_DETECT | 1)
+
+#define V4L2_CID_DETECT_MD_MODE			(V4L2_CID_DETECT_CLASS_BASE + 1)
+enum v4l2_detect_md_mode {
+	V4L2_DETECT_MD_MODE_DISABLED		= 0,
+	V4L2_DETECT_MD_MODE_GLOBAL		= 1,
+	V4L2_DETECT_MD_MODE_THRESHOLD_GRID	= 2,
+	V4L2_DETECT_MD_MODE_REGION_GRID		= 3,
+};
+#define V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD	(V4L2_CID_DETECT_CLASS_BASE + 2)
+#define V4L2_CID_DETECT_MD_THRESHOLD_GRID	(V4L2_CID_DETECT_CLASS_BASE + 3)
+#define V4L2_CID_DETECT_MD_REGION_GRID		(V4L2_CID_DETECT_CLASS_BASE + 4)
+
+
+/* Complex controls */
+
+#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG			0x01
+#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG			0x02
+#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG			0x04
+#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG			0x08
+#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG			0x10
+#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG			0x20
+
+#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE		0x01
+#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS	0x02
+#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO		0x04
+#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED	0x08
+#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY			0x10
+#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD		0x20
+#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE			0x40
+struct v4l2_ctrl_h264_sps {
+	__u8 profile_idc;
+	__u8 constraint_set_flags;
+	__u8 level_idc;
+	__u8 seq_parameter_set_id;
+	__u8 chroma_format_idc;
+	__u8 bit_depth_luma_minus8;
+	__u8 bit_depth_chroma_minus8;
+	__u8 log2_max_frame_num_minus4;
+	__u8 pic_order_cnt_type;
+	__u8 log2_max_pic_order_cnt_lsb_minus4;
+	__s32 offset_for_non_ref_pic;
+	__s32 offset_for_top_to_bottom_field;
+	__u8 num_ref_frames_in_pic_order_cnt_cycle;
+	__s32 offset_for_ref_frame[255];
+	__u8 max_num_ref_frames;
+	__u16 pic_width_in_mbs_minus1;
+	__u16 pic_height_in_map_units_minus1;
+	__u8 flags;
+};
+
+#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE				0x0001
+#define V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT	0x0002
+#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED				0x0004
+#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT		0x0008
+#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED			0x0010
+#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT			0x0020
+#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE				0x0040
+#define V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT			0x0080
+struct v4l2_ctrl_h264_pps {
+	__u8 pic_parameter_set_id;
+	__u8 seq_parameter_set_id;
+	__u8 num_slice_groups_minus1;
+	__u8 num_ref_idx_l0_default_active_minus1;
+	__u8 num_ref_idx_l1_default_active_minus1;
+	__u8 weighted_bipred_idc;
+	__s8 pic_init_qp_minus26;
+	__s8 pic_init_qs_minus26;
+	__s8 chroma_qp_index_offset;
+	__s8 second_chroma_qp_index_offset;
+	__u8 flags;
+};
+
+struct v4l2_ctrl_h264_scaling_matrix {
+	__u8 scaling_list_4x4[6][16];
+	__u8 scaling_list_8x8[6][64];
+};
+
+struct v4l2_h264_weight_factors {
+	__s8 luma_weight[32];
+	__s8 luma_offset[32];
+	__s8 chroma_weight[32][2];
+	__s8 chroma_offset[32][2];
+};
+
+struct v4l2_h264_pred_weight_table {
+	__u8 luma_log2_weight_denom;
+	__u8 chroma_log2_weight_denom;
+	struct v4l2_h264_weight_factors weight_factors[2];
+};
+
+#define V4L2_SLICE_FLAG_FIELD_PIC		0x01
+#define V4L2_SLICE_FLAG_BOTTOM_FIELD		0x02
+#define V4L2_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED	0x04
+#define V4L2_SLICE_FLAG_SP_FOR_SWITCH		0x08
+struct v4l2_ctrl_h264_slice_param {
+	/* Size in bytes, including header */
+	__u32 size;
+	/* Offset in bits to slice_data() from the beginning of this slice. */
+	__u32 header_bit_size;
+
+	__u16 first_mb_in_slice;
+	__u8 slice_type;
+	__u8 pic_parameter_set_id;
+	__u8 colour_plane_id;
+	__u16 frame_num;
+	__u16 idr_pic_id;
+	__u16 pic_order_cnt_lsb;
+	__s32 delta_pic_order_cnt_bottom;
+	__s32 delta_pic_order_cnt0;
+	__s32 delta_pic_order_cnt1;
+	__u8 redundant_pic_cnt;
+
+	struct v4l2_h264_pred_weight_table pred_weight_table;
+	/* Size in bits of dec_ref_pic_marking() syntax element. */
+	__u32 dec_ref_pic_marking_bit_size;
+	/* Size in bits of pic order count syntax. */
+	__u32 pic_order_cnt_bit_size;
+
+	__u8 cabac_init_idc;
+	__s8 slice_qp_delta;
+	__s8 slice_qs_delta;
+	__u8 disable_deblocking_filter_idc;
+	__s8 slice_alpha_c0_offset_div2;
+	__s8 slice_beta_offset_div2;
+	__u32 slice_group_change_cycle;
+
+	__u8 num_ref_idx_l0_active_minus1;
+	__u8 num_ref_idx_l1_active_minus1;
+	/*  Entries on each list are indices
+	 *  into v4l2_ctrl_h264_decode_param.dpb[]. */
+	__u8 ref_pic_list0[32];
+	__u8 ref_pic_list1[32];
+
+	__u8 flags;
+};
+
+/* If not set, this entry is unused for reference. */
+#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE		0x01
+#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM	0x02
+struct v4l2_h264_dpb_entry {
+	__u32 buf_index; /* v4l2_buffer index */
+	__u16 frame_num;
+	__u16 pic_num;
+	/* Note that field is indicated by v4l2_buffer.field */
+	__s32 top_field_order_cnt;
+	__s32 bottom_field_order_cnt;
+	__u8 flags; /* V4L2_H264_DPB_ENTRY_FLAG_* */
+};
+
+struct v4l2_ctrl_h264_decode_param {
+	__u32 num_slices;
+	__u8 idr_pic_flag;
+	__u8 nal_ref_idc;
+	__s32 top_field_order_cnt;
+	__s32 bottom_field_order_cnt;
+	__u8 ref_pic_list_p0[32];
+	__u8 ref_pic_list_b0[32];
+	__u8 ref_pic_list_b1[32];
+	struct v4l2_h264_dpb_entry dpb[16];
+};
+
+#define V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED              0x01
+#define V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP           0x02
+#define V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA  0x04
+struct v4l2_vp8_sgmnt_hdr {
+	__u8 segment_feature_mode;
+
+	__s8 quant_update[4];
+	__s8 lf_update[4];
+	__u8 segment_probs[3];
+
+	__u8 flags;
+};
+
+#define V4L2_VP8_LF_HDR_ADJ_ENABLE	0x01
+#define V4L2_VP8_LF_HDR_DELTA_UPDATE	0x02
+struct v4l2_vp8_loopfilter_hdr {
+	__u8 type;
+	__u8 level;
+	__u8 sharpness_level;
+	__s8 ref_frm_delta_magnitude[4];
+	__s8 mb_mode_delta_magnitude[4];
+
+	__u8 flags;
+};
+
+struct v4l2_vp8_quantization_hdr {
+	__u8 y_ac_qi;
+	__s8 y_dc_delta;
+	__s8 y2_dc_delta;
+	__s8 y2_ac_delta;
+	__s8 uv_dc_delta;
+	__s8 uv_ac_delta;
+	__u16 dequant_factors[4][3][2];
+};
+
+struct v4l2_vp8_entropy_hdr {
+	__u8 coeff_probs[4][8][3][11];
+	__u8 y_mode_probs[4];
+	__u8 uv_mode_probs[3];
+	__u8 mv_probs[2][19];
+};
+
+#define V4L2_VP8_FRAME_HDR_FLAG_EXPERIMENTAL		0x01
+#define V4L2_VP8_FRAME_HDR_FLAG_SHOW_FRAME		0x02
+#define V4L2_VP8_FRAME_HDR_FLAG_MB_NO_SKIP_COEFF	0x04
+struct v4l2_ctrl_vp8_frame_hdr {
+	/* 0: keyframe, 1: not a keyframe */
+	__u8 key_frame;
+	__u8 version;
+
+	/* Populated also if not a key frame */
+	__u16 width;
+	__u8 horizontal_scale;
+	__u16 height;
+	__u8 vertical_scale;
+
+	struct v4l2_vp8_sgmnt_hdr sgmnt_hdr;
+	struct v4l2_vp8_loopfilter_hdr lf_hdr;
+	struct v4l2_vp8_quantization_hdr quant_hdr;
+	struct v4l2_vp8_entropy_hdr entropy_hdr;
+
+	__u8 sign_bias_golden;
+	__u8 sign_bias_alternate;
+
+	__u8 prob_skip_false;
+	__u8 prob_intra;
+	__u8 prob_last;
+	__u8 prob_gf;
+
+	__u32 first_part_size;
+	__u32 first_part_offset;
+	/*
+	 * Offset in bits of MB data in first partition,
+	 * i.e. bit offset starting from first_part_offset.
+	 */
+	__u32 macroblock_bit_offset;
+
+	__u8 num_dct_parts;
+	__u32 dct_part_sizes[8];
+
+	__u8 bool_dec_range;
+	__u8 bool_dec_value;
+	__u8 bool_dec_count;
+
+	/* v4l2_buffer indices of reference frames */
+	__u32 last_frame;
+	__u32 golden_frame;
+	__u32 alt_frame;
+
+	__u8 flags;
+};
+
+#define V4L2_VP9_SGMNT_PARAM_FLAG_ENABLED		0x01
+#define V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_MAP		0x02
+#define V4L2_VP9_SGMNT_PARAM_FLAG_TEMPORAL_UPDATE	0x04
+#define V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_DATA		0x08
+#define V4L2_VP9_SGMNT_PARAM_FLAG_ABS_OR_DELTA_UPDATE	0x10
+struct v4l2_vp9_segmentation_params {
+	__u8 tree_probs[7];
+	__u8 pred_probs[3];
+	__u8 feature_enabled[8][4];
+	__s16 feature_data[8][4];
+
+	__u8 flags;
+};
+
+#define V4L2_VP9_LOOP_FLTR_FLAG_DELTA_ENABLED		0x01
+#define V4L2_VP9_LOOP_FLTR_FLAG_DELTA_UPDATE		0x02
+struct v4l2_vp9_loop_filter_params {
+	__u8 level;
+	__u8 sharpness;
+	__s8 deltas[4];
+	__s8 mode_deltas[2];
+	__u8 lvl_lookup[8][4][2];
+
+	__u8 flags;
+};
+
+#define V4L2_VP9_QUANT_PARAMS_FLAG_LOSSLESS		0x01
+struct v4l2_vp9_quantization_params {
+	__u8 base_q_idx;
+	__s8 delta_q_y_dc;
+	__s8 delta_q_uv_dc;
+	__s8 delta_q_uv_ac;
+
+	__u8 flags;
+};
+
+#define V4L2_VP9_FRAME_HDR_FLAG_SHOW_FRAME	0x01
+/* Error resilient mode enabled flag */
+#define V4L2_VP9_FRAME_HDR_FLAG_ERR_RES		0x02
+#define V4L2_VP9_FRAME_HDR_FLAG_FRAME_INTRA	0x04
+#define V4L2_VP9_FRAME_HDR_ALLOW_HIGH_PREC_MV	0x08
+#define V4L2_VP9_FRAME_HDR_REFRESH_FRAME_CTX	0x10
+#define V4L2_VP9_FRAME_HDR_PARALLEL_DEC_MODE	0x20
+struct v4l2_ctrl_vp9_frame_hdr {
+	__u8 profile;
+	/* 0: keyframe, 1: non-keyframe */
+	__u8 frame_type;
+
+	__u8 bit_depth;
+	__u8 color_space;
+	__u8 color_range;
+	__u8 subsampling_x;
+	__u8 subsampling_y;
+
+	__u32 frame_width;
+	__u32 frame_height;
+	__u32 render_width;
+	__u32 render_height;
+
+	__u8 reset_frame_context;
+
+	__u8 interpolation_filter;
+	__u8 frame_context_idx;
+
+	struct v4l2_vp9_loop_filter_params lf_params;
+	struct v4l2_vp9_quantization_params quant_params;
+	struct v4l2_vp9_segmentation_params sgmnt_params;
+
+	__u8 tile_cols_log2;
+	__u8 tile_rows_log2;
+
+	__u16 header_size_in_bytes;
+
+	__u8 flags;
+};
+
+struct v4l2_vp9_reference_frame {
+	 /* v4l2_buffer index */
+	__u32 buf_index;
+
+	__u32 frame_width;
+	__u32 frame_height;
+	__u8 bit_depth;
+	__u8 subsampling_x;
+	__u8 subsampling_y;
+};
+
+struct v4l2_ctrl_vp9_decode_param {
+	/* v4l2_buffer index for all reference frames (frame slots). */
+	__u32 ref_frames[8];
+
+	/* Active reference frames, [0]: last, [1]: golden, [2]: altref */
+	struct v4l2_vp9_reference_frame active_ref_frames[3];
+};
+
+struct v4l2_vp9_entropy_ctx {
+	__u8 tx_probs_8x8[2][1];
+	__u8 tx_probs_16x16[2][2];
+	__u8 tx_probs_32x32[2][3];
+
+	__u8 coef_probs[4][2][2][6][6][3];
+	__u8 skip_prob[3];
+	__u8 inter_mode_probs[7][3];
+	__u8 interp_filter_probs[4][2];
+	__u8 is_inter_prob[4];
+
+	__u8 comp_mode_prob[5];
+	__u8 single_ref_prob[5][2];
+	__u8 comp_ref_prob[5];
+
+	__u8 y_mode_probs[4][9];
+	__u8 uv_mode_probs[10][9];
+
+	__u8 partition_probs[16][3];
+
+	__u8 mv_joint_probs[3];
+	__u8 mv_sign_prob[2];
+	__u8 mv_class_probs[2][10];
+	__u8 mv_class0_bit_prob[2];
+	__u8 mv_bits_prob[2][10];
+	__u8 mv_class0_fr_probs[2][2][3];
+	__u8 mv_fr_probs[2][3];
+	__u8 mv_class0_hp_prob[2];
+	__u8 mv_hp_prob[2];
+};
+
+/* Entropy context state for current frame (frame_context_idx). */
+struct v4l2_ctrl_vp9_entropy {
+	__u8 tx_mode;
+	__u8 reference_mode;
+
+	/* Entropy context after load_probs2(). */
+	struct v4l2_vp9_entropy_ctx initial_entropy_ctx;
+
+	/*
+	 * Entropy context for the current decoding state: when passed to the
+	 * driver, contains the state of initial_entropy_ctx after parsing the
+	 * compressed header. After decoding is done (after vb2_buffer_done() is
+	 * called on the associated buffer), state as after refresh_probs().
+	 */
+	struct v4l2_vp9_entropy_ctx current_entropy_ctx;
+};
+
+#endif
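
For illustration only, and not part of this change: a minimal sketch of how a
user-space client might translate a parsed PPS into the v4l2_ctrl_h264_pps
control defined above, assuming this header is included. The ParsedPps type
and its fields are hypothetical stand-ins for whatever the stream parser
produces; the control struct and flag macros are the ones from this header.

    #include <cstdint>
    #include <cstring>

    // Hypothetical parsed-PPS holder; field names mirror the H.264 spec.
    struct ParsedPps {
      uint8_t pic_parameter_set_id;
      uint8_t seq_parameter_set_id;
      int8_t pic_init_qp_minus26;
      int8_t chroma_qp_index_offset;
      bool weighted_pred_flag;
      bool transform_8x8_mode_flag;
    };

    // Fill the slice-interface control from parsed state; the remaining
    // fields follow the same one-to-one pattern.
    static void FillV4L2H264Pps(const ParsedPps& pps,
                                struct v4l2_ctrl_h264_pps* v4l2_pps) {
      memset(v4l2_pps, 0, sizeof(*v4l2_pps));
      v4l2_pps->pic_parameter_set_id = pps.pic_parameter_set_id;
      v4l2_pps->seq_parameter_set_id = pps.seq_parameter_set_id;
      v4l2_pps->pic_init_qp_minus26 = pps.pic_init_qp_minus26;
      v4l2_pps->chroma_qp_index_offset = pps.chroma_qp_index_offset;
      if (pps.weighted_pred_flag)
        v4l2_pps->flags |= V4L2_H264_PPS_FLAG_WEIGHTED_PRED;
      if (pps.transform_8x8_mode_flag)
        v4l2_pps->flags |= V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE;
    }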
diff --git a/vda/v4l2_slice_video_decode_accelerator.cc b/vda/v4l2_slice_video_decode_accelerator.cc
new file mode 100644
index 0000000..98e5915
--- /dev/null
+++ b/vda/v4l2_slice_video_decode_accelerator.cc
@@ -0,0 +1,3080 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "v4l2_slice_video_decode_accelerator.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/callback_helpers.h"
+#include "base/command_line.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "shared_memory_region.h"
+
+#define LOGF(level) LOG(level) << __func__ << "(): "
+#define DLOGF(level) DLOG(level) << __func__ << "(): "
+#define DVLOGF(level) DVLOG(level) << __func__ << "(): "
+#define PLOGF(level) PLOG(level) << __func__ << "(): "
+
+#define NOTIFY_ERROR(x)                         \
+  do {                                          \
+    LOGF(ERROR) << "Setting error state:" << x; \
+    SetErrorState(x);                           \
+  } while (0)
+
+#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str) \
+  do {                                                          \
+    if (device_->Ioctl(type, arg) != 0) {                       \
+      PLOGF(ERROR) << "ioctl() failed: " << type_str;           \
+      return value;                                             \
+    }                                                           \
+  } while (0)
+
+#define IOCTL_OR_ERROR_RETURN(type, arg) \
+  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, ((void)0), #type)
+
+#define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
+  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)
+
+#define IOCTL_OR_LOG_ERROR(type, arg)              \
+  do {                                             \
+    if (device_->Ioctl(type, arg) != 0)            \
+      PLOGF(ERROR) << "ioctl() failed: " << #type; \
+  } while (0)
+
+namespace media {
+
+// static
+const uint32_t V4L2SliceVideoDecodeAccelerator::supported_input_fourccs_[] = {
+    V4L2_PIX_FMT_H264_SLICE, V4L2_PIX_FMT_VP8_FRAME, V4L2_PIX_FMT_VP9_FRAME,
+};
+
+class V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface
+    : public base::RefCounted<V4L2DecodeSurface> {
+ public:
+  using ReleaseCB = base::Callback<void(int)>;
+
+  V4L2DecodeSurface(int32_t bitstream_id,
+                    int input_record,
+                    int output_record,
+                    const ReleaseCB& release_cb);
+
+  // Mark the surface as decoded. This also releases all reference surfaces,
+  // as they are no longer needed, and executes the done callback, if set.
+  void SetDecoded();
+  bool decoded() const { return decoded_; }
+
+  int32_t bitstream_id() const { return bitstream_id_; }
+  int input_record() const { return input_record_; }
+  int output_record() const { return output_record_; }
+  uint32_t config_store() const { return config_store_; }
+
+  // Take references to each reference surface and keep them until the
+  // target surface is decoded.
+  void SetReferenceSurfaces(
+      const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces);
+
+  // If provided via this method, |done_cb| will be executed after decoding
+  // into this surface is finished. The callback is reset afterwards, so it
+  // needs to be set again before each decode operation.
+  void SetDecodeDoneCallback(const base::Closure& done_cb) {
+    DCHECK(done_cb_.is_null());
+    done_cb_ = done_cb;
+  }
+
+  std::string ToString() const;
+
+ private:
+  friend class base::RefCounted<V4L2DecodeSurface>;
+  ~V4L2DecodeSurface();
+
+  int32_t bitstream_id_;
+  int input_record_;
+  int output_record_;
+  uint32_t config_store_;
+
+  bool decoded_;
+  ReleaseCB release_cb_;
+  base::Closure done_cb_;
+
+  std::vector<scoped_refptr<V4L2DecodeSurface>> reference_surfaces_;
+
+  DISALLOW_COPY_AND_ASSIGN(V4L2DecodeSurface);
+};
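+// Typical lifecycle, as implemented below: the surface is created over free
+// input/output records; SetReferenceSurfaces() pins its reference surfaces;
+// the records are queued to the device; SetDecoded() drops the references
+// and runs the optional done callback once the device finishes with the
+// buffer; and the destructor returns the output record via |release_cb_|.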
+
+V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::V4L2DecodeSurface(
+    int32_t bitstream_id,
+    int input_record,
+    int output_record,
+    const ReleaseCB& release_cb)
+    : bitstream_id_(bitstream_id),
+      input_record_(input_record),
+      output_record_(output_record),
+      config_store_(input_record + 1),
+      decoded_(false),
+      release_cb_(release_cb) {}
+
+V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::~V4L2DecodeSurface() {
+  DVLOGF(5) << "Releasing output record id=" << output_record_;
+  release_cb_.Run(output_record_);
+}
+
+void V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::SetReferenceSurfaces(
+    const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces) {
+  DCHECK(reference_surfaces_.empty());
+  reference_surfaces_ = ref_surfaces;
+}
+
+void V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::SetDecoded() {
+  DCHECK(!decoded_);
+  decoded_ = true;
+
+  // We can now drop references to all reference surfaces for this surface
+  // as we are done with decoding.
+  reference_surfaces_.clear();
+
+  // And finally execute and drop the decode done callback, if set.
+  if (!done_cb_.is_null())
+    base::ResetAndReturn(&done_cb_).Run();
+}
+
+std::string V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::ToString()
+    const {
+  std::string out;
+  base::StringAppendF(&out, "Buffer %d -> %d. ", input_record_, output_record_);
+  base::StringAppendF(&out, "Reference surfaces:");
+  for (const auto& ref : reference_surfaces_) {
+    DCHECK_NE(ref->output_record(), output_record_);
+    base::StringAppendF(&out, " %d", ref->output_record());
+  }
+  return out;
+}
+
+V4L2SliceVideoDecodeAccelerator::InputRecord::InputRecord()
+    : input_id(-1),
+      address(nullptr),
+      length(0),
+      bytes_used(0),
+      at_device(false) {}
+
+V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord()
+    : at_device(false),
+      at_client(false),
+      picture_id(-1),
+      cleared(false) {}
+
+struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
+  BitstreamBufferRef(
+      base::WeakPtr<VideoDecodeAccelerator::Client>& client,
+      const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
+      SharedMemoryRegion* shm,
+      int32_t input_id);
+  ~BitstreamBufferRef();
+  const base::WeakPtr<VideoDecodeAccelerator::Client> client;
+  const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
+  const std::unique_ptr<SharedMemoryRegion> shm;
+  off_t bytes_used;
+  const int32_t input_id;
+};
+
+V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
+    base::WeakPtr<VideoDecodeAccelerator::Client>& client,
+    const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
+    SharedMemoryRegion* shm,
+    int32_t input_id)
+    : client(client),
+      client_task_runner(client_task_runner),
+      shm(shm),
+      bytes_used(0),
+      input_id(input_id) {}
+
+V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() {
+  if (input_id >= 0) {
+    DVLOGF(5) << "returning input_id: " << input_id;
+    client_task_runner->PostTask(
+        FROM_HERE,
+        base::Bind(&VideoDecodeAccelerator::Client::NotifyEndOfBitstreamBuffer,
+                   client, input_id));
+  }
+}
+
+V4L2SliceVideoDecodeAccelerator::PictureRecord::PictureRecord(
+    bool cleared,
+    const Picture& picture)
+    : cleared(cleared), picture(picture) {}
+
+V4L2SliceVideoDecodeAccelerator::PictureRecord::~PictureRecord() {}
+
+class V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator
+    : public H264Decoder::H264Accelerator {
+ public:
+  explicit V4L2H264Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
+  ~V4L2H264Accelerator() override;
+
+  // H264Decoder::H264Accelerator implementation.
+  scoped_refptr<H264Picture> CreateH264Picture() override;
+
+  bool SubmitFrameMetadata(const H264SPS* sps,
+                           const H264PPS* pps,
+                           const H264DPB& dpb,
+                           const H264Picture::Vector& ref_pic_listp0,
+                           const H264Picture::Vector& ref_pic_listb0,
+                           const H264Picture::Vector& ref_pic_listb1,
+                           const scoped_refptr<H264Picture>& pic) override;
+
+  bool SubmitSlice(const H264PPS* pps,
+                   const H264SliceHeader* slice_hdr,
+                   const H264Picture::Vector& ref_pic_list0,
+                   const H264Picture::Vector& ref_pic_list1,
+                   const scoped_refptr<H264Picture>& pic,
+                   const uint8_t* data,
+                   size_t size) override;
+
+  bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
+  bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;
+
+  void Reset() override;
+
+ private:
+  // Max size of reference list.
+  static const size_t kDPBIndicesListSize = 32;
+  void H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
+                                       uint8_t dst_list[kDPBIndicesListSize]);
+
+  void H264DPBToV4L2DPB(
+      const H264DPB& dpb,
+      std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces);
+
+  scoped_refptr<V4L2DecodeSurface> H264PictureToV4L2DecodeSurface(
+      const scoped_refptr<H264Picture>& pic);
+
+  size_t num_slices_;
+  V4L2SliceVideoDecodeAccelerator* v4l2_dec_;
+
+  // TODO(posciak): This should be queried from hardware once supported.
+  static const size_t kMaxSlices = 16;
+  struct v4l2_ctrl_h264_slice_param v4l2_slice_params_[kMaxSlices];
+  struct v4l2_ctrl_h264_decode_param v4l2_decode_param_;
+
+  DISALLOW_COPY_AND_ASSIGN(V4L2H264Accelerator);
+};
+
+class V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator
+    : public VP8Decoder::VP8Accelerator {
+ public:
+  explicit V4L2VP8Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
+  ~V4L2VP8Accelerator() override;
+
+  // VP8Decoder::VP8Accelerator implementation.
+  scoped_refptr<VP8Picture> CreateVP8Picture() override;
+
+  bool SubmitDecode(const scoped_refptr<VP8Picture>& pic,
+                    const Vp8FrameHeader* frame_hdr,
+                    const scoped_refptr<VP8Picture>& last_frame,
+                    const scoped_refptr<VP8Picture>& golden_frame,
+                    const scoped_refptr<VP8Picture>& alt_frame) override;
+
+  bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override;
+
+ private:
+  scoped_refptr<V4L2DecodeSurface> VP8PictureToV4L2DecodeSurface(
+      const scoped_refptr<VP8Picture>& pic);
+
+  V4L2SliceVideoDecodeAccelerator* v4l2_dec_;
+
+  DISALLOW_COPY_AND_ASSIGN(V4L2VP8Accelerator);
+};
+
+class V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator
+    : public VP9Decoder::VP9Accelerator {
+ public:
+  explicit V4L2VP9Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
+  ~V4L2VP9Accelerator() override;
+
+  // VP9Decoder::VP9Accelerator implementation.
+  scoped_refptr<VP9Picture> CreateVP9Picture() override;
+
+  bool SubmitDecode(const scoped_refptr<VP9Picture>& pic,
+                    const Vp9SegmentationParams& segm_params,
+                    const Vp9LoopFilterParams& lf_params,
+                    const std::vector<scoped_refptr<VP9Picture>>& ref_pictures,
+                    const base::Closure& done_cb) override;
+
+  bool OutputPicture(const scoped_refptr<VP9Picture>& pic) override;
+
+  bool GetFrameContext(const scoped_refptr<VP9Picture>& pic,
+                       Vp9FrameContext* frame_ctx) override;
+
+  bool IsFrameContextRequired() const override {
+    return device_needs_frame_context_;
+  }
+
+ private:
+  scoped_refptr<V4L2DecodeSurface> VP9PictureToV4L2DecodeSurface(
+      const scoped_refptr<VP9Picture>& pic);
+
+  bool device_needs_frame_context_;
+
+  V4L2SliceVideoDecodeAccelerator* v4l2_dec_;
+
+  DISALLOW_COPY_AND_ASSIGN(V4L2VP9Accelerator);
+};
+
+// Codec-specific subclasses of software decoder picture classes.
+// This allows us to keep decoders oblivious of our implementation details.
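+// For example, the H264 accelerator recovers the surface backing a generic
+// H264Picture via pic->AsV4L2H264Picture()->dec_surface() (see
+// H264PictureToV4L2DecodeSurface() above).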
+class V4L2H264Picture : public H264Picture {
+ public:
+  explicit V4L2H264Picture(
+      const scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>&
+          dec_surface);
+
+  V4L2H264Picture* AsV4L2H264Picture() override { return this; }
+  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
+  dec_surface() {
+    return dec_surface_;
+  }
+
+ private:
+  ~V4L2H264Picture() override;
+
+  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
+      dec_surface_;
+
+  DISALLOW_COPY_AND_ASSIGN(V4L2H264Picture);
+};
+
+V4L2H264Picture::V4L2H264Picture(
+    const scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>&
+        dec_surface)
+    : dec_surface_(dec_surface) {}
+
+V4L2H264Picture::~V4L2H264Picture() {}
+
+class V4L2VP8Picture : public VP8Picture {
+ public:
+  explicit V4L2VP8Picture(
+      const scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>&
+          dec_surface);
+
+  V4L2VP8Picture* AsV4L2VP8Picture() override { return this; }
+  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
+  dec_surface() {
+    return dec_surface_;
+  }
+
+ private:
+  ~V4L2VP8Picture() override;
+
+  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
+      dec_surface_;
+
+  DISALLOW_COPY_AND_ASSIGN(V4L2VP8Picture);
+};
+
+V4L2VP8Picture::V4L2VP8Picture(
+    const scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>&
+        dec_surface)
+    : dec_surface_(dec_surface) {}
+
+V4L2VP8Picture::~V4L2VP8Picture() {}
+
+class V4L2VP9Picture : public VP9Picture {
+ public:
+  explicit V4L2VP9Picture(
+      const scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>&
+          dec_surface);
+
+  V4L2VP9Picture* AsV4L2VP9Picture() override { return this; }
+  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
+  dec_surface() {
+    return dec_surface_;
+  }
+
+ private:
+  ~V4L2VP9Picture() override;
+
+  scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
+      dec_surface_;
+
+  DISALLOW_COPY_AND_ASSIGN(V4L2VP9Picture);
+};
+
+V4L2VP9Picture::V4L2VP9Picture(
+    const scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>&
+        dec_surface)
+    : dec_surface_(dec_surface) {}
+
+V4L2VP9Picture::~V4L2VP9Picture() {}
+
+V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
+    const scoped_refptr<V4L2Device>& device)
+    : input_planes_count_(0),
+      output_planes_count_(0),
+      child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
+      device_(device),
+      decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"),
+      device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"),
+      input_streamon_(false),
+      input_buffer_queued_count_(0),
+      output_streamon_(false),
+      output_buffer_queued_count_(0),
+      video_profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
+      input_format_fourcc_(0),
+      output_format_fourcc_(0),
+      state_(kUninitialized),
+      output_mode_(Config::OutputMode::ALLOCATE),
+      decoder_flushing_(false),
+      decoder_resetting_(false),
+      surface_set_change_pending_(false),
+      picture_clearing_count_(0),
+      weak_this_factory_(this) {
+  weak_this_ = weak_this_factory_.GetWeakPtr();
+}
+
+V4L2SliceVideoDecodeAccelerator::~V4L2SliceVideoDecodeAccelerator() {
+  DVLOGF(2);
+
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+  DCHECK(!decoder_thread_.IsRunning());
+  DCHECK(!device_poll_thread_.IsRunning());
+
+  DCHECK(input_buffer_map_.empty());
+  DCHECK(output_buffer_map_.empty());
+}
+
+void V4L2SliceVideoDecodeAccelerator::NotifyError(Error error) {
+  if (!child_task_runner_->BelongsToCurrentThread()) {
+    child_task_runner_->PostTask(
+        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::NotifyError,
+                              weak_this_, error));
+    return;
+  }
+
+  if (client_) {
+    client_->NotifyError(error);
+    client_ptr_factory_.reset();
+  }
+}
+
+bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
+                                                 Client* client) {
+  DVLOGF(3) << "profile: " << config.profile;
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+  DCHECK_EQ(state_, kUninitialized);
+
+  if (config.output_mode != Config::OutputMode::ALLOCATE &&
+      config.output_mode != Config::OutputMode::IMPORT) {
+    NOTREACHED() << "Only ALLOCATE and IMPORT OutputModes are supported";
+    return false;
+  }
+
+  client_ptr_factory_.reset(
+      new base::WeakPtrFactory<VideoDecodeAccelerator::Client>(client));
+  client_ = client_ptr_factory_->GetWeakPtr();
+  // If we haven't been set up to decode on a separate thread via
+  // TryToSetupDecodeOnSeparateThread(), use the main thread/client for
+  // decode tasks.
+  if (!decode_task_runner_) {
+    decode_task_runner_ = child_task_runner_;
+    DCHECK(!decode_client_);
+    decode_client_ = client_;
+  }
+
+  video_profile_ = config.profile;
+
+  // TODO(posciak): This needs to be queried once supported.
+  input_planes_count_ = 1;
+  output_planes_count_ = 1;
+
+  input_format_fourcc_ =
+      V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, true);
+
+  if (!device_->Open(V4L2Device::Type::kDecoder, input_format_fourcc_)) {
+    DVLOGF(1) << "Failed to open device for profile: " << config.profile
+              << " fourcc: " << std::hex << "0x" << input_format_fourcc_;
+    return false;
+  }
+
+  if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) {
+    h264_accelerator_.reset(new V4L2H264Accelerator(this));
+    decoder_.reset(new H264Decoder(h264_accelerator_.get()));
+  } else if (video_profile_ >= VP8PROFILE_MIN &&
+             video_profile_ <= VP8PROFILE_MAX) {
+    vp8_accelerator_.reset(new V4L2VP8Accelerator(this));
+    decoder_.reset(new VP8Decoder(vp8_accelerator_.get()));
+  } else if (video_profile_ >= VP9PROFILE_MIN &&
+             video_profile_ <= VP9PROFILE_MAX) {
+    vp9_accelerator_.reset(new V4L2VP9Accelerator(this));
+    decoder_.reset(new VP9Decoder(vp9_accelerator_.get()));
+  } else {
+    NOTREACHED() << "Unsupported profile " << video_profile_;
+    return false;
+  }
+
+  // Capabilities check.
+  struct v4l2_capability caps;
+  const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
+  if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
+    LOGF(ERROR) << "ioctl() failed: VIDIOC_QUERYCAP"
+                << ", caps check failed: 0x" << std::hex << caps.capabilities;
+    return false;
+  }
+
+  if (!SetupFormats())
+    return false;
+
+  if (!decoder_thread_.Start()) {
+    DLOGF(ERROR) << "device thread failed to start";
+    return false;
+  }
+  decoder_thread_task_runner_ = decoder_thread_.task_runner();
+
+  state_ = kInitialized;
+  output_mode_ = config.output_mode;
+
+  // InitializeTask will NOTIFY_ERROR on failure.
+  decoder_thread_task_runner_->PostTask(
+      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::InitializeTask,
+                            base::Unretained(this)));
+
+  DVLOGF(1) << "V4L2SliceVideoDecodeAccelerator initialized";
+  return true;
+}
+
+void V4L2SliceVideoDecodeAccelerator::InitializeTask() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  DCHECK_EQ(state_, kInitialized);
+
+  if (!CreateInputBuffers())
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+
+  // Output buffers will be created once the decoder gives us information
+  // about their size and required count.
+  state_ = kDecoding;
+}
+
+void V4L2SliceVideoDecodeAccelerator::Destroy() {
+  DVLOGF(3);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  if (decoder_thread_.IsRunning()) {
+    decoder_thread_task_runner_->PostTask(
+        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DestroyTask,
+                              base::Unretained(this)));
+
+    // Wait for tasks to finish/early-exit.
+    decoder_thread_.Stop();
+  }
+
+  delete this;
+  DVLOGF(3) << "Destroyed";
+}
+
+void V4L2SliceVideoDecodeAccelerator::DestroyTask() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  state_ = kError;
+
+  decoder_->Reset();
+
+  decoder_current_bitstream_buffer_.reset();
+  while (!decoder_input_queue_.empty())
+    decoder_input_queue_.pop();
+
+  // Stop streaming and the device_poll_thread_.
+  StopDevicePoll(false);
+
+  DestroyInputBuffers();
+  DestroyOutputs(false);
+
+  DCHECK(surfaces_at_device_.empty());
+  DCHECK(surfaces_at_display_.empty());
+  DCHECK(decoder_display_queue_.empty());
+}
+
+static bool IsSupportedOutputFormat(uint32_t v4l2_format) {
+  // Only support V4L2_PIX_FMT_NV12 output format for now.
+  // TODO(johnylin): add more supported formats if necessary.
+  uint32_t kSupportedOutputFmtFourcc[] = { V4L2_PIX_FMT_NV12 };
+  return std::find(
+      kSupportedOutputFmtFourcc,
+      kSupportedOutputFmtFourcc + arraysize(kSupportedOutputFmtFourcc),
+      v4l2_format) !=
+          kSupportedOutputFmtFourcc + arraysize(kSupportedOutputFmtFourcc);
+}
+
+bool V4L2SliceVideoDecodeAccelerator::SetupFormats() {
+  DCHECK_EQ(state_, kUninitialized);
+
+  size_t input_size;
+  Size max_resolution, min_resolution;
+  device_->GetSupportedResolution(input_format_fourcc_, &min_resolution,
+                                  &max_resolution);
+  if (max_resolution.width() > 1920 && max_resolution.height() > 1088)
+    input_size = kInputBufferMaxSizeFor4k;
+  else
+    input_size = kInputBufferMaxSizeFor1080p;
+
+  struct v4l2_fmtdesc fmtdesc;
+  memset(&fmtdesc, 0, sizeof(fmtdesc));
+  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  bool is_format_supported = false;
+  while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
+    if (fmtdesc.pixelformat == input_format_fourcc_) {
+      is_format_supported = true;
+      break;
+    }
+    ++fmtdesc.index;
+  }
+
+  if (!is_format_supported) {
+    DVLOGF(1) << "Input fourcc " << input_format_fourcc_
+              << " not supported by device.";
+    return false;
+  }
+
+  struct v4l2_format format;
+  memset(&format, 0, sizeof(format));
+  format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  format.fmt.pix_mp.pixelformat = input_format_fourcc_;
+  format.fmt.pix_mp.plane_fmt[0].sizeimage = input_size;
+  format.fmt.pix_mp.num_planes = input_planes_count_;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);
+
+  // We have to set up the format for output, because the driver may not allow
+  // changing it once we start streaming; whether it can support our chosen
+  // output format or not may depend on the input format.
+  memset(&fmtdesc, 0, sizeof(fmtdesc));
+  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  output_format_fourcc_ = 0;
+  while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
+    if (IsSupportedOutputFormat(fmtdesc.pixelformat)) {
+      output_format_fourcc_ = fmtdesc.pixelformat;
+      break;
+    }
+    ++fmtdesc.index;
+  }
+
+  if (output_format_fourcc_ == 0) {
+    LOGF(ERROR) << "Could not find a usable output format";
+    return false;
+  }
+
+  // Only set fourcc for output; resolution, etc., will come from the
+  // driver once it extracts it from the stream.
+  memset(&format, 0, sizeof(format));
+  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  format.fmt.pix_mp.pixelformat = output_format_fourcc_;
+  format.fmt.pix_mp.num_planes = output_planes_count_;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);
+
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::CreateInputBuffers() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  DCHECK(!input_streamon_);
+  DCHECK(input_buffer_map_.empty());
+
+  struct v4l2_requestbuffers reqbufs;
+  memset(&reqbufs, 0, sizeof(reqbufs));
+  reqbufs.count = kNumInputBuffers;
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  reqbufs.memory = V4L2_MEMORY_MMAP;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
+  if (reqbufs.count < kNumInputBuffers) {
+    PLOGF(ERROR) << "Could not allocate enough output buffers";
+    return false;
+  }
+  input_buffer_map_.resize(reqbufs.count);
+  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
+    free_input_buffers_.push_back(i);
+
+    // Query for the MEMORY_MMAP pointer.
+    struct v4l2_plane planes[VIDEO_MAX_PLANES];
+    struct v4l2_buffer buffer;
+    memset(&buffer, 0, sizeof(buffer));
+    memset(planes, 0, sizeof(planes));
+    buffer.index = i;
+    buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+    buffer.memory = V4L2_MEMORY_MMAP;
+    buffer.m.planes = planes;
+    buffer.length = input_planes_count_;
+    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
+    void* address = device_->Mmap(nullptr,
+                                  buffer.m.planes[0].length,
+                                  PROT_READ | PROT_WRITE,
+                                  MAP_SHARED,
+                                  buffer.m.planes[0].m.mem_offset);
+    if (address == MAP_FAILED) {
+      PLOGF(ERROR) << "mmap() failed";
+      return false;
+    }
+    input_buffer_map_[i].address = address;
+    input_buffer_map_[i].length = buffer.m.planes[0].length;
+  }
+
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::CreateOutputBuffers() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  DCHECK(!output_streamon_);
+  DCHECK(output_buffer_map_.empty());
+  DCHECK(surfaces_at_display_.empty());
+  DCHECK(surfaces_at_device_.empty());
+
+  visible_size_ = decoder_->GetPicSize();
+  size_t num_pictures = decoder_->GetRequiredNumOfPictures();
+
+  DCHECK_GT(num_pictures, 0u);
+  DCHECK(!visible_size_.IsEmpty());
+
+  struct v4l2_format format;
+  memset(&format, 0, sizeof(format));
+  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  format.fmt.pix_mp.pixelformat = output_format_fourcc_;
+  format.fmt.pix_mp.width = visible_size_.width();
+  format.fmt.pix_mp.height = visible_size_.height();
+  format.fmt.pix_mp.num_planes = input_planes_count_;
+
+  if (device_->Ioctl(VIDIOC_S_FMT, &format) != 0) {
+    PLOGF(ERROR) << "Failed setting format to: " << output_format_fourcc_;
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+
+  coded_size_.SetSize(base::checked_cast<int>(format.fmt.pix_mp.width),
+                      base::checked_cast<int>(format.fmt.pix_mp.height));
+  DCHECK_EQ(coded_size_.width() % 16, 0);
+  DCHECK_EQ(coded_size_.height() % 16, 0);
+
+  if (!Rect(coded_size_).Contains(Rect(visible_size_))) {
+    LOGF(ERROR) << "Got invalid adjusted coded size: "
+                << coded_size_.ToString();
+    return false;
+  }
+
+  DVLOGF(3) << "buffer_count=" << num_pictures
+            << ", visible size=" << visible_size_.ToString()
+            << ", coded size=" << coded_size_.ToString();
+
+  // With ALLOCATE mode the client can sample it as RGB and doesn't need to
+  // know the precise format.
+  VideoPixelFormat pixel_format =
+      (output_mode_ == Config::OutputMode::IMPORT)
+          ? V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_)
+          : PIXEL_FORMAT_UNKNOWN;
+
+  child_task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(&VideoDecodeAccelerator::Client::ProvidePictureBuffers,
+                 client_, num_pictures, pixel_format, coded_size_));
+
+  // Go into kAwaitingPictureBuffers to prevent us from doing any more
+  // decoding or event handling while we are waiting for AssignPictureBuffers().
+  // Not having Pictures available would not have entirely prevented us from
+  // making decoding progress, e.g. in the case of H.264 we could still decode
+  // non-slice NALUs and could even see another resolution change before we
+  // are done with this one. After we get the buffers, we'll go back into
+  // kIdle and kick off further event processing, eventually returning to
+  // kDecoding once no more events are pending (if any).
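+  // Rough state flow for this path (illustrative):
+  // kDecoding -> kIdle (event pending) -> kAwaitingPictureBuffers (here)
+  // -> kIdle (buffers assigned) -> kDecoding (no more pending events).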
+  state_ = kAwaitingPictureBuffers;
+  return true;
+}
+
+void V4L2SliceVideoDecodeAccelerator::DestroyInputBuffers() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() ||
+         !decoder_thread_.IsRunning());
+  DCHECK(!input_streamon_);
+
+  if (input_buffer_map_.empty())
+    return;
+
+  for (auto& input_record : input_buffer_map_) {
+    if (input_record.address != nullptr)
+      device_->Munmap(input_record.address, input_record.length);
+  }
+
+  struct v4l2_requestbuffers reqbufs;
+  memset(&reqbufs, 0, sizeof(reqbufs));
+  reqbufs.count = 0;
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  reqbufs.memory = V4L2_MEMORY_MMAP;
+  IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs);
+
+  input_buffer_map_.clear();
+  free_input_buffers_.clear();
+}
+
+void V4L2SliceVideoDecodeAccelerator::DismissPictures(
+    const std::vector<int32_t>& picture_buffer_ids,
+    base::WaitableEvent* done) {
+  DVLOGF(3);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  for (auto picture_buffer_id : picture_buffer_ids) {
+    DVLOGF(1) << "dismissing PictureBuffer id=" << picture_buffer_id;
+    client_->DismissPictureBuffer(picture_buffer_id);
+  }
+
+  done->Signal();
+}
+
+void V4L2SliceVideoDecodeAccelerator::DevicePollTask(bool poll_device) {
+  DVLOGF(4);
+  DCHECK(device_poll_thread_.task_runner()->BelongsToCurrentThread());
+
+  bool event_pending;
+  if (!device_->Poll(poll_device, &event_pending)) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  // All processing should happen on ServiceDeviceTask(), since we shouldn't
+  // touch decoder state from this thread.
+  decoder_thread_task_runner_->PostTask(
+      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask,
+                            base::Unretained(this)));
+}
+
+void V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask() {
+  DVLOGF(4);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  // ServiceDeviceTask() should only ever be scheduled from DevicePollTask().
+
+  Dequeue();
+  SchedulePollIfNeeded();
+}
+
+void V4L2SliceVideoDecodeAccelerator::SchedulePollIfNeeded() {
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  if (!device_poll_thread_.IsRunning()) {
+    DVLOGF(2) << "Device poll thread stopped, will not schedule poll";
+    return;
+  }
+
+  DCHECK(input_streamon_ || output_streamon_);
+
+  if (input_buffer_queued_count_ + output_buffer_queued_count_ == 0) {
+    DVLOGF(4) << "No buffers queued, will not schedule poll";
+    return;
+  }
+
+  DVLOGF(4) << "Scheduling device poll task";
+
+  device_poll_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask,
+                            base::Unretained(this), true));
+
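+  // The log below reads: bitstream buffers awaiting decode => input queue
+  // [free + queued-at-device / total] -> output queue [free + queued / total]
+  // => decoded surfaces awaiting display => surfaces held by the client.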
+  DVLOGF(2) << "buffer counts: "
+            << "INPUT[" << decoder_input_queue_.size() << "]"
+            << " => DEVICE["
+            << free_input_buffers_.size() << "+"
+            << input_buffer_queued_count_ << "/"
+            << input_buffer_map_.size() << "]->["
+            << free_output_buffers_.size() << "+"
+            << output_buffer_queued_count_ << "/"
+            << output_buffer_map_.size() << "]"
+            << " => DISPLAYQ[" << decoder_display_queue_.size() << "]"
+            << " => CLIENT[" << surfaces_at_display_.size() << "]";
+}
+
+void V4L2SliceVideoDecodeAccelerator::Enqueue(
+    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  const int old_inputs_queued = input_buffer_queued_count_;
+  const int old_outputs_queued = output_buffer_queued_count_;
+
+  if (!EnqueueInputRecord(dec_surface->input_record(),
+                          dec_surface->config_store())) {
+    DVLOGF(1) << "Failed queueing an input buffer";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  if (!EnqueueOutputRecord(dec_surface->output_record())) {
+    DVLOGF(1) << "Failed queueing an output buffer";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  bool inserted =
+      surfaces_at_device_
+          .insert(std::make_pair(dec_surface->output_record(), dec_surface))
+          .second;
+  DCHECK(inserted);
+
+  if (old_inputs_queued == 0 && old_outputs_queued == 0)
+    SchedulePollIfNeeded();
+}
+
+void V4L2SliceVideoDecodeAccelerator::Dequeue() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  struct v4l2_buffer dqbuf;
+  struct v4l2_plane planes[VIDEO_MAX_PLANES];
+  while (input_buffer_queued_count_ > 0) {
+    DCHECK(input_streamon_);
+    memset(&dqbuf, 0, sizeof(dqbuf));
+    memset(&planes, 0, sizeof(planes));
+    dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+    dqbuf.memory = V4L2_MEMORY_MMAP;
+    dqbuf.m.planes = planes;
+    dqbuf.length = input_planes_count_;
+    if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
+      if (errno == EAGAIN) {
+        // EAGAIN if we're just out of buffers to dequeue.
+        break;
+      }
+      PLOGF(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
+      NOTIFY_ERROR(PLATFORM_FAILURE);
+      return;
+    }
+    InputRecord& input_record = input_buffer_map_[dqbuf.index];
+    DCHECK(input_record.at_device);
+    input_record.at_device = false;
+    ReuseInputBuffer(dqbuf.index);
+    input_buffer_queued_count_--;
+    DVLOGF(4) << "Dequeued input=" << dqbuf.index
+              << " count: " << input_buffer_queued_count_;
+  }
+
+  while (output_buffer_queued_count_ > 0) {
+    DCHECK(output_streamon_);
+    memset(&dqbuf, 0, sizeof(dqbuf));
+    memset(&planes, 0, sizeof(planes));
+    dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    dqbuf.memory =
+        (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP
+                                                      : V4L2_MEMORY_DMABUF);
+    dqbuf.m.planes = planes;
+    dqbuf.length = output_planes_count_;
+    if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
+      if (errno == EAGAIN) {
+        // EAGAIN if we're just out of buffers to dequeue.
+        break;
+      }
+      PLOGF(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
+      NOTIFY_ERROR(PLATFORM_FAILURE);
+      return;
+    }
+    OutputRecord& output_record = output_buffer_map_[dqbuf.index];
+    DCHECK(output_record.at_device);
+    output_record.at_device = false;
+    output_buffer_queued_count_--;
+    DVLOGF(3) << "Dequeued output=" << dqbuf.index
+              << " count " << output_buffer_queued_count_;
+
+    V4L2DecodeSurfaceByOutputId::iterator it =
+        surfaces_at_device_.find(dqbuf.index);
+    if (it == surfaces_at_device_.end()) {
+      DLOGF(ERROR) << "Got invalid surface from device.";
+      NOTIFY_ERROR(PLATFORM_FAILURE);
+      return;
+    }
+
+    it->second->SetDecoded();
+    surfaces_at_device_.erase(it);
+  }
+
+  // A frame was decoded, see if we can output it.
+  TryOutputSurfaces();
+
+  ProcessPendingEventsIfNeeded();
+  ScheduleDecodeBufferTaskIfNeeded();
+}
+
+void V4L2SliceVideoDecodeAccelerator::NewEventPending() {
+  // Switch to event processing mode if we are decoding. Otherwise we are either
+  // already in it, or we will potentially switch to it later, after finishing
+  // other tasks.
+  if (state_ == kDecoding)
+    state_ = kIdle;
+
+  ProcessPendingEventsIfNeeded();
+}
+
+bool V4L2SliceVideoDecodeAccelerator::FinishEventProcessing() {
+  DCHECK_EQ(state_, kIdle);
+
+  state_ = kDecoding;
+  ScheduleDecodeBufferTaskIfNeeded();
+
+  return true;
+}
+
+void V4L2SliceVideoDecodeAccelerator::ProcessPendingEventsIfNeeded() {
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  // Process pending events, if any, in the correct order.
+  // We always first process the surface set change, as it is an internal
+  // event from the decoder and interleaving it with external requests would
+  // put the decoder in an undefined state.
+  using ProcessFunc = bool (V4L2SliceVideoDecodeAccelerator::*)();
+  const ProcessFunc process_functions[] = {
+      &V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChange,
+      &V4L2SliceVideoDecodeAccelerator::FinishFlush,
+      &V4L2SliceVideoDecodeAccelerator::FinishReset,
+      &V4L2SliceVideoDecodeAccelerator::FinishEventProcessing,
+  };
+
+  for (const auto& fn : process_functions) {
+    if (state_ != kIdle)
+      return;
+
+    if (!(this->*fn)())
+      return;
+  }
+}
+
+void V4L2SliceVideoDecodeAccelerator::ReuseInputBuffer(int index) {
+  DVLOGF(4) << "Reusing input buffer, index=" << index;
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
+  InputRecord& input_record = input_buffer_map_[index];
+
+  DCHECK(!input_record.at_device);
+  input_record.input_id = -1;
+  input_record.bytes_used = 0;
+
+  DCHECK_EQ(
+      std::count(free_input_buffers_.begin(), free_input_buffers_.end(), index),
+      0);
+  free_input_buffers_.push_back(index);
+}
+
+void V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer(int index) {
+  DVLOGF(4) << "Reusing output buffer, index=" << index;
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));
+  OutputRecord& output_record = output_buffer_map_[index];
+  DCHECK(!output_record.at_device);
+  DCHECK(!output_record.at_client);
+
+  DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
+                       index),
+            0);
+  free_output_buffers_.push_back(index);
+
+  ScheduleDecodeBufferTaskIfNeeded();
+}
+
+bool V4L2SliceVideoDecodeAccelerator::EnqueueInputRecord(
+    int index,
+    uint32_t config_store) {
+  DVLOGF(3);
+  DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
+  DCHECK_GT(config_store, 0u);
+
+  // Enqueue an input (VIDEO_OUTPUT) buffer for an input video frame.
+  InputRecord& input_record = input_buffer_map_[index];
+  DCHECK(!input_record.at_device);
+  struct v4l2_buffer qbuf;
+  struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
+  memset(&qbuf, 0, sizeof(qbuf));
+  memset(qbuf_planes, 0, sizeof(qbuf_planes));
+  qbuf.index = index;
+  qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  qbuf.memory = V4L2_MEMORY_MMAP;
+  qbuf.m.planes = qbuf_planes;
+  qbuf.m.planes[0].bytesused = input_record.bytes_used;
+  qbuf.length = input_planes_count_;
+  qbuf.config_store = config_store;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
+  input_record.at_device = true;
+  input_buffer_queued_count_++;
+  DVLOGF(4) << "Enqueued input=" << qbuf.index
+            << " count: " << input_buffer_queued_count_;
+
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
+  DVLOGF(3);
+  DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));
+
+  // Enqueue an output (VIDEO_CAPTURE) buffer.
+  OutputRecord& output_record = output_buffer_map_[index];
+  DCHECK(!output_record.at_device);
+  DCHECK(!output_record.at_client);
+  DCHECK_NE(output_record.picture_id, -1);
+
+  struct v4l2_buffer qbuf;
+  struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
+  memset(&qbuf, 0, sizeof(qbuf));
+  memset(qbuf_planes, 0, sizeof(qbuf_planes));
+  qbuf.index = index;
+  qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  if (output_mode_ == Config::OutputMode::ALLOCATE) {
+    qbuf.memory = V4L2_MEMORY_MMAP;
+  } else {
+    qbuf.memory = V4L2_MEMORY_DMABUF;
+    DCHECK_EQ(output_planes_count_, output_record.dmabuf_fds.size());
+    for (size_t i = 0; i < output_record.dmabuf_fds.size(); ++i) {
+      DCHECK(output_record.dmabuf_fds[i].is_valid());
+      qbuf_planes[i].m.fd = output_record.dmabuf_fds[i].get();
+    }
+  }
+  qbuf.m.planes = qbuf_planes;
+  qbuf.length = output_planes_count_;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
+  output_record.at_device = true;
+  output_buffer_queued_count_++;
+  DVLOGF(4) << "Enqueued output=" << qbuf.index
+            << " count: " << output_buffer_queued_count_;
+
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::StartDevicePoll() {
+  DVLOGF(3) << "Starting device poll";
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  DCHECK(!device_poll_thread_.IsRunning());
+
+  // Start up the device poll thread and schedule its first DevicePollTask().
+  if (!device_poll_thread_.Start()) {
+    DLOGF(ERROR) << "Device thread failed to start";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+  if (!input_streamon_) {
+    __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMON, &type);
+    input_streamon_ = true;
+  }
+
+  if (!output_streamon_) {
+    __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMON, &type);
+    output_streamon_ = true;
+  }
+
+  device_poll_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask,
+                            base::Unretained(this), true));
+
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::StopDevicePoll(bool keep_input_state) {
+  DVLOGF(3) << "Stopping device poll";
+  if (decoder_thread_.IsRunning())
+    DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  // Signal the DevicePollTask() to stop, and stop the device poll thread.
+  if (!device_->SetDevicePollInterrupt()) {
+    PLOGF(ERROR) << "SetDevicePollInterrupt(): failed";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+  device_poll_thread_.Stop();
+  DVLOGF(3) << "Device poll thread stopped";
+
+  // Clear the interrupt now, to be sure.
+  if (!device_->ClearDevicePollInterrupt()) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+
+  if (!keep_input_state) {
+    if (input_streamon_) {
+      __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+      IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
+    }
+    input_streamon_ = false;
+  }
+
+  if (output_streamon_) {
+    __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
+  }
+  output_streamon_ = false;
+
+  if (!keep_input_state) {
+    for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
+      InputRecord& input_record = input_buffer_map_[i];
+      if (input_record.at_device) {
+        input_record.at_device = false;
+        ReuseInputBuffer(i);
+        input_buffer_queued_count_--;
+      }
+    }
+    DCHECK_EQ(input_buffer_queued_count_, 0);
+  }
+
+  // STREAMOFF makes the driver drop all buffers without decoding and DQBUFing,
+  // so we mark them all as at_device = false and clear surfaces_at_device_.
+  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
+    OutputRecord& output_record = output_buffer_map_[i];
+    if (output_record.at_device) {
+      output_record.at_device = false;
+      output_buffer_queued_count_--;
+    }
+  }
+  surfaces_at_device_.clear();
+  DCHECK_EQ(output_buffer_queued_count_, 0);
+
+  // Drop all surfaces that were awaiting decode before being displayed,
+  // since we've just cancelled all outstanding decodes.
+  while (!decoder_display_queue_.empty())
+    decoder_display_queue_.pop();
+
+  DVLOGF(3) << "Device poll stopped";
+  return true;
+}
+
+void V4L2SliceVideoDecodeAccelerator::Decode(
+    const BitstreamBuffer& bitstream_buffer) {
+  DVLOGF(3) << "input_id=" << bitstream_buffer.id()
+            << ", size=" << bitstream_buffer.size();
+  DCHECK(decode_task_runner_->BelongsToCurrentThread());
+
+  if (bitstream_buffer.id() < 0) {
+    LOGF(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+    if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+      base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  decoder_thread_task_runner_->PostTask(
+      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeTask,
+                            base::Unretained(this), bitstream_buffer));
+}
+
+void V4L2SliceVideoDecodeAccelerator::DecodeTask(
+    const BitstreamBuffer& bitstream_buffer) {
+  DVLOGF(3) << "input_id=" << bitstream_buffer.id()
+            << " size=" << bitstream_buffer.size();
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  std::unique_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
+      decode_client_, decode_task_runner_,
+      new SharedMemoryRegion(bitstream_buffer, true), bitstream_buffer.id()));
+
+  // Skip an empty buffer. The BitstreamBufferRef destructor will still
+  // return it to the client via NotifyEndOfBitstreamBuffer().
+  if (bitstream_buffer.size() == 0)
+    return;
+
+  if (!bitstream_record->shm->Map()) {
+    LOGF(ERROR) << "Could not map bitstream_buffer";
+    NOTIFY_ERROR(UNREADABLE_INPUT);
+    return;
+  }
+  DVLOGF(3) << "mapped at=" << bitstream_record->shm->memory();
+
+  decoder_input_queue_.push(
+      linked_ptr<BitstreamBufferRef>(bitstream_record.release()));
+
+  ScheduleDecodeBufferTaskIfNeeded();
+}
+
+bool V4L2SliceVideoDecodeAccelerator::TrySetNewBistreamBuffer() {
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  DCHECK(!decoder_current_bitstream_buffer_);
+
+  if (decoder_input_queue_.empty())
+    return false;
+
+  decoder_current_bitstream_buffer_.reset(
+      decoder_input_queue_.front().release());
+  decoder_input_queue_.pop();
+
+  if (decoder_current_bitstream_buffer_->input_id == kFlushBufferId) {
+    // This is a buffer we queued for ourselves to trigger flush at this time.
+    InitiateFlush();
+    return false;
+  }
+
+  const uint8_t* const data = reinterpret_cast<const uint8_t*>(
+      decoder_current_bitstream_buffer_->shm->memory());
+  const size_t data_size = decoder_current_bitstream_buffer_->shm->size();
+  decoder_->SetStream(data, data_size);
+
+  return true;
+}
+
+void V4L2SliceVideoDecodeAccelerator::ScheduleDecodeBufferTaskIfNeeded() {
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  if (state_ == kDecoding) {
+    decoder_thread_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeBufferTask,
+                   base::Unretained(this)));
+  }
+}
+
+void V4L2SliceVideoDecodeAccelerator::DecodeBufferTask() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  if (state_ != kDecoding) {
+    DVLOGF(3) << "Early exit, not in kDecoding";
+    return;
+  }
+
+  while (true) {
+    AcceleratedVideoDecoder::DecodeResult res = decoder_->Decode();
+    switch (res) {
+      case AcceleratedVideoDecoder::kAllocateNewSurfaces:
+        DVLOGF(2) << "Decoder requesting a new set of surfaces";
+        InitiateSurfaceSetChange();
+        return;
+
+      case AcceleratedVideoDecoder::kRanOutOfStreamData:
+        decoder_current_bitstream_buffer_.reset();
+        if (!TrySetNewBistreamBuffer())
+          return;
+
+        break;
+
+      case AcceleratedVideoDecoder::kRanOutOfSurfaces:
+        // No more surfaces for the decoder, we'll come back once we have more.
+        DVLOGF(4) << "Ran out of surfaces";
+        return;
+
+      case AcceleratedVideoDecoder::kNeedContextUpdate:
+        DVLOGF(4) << "Awaiting context update";
+        return;
+
+      case AcceleratedVideoDecoder::kDecodeError:
+        DVLOGF(1) << "Error decoding stream";
+        NOTIFY_ERROR(PLATFORM_FAILURE);
+        return;
+    }
+  }
+}
+
+void V4L2SliceVideoDecodeAccelerator::InitiateSurfaceSetChange() {
+  DVLOGF(2);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  DCHECK_EQ(state_, kDecoding);
+
+  DCHECK(!surface_set_change_pending_);
+  surface_set_change_pending_ = true;
+  NewEventPending();
+}
+
+bool V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChange() {
+  DVLOGF(2);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  if (!surface_set_change_pending_)
+    return true;
+
+  if (!surfaces_at_device_.empty())
+    return false;
+
+  DCHECK_EQ(state_, kIdle);
+  DCHECK(decoder_display_queue_.empty());
+  // All output buffers should've been returned from decoder and device by now.
+  // The only remaining owner of surfaces may be display (client), and we will
+  // dismiss them when destroying output buffers below.
+  DCHECK_EQ(free_output_buffers_.size() + surfaces_at_display_.size(),
+            output_buffer_map_.size());
+
+  // Keep input queue running while we switch outputs.
+  if (!StopDevicePoll(true)) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+
+  // This will return only once all buffers are dismissed and destroyed.
+  // It does not wait until they are displayed, however, as the display
+  // retains references to the buffers bound to textures and will release
+  // them after displaying.
+  if (!DestroyOutputs(true)) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+
+  if (!CreateOutputBuffers()) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+
+  surface_set_change_pending_ = false;
+  DVLOGF(3) << "Surface set change finished";
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  std::vector<int32_t> picture_buffers_to_dismiss;
+
+  if (output_buffer_map_.empty())
+    return true;
+
+  for (const auto& output_record : output_buffer_map_) {
+    DCHECK(!output_record.at_device);
+    picture_buffers_to_dismiss.push_back(output_record.picture_id);
+  }
+
+  if (dismiss) {
+    DVLOGF(2) << "Scheduling picture dismissal";
+    base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+                             base::WaitableEvent::InitialState::NOT_SIGNALED);
+    child_task_runner_->PostTask(
+        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DismissPictures,
+                              weak_this_, picture_buffers_to_dismiss, &done));
+    done.Wait();
+  }
+
+  // At this point the client can't call ReusePictureBuffer() on any of the
+  // pictures anymore, so it's safe to destroy them.
+  return DestroyOutputBuffers();
+}
+
+bool V4L2SliceVideoDecodeAccelerator::DestroyOutputBuffers() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() ||
+         !decoder_thread_.IsRunning());
+  DCHECK(!output_streamon_);
+  DCHECK(surfaces_at_device_.empty());
+  DCHECK(decoder_display_queue_.empty());
+  DCHECK_EQ(surfaces_at_display_.size() + free_output_buffers_.size(),
+            output_buffer_map_.size());
+
+  if (output_buffer_map_.empty())
+    return true;
+
+  // It's ok to do this; the client will retain references to the textures,
+  // but we are not interested in reusing the surfaces anymore.
+  // This will prevent us from reusing old surfaces in case we have some
+  // ReusePictureBuffer() pending on ChildThread already. It's ok to ignore
+  // them, because we have already dismissed them (in DestroyOutputs()).
+  for (const auto& surface_at_display : surfaces_at_display_) {
+    size_t index = surface_at_display.second->output_record();
+    DCHECK_LT(index, output_buffer_map_.size());
+    OutputRecord& output_record = output_buffer_map_[index];
+    DCHECK(output_record.at_client);
+    output_record.at_client = false;
+  }
+  surfaces_at_display_.clear();
+  DCHECK_EQ(free_output_buffers_.size(), output_buffer_map_.size());
+
+  free_output_buffers_.clear();
+  output_buffer_map_.clear();
+
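+  // A VIDIOC_REQBUFS call with count = 0 frees all CAPTURE buffers previously
+  // allocated by the driver.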
+  struct v4l2_requestbuffers reqbufs;
+  memset(&reqbufs, 0, sizeof(reqbufs));
+  reqbufs.count = 0;
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  reqbufs.memory = V4L2_MEMORY_MMAP;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
+
+  return true;
+}
+
+void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
+    const std::vector<PictureBuffer>& buffers) {
+  DVLOGF(3);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  decoder_thread_task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(&V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask,
+                 base::Unretained(this), buffers));
+}
+
+void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask(
+    const std::vector<PictureBuffer>& buffers) {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  DCHECK_EQ(state_, kAwaitingPictureBuffers);
+
+  const uint32_t req_buffer_count = decoder_->GetRequiredNumOfPictures();
+
+  if (buffers.size() < req_buffer_count) {
+    DLOG(ERROR) << "Failed to provide requested picture buffers. "
+                << "(Got " << buffers.size()
+                << ", requested " << req_buffer_count << ")";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  // Allocate the output buffers.
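+  // In ALLOCATE mode the driver allocates the buffer memory itself (MMAP) and
+  // we export it as dmabufs below; in IMPORT mode the client provides dmabufs
+  // via ImportBufferForPicture().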
+  struct v4l2_requestbuffers reqbufs;
+  memset(&reqbufs, 0, sizeof(reqbufs));
+  reqbufs.count = buffers.size();
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  reqbufs.memory =
+      (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP
+                                                    : V4L2_MEMORY_DMABUF);
+  IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
+
+  if (reqbufs.count != buffers.size()) {
+    DLOGF(ERROR) << "Could not allocate enough output buffers";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  DCHECK(free_output_buffers_.empty());
+  DCHECK(output_buffer_map_.empty());
+  output_buffer_map_.resize(buffers.size());
+  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
+    DCHECK(buffers[i].size() == coded_size_);
+
+    OutputRecord& output_record = output_buffer_map_[i];
+    DCHECK(!output_record.at_device);
+    DCHECK(!output_record.at_client);
+    DCHECK_EQ(output_record.picture_id, -1);
+    DCHECK(output_record.dmabuf_fds.empty());
+    DCHECK_EQ(output_record.cleared, false);
+
+    output_record.picture_id = buffers[i].id();
+
+    // This will remain true until ImportBufferForPicture() is called, either
+    // by the client or by ourselves, if we are allocating.
+    output_record.at_client = true;
+    if (output_mode_ == Config::OutputMode::ALLOCATE) {
+      std::vector<base::ScopedFD> dmabuf_fds = device_->GetDmabufsForV4L2Buffer(
+          i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+      if (dmabuf_fds.empty()) {
+        NOTIFY_ERROR(PLATFORM_FAILURE);
+        return;
+      }
+
+      auto passed_dmabuf_fds(base::WrapUnique(
+          new std::vector<base::ScopedFD>(std::move(dmabuf_fds))));
+      ImportBufferForPictureTask(output_record.picture_id,
+                                 std::move(passed_dmabuf_fds));
+    }  // else we'll get triggered via ImportBufferForPicture() from client.
+    DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
+  }
+
+  if (!StartDevicePoll()) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  // Put us in kIdle to allow further event processing.
+  // ProcessPendingEventsIfNeeded() will put us back into kDecoding after all
+  // other pending events are processed successfully.
+  state_ = kIdle;
+  ProcessPendingEventsIfNeeded();
+}
+
+void V4L2SliceVideoDecodeAccelerator::ImportBufferForPicture(
+    int32_t picture_buffer_id,
+    const std::vector<base::FileDescriptor>& dmabuf_fds) {
+  DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  auto passed_dmabuf_fds(base::WrapUnique(new std::vector<base::ScopedFD>()));
+  for (const auto& fd : dmabuf_fds) {
+    DCHECK_NE(fd.fd, -1);
+    passed_dmabuf_fds->push_back(base::ScopedFD(fd.fd));
+  }
+
+  if (output_mode_ != Config::OutputMode::IMPORT) {
+    LOGF(ERROR) << "Cannot import in non-import mode";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  decoder_thread_task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(&V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask,
+                 base::Unretained(this), picture_buffer_id,
+                 base::Passed(&passed_dmabuf_fds)));
+}
+
+void V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask(
+    int32_t picture_buffer_id,
+    std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) {
+  DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  const auto iter =
+      std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(),
+                   [picture_buffer_id](const OutputRecord& output_record) {
+                     return output_record.picture_id == picture_buffer_id;
+                   });
+  if (iter == output_buffer_map_.end()) {
+    // It's possible that we've already posted a DismissPictureBuffer for this
+    // picture, but it has not yet executed when this ImportBufferForPicture was
+    // posted to us by the client. In that case just ignore this (we've already
+    // dismissed it and accounted for that).
+    DVLOGF(3) << "got picture id=" << picture_buffer_id
+              << " not in use (anymore?).";
+    return;
+  }
+
+  if (!iter->at_client) {
+    LOGF(ERROR) << "Cannot import buffer that not owned by client";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  size_t index = iter - output_buffer_map_.begin();
+  DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
+                       index),
+            0);
+
+  DCHECK(!iter->at_device);
+  iter->at_client = false;
+
+  DCHECK_EQ(output_planes_count_, passed_dmabuf_fds->size());
+  iter->dmabuf_fds.swap(*passed_dmabuf_fds);
+  free_output_buffers_.push_back(index);
+  ScheduleDecodeBufferTaskIfNeeded();
+}
+
+void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
+    int32_t picture_buffer_id) {
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+  DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
+
+  decoder_thread_task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(&V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask,
+                 base::Unretained(this), picture_buffer_id));
+}
+
+void V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask(
+    int32_t picture_buffer_id) {
+  DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  V4L2DecodeSurfaceByPictureBufferId::iterator it =
+      surfaces_at_display_.find(picture_buffer_id);
+  if (it == surfaces_at_display_.end()) {
+    // It's possible that we've already posted a DismissPictureBuffer for this
+    // picture, but it has not yet executed when this ReusePictureBuffer was
+    // posted to us by the client. In that case just ignore this (we've already
+    // dismissed it and accounted for that) and let the sync object get
+    // destroyed.
+    DVLOGF(3) << "got picture id=" << picture_buffer_id
+              << " not in use (anymore?).";
+    return;
+  }
+
+  OutputRecord& output_record = output_buffer_map_[it->second->output_record()];
+  if (output_record.at_device || !output_record.at_client) {
+    DVLOGF(1) << "picture_buffer_id not reusable";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  DCHECK(!output_record.at_device);
+  output_record.at_client = false;
+
+  surfaces_at_display_.erase(it);
+}
+
+void V4L2SliceVideoDecodeAccelerator::Flush() {
+  DVLOGF(3);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  decoder_thread_task_runner_->PostTask(
+      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::FlushTask,
+                            base::Unretained(this)));
+}
+
+void V4L2SliceVideoDecodeAccelerator::FlushTask() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  // Queue an empty buffer which, when reached, will trigger the flush
+  // sequence.
+  decoder_input_queue_.push(
+      linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
+          decode_client_, decode_task_runner_, nullptr, kFlushBufferId)));
+
+  ScheduleDecodeBufferTaskIfNeeded();
+}
+
+void V4L2SliceVideoDecodeAccelerator::InitiateFlush() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  // This will trigger output for all remaining surfaces in the decoder.
+  // However, not all of them may be decoded yet (they would be queued
+  // in hardware then).
+  if (!decoder_->Flush()) {
+    DVLOGF(1) << "Failed flushing the decoder.";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  // Put the decoder in an idle state, ready to resume.
+  decoder_->Reset();
+
+  DCHECK(!decoder_flushing_);
+  decoder_flushing_ = true;
+  NewEventPending();
+}
+
+bool V4L2SliceVideoDecodeAccelerator::FinishFlush() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  if (!decoder_flushing_)
+    return true;
+
+  if (!surfaces_at_device_.empty())
+    return false;
+
+  DCHECK_EQ(state_, kIdle);
+
+  // At this point, all remaining surfaces are decoded and dequeued, and since
+  // we have already scheduled output for them in InitiateFlush(), their
+  // respective PictureReady calls have been posted (or they have been queued on
+  // pending_picture_ready_). So at this point, once we SendPictureReady(),
+  // all remaining PictureReady() calls will have been posted to the client
+  // and we can post NotifyFlushDone().
+  DCHECK(decoder_display_queue_.empty());
+
+  // Decoder should have already returned all surfaces and all surfaces are
+  // out of hardware. There can be no other owners of input buffers.
+  DCHECK_EQ(free_input_buffers_.size(), input_buffer_map_.size());
+
+  SendPictureReady();
+
+  decoder_flushing_ = false;
+  DVLOGF(3) << "Flush finished";
+
+  child_task_runner_->PostTask(FROM_HERE,
+                               base::Bind(&Client::NotifyFlushDone, client_));
+
+  return true;
+}
+
+void V4L2SliceVideoDecodeAccelerator::Reset() {
+  DVLOGF(3);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  decoder_thread_task_runner_->PostTask(
+      FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::ResetTask,
+                            base::Unretained(this)));
+}
+
+void V4L2SliceVideoDecodeAccelerator::ResetTask() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  if (decoder_resetting_) {
+    // This is a bug in the client: multiple Reset()s before NotifyResetDone()
+    // are not allowed.
+    NOTREACHED() << "Client should not be requesting multiple Reset()s";
+    return;
+  }
+
+  // Put the decoder in an idle state, ready to resume.
+  decoder_->Reset();
+
+  // Drop all remaining inputs.
+  decoder_current_bitstream_buffer_.reset();
+  while (!decoder_input_queue_.empty())
+    decoder_input_queue_.pop();
+
+  decoder_resetting_ = true;
+  NewEventPending();
+}
+
+bool V4L2SliceVideoDecodeAccelerator::FinishReset() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  if (!decoder_resetting_)
+    return true;
+
+  if (!surfaces_at_device_.empty())
+    return false;
+
+  DCHECK_EQ(state_, kIdle);
+  DCHECK(!decoder_flushing_);
+  SendPictureReady();
+
+  // Drop any pending outputs.
+  while (!decoder_display_queue_.empty())
+    decoder_display_queue_.pop();
+
+  // At this point there can be no input buffers in the decoder, because we
+  // Reset() it in ResetTask() and, having been in kIdle since, have not
+  // scheduled any new Decode()s. We don't have any surfaces in the HW either -
+  // we just checked that surfaces_at_device_ is empty, and inputs are tied
+  // to surfaces. Since there can be no other owners of input buffers, we can
+  // simply mark them all as available.
+  DCHECK_EQ(input_buffer_queued_count_, 0);
+  free_input_buffers_.clear();
+  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
+    DCHECK(!input_buffer_map_[i].at_device);
+    ReuseInputBuffer(i);
+  }
+
+  decoder_resetting_ = false;
+  DVLOGF(3) << "Reset finished";
+
+  child_task_runner_->PostTask(FROM_HERE,
+                               base::Bind(&Client::NotifyResetDone, client_));
+
+  return true;
+}
+
+void V4L2SliceVideoDecodeAccelerator::SetErrorState(Error error) {
+  // We can touch state_ only on the decoder thread, or when the decoder
+  // thread isn't running.
+  if (decoder_thread_.IsRunning() &&
+      !decoder_thread_task_runner_->BelongsToCurrentThread()) {
+    decoder_thread_task_runner_->PostTask(
+        FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::SetErrorState,
+                              base::Unretained(this), error));
+    return;
+  }
+
+  // Post NotifyError only if we are already initialized, as the API does
+  // not allow doing so before that.
+  if (state_ != kError && state_ != kUninitialized)
+    NotifyError(error);
+
+  state_ = kError;
+}
+
+V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::V4L2H264Accelerator(
+    V4L2SliceVideoDecodeAccelerator* v4l2_dec)
+    : num_slices_(0), v4l2_dec_(v4l2_dec) {
+  DCHECK(v4l2_dec_);
+}
+
+V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::~V4L2H264Accelerator() {}
+
+scoped_refptr<H264Picture>
+V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::CreateH264Picture() {
+  scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
+  if (!dec_surface)
+    return nullptr;
+
+  return new V4L2H264Picture(dec_surface);
+}
+
+void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::
+    H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
+                                    uint8_t dst_list[kDPBIndicesListSize]) {
+  size_t i;
+  for (i = 0; i < src_pic_list.size() && i < kDPBIndicesListSize; ++i) {
+    const scoped_refptr<H264Picture>& pic = src_pic_list[i];
+    dst_list[i] = pic ? pic->dpb_position : VIDEO_MAX_FRAME;
+  }
+
+  while (i < kDPBIndicesListSize)
+    dst_list[i++] = VIDEO_MAX_FRAME;
+}
+
+void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::H264DPBToV4L2DPB(
+    const H264DPB& dpb,
+    std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces) {
+  memset(v4l2_decode_param_.dpb, 0, sizeof(v4l2_decode_param_.dpb));
+  size_t i = 0;
+  for (const auto& pic : dpb) {
+    if (i >= arraysize(v4l2_decode_param_.dpb)) {
+      DVLOGF(1) << "Invalid DPB size";
+      break;
+    }
+
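+    // VIDEO_MAX_FRAME doubles as an "unused" marker for DPB entries that have
+    // no backing CAPTURE buffer (e.g. nonexisting pictures).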
+    int index = VIDEO_MAX_FRAME;
+    if (!pic->nonexisting) {
+      scoped_refptr<V4L2DecodeSurface> dec_surface =
+          H264PictureToV4L2DecodeSurface(pic);
+      index = dec_surface->output_record();
+      ref_surfaces->push_back(dec_surface);
+    }
+
+    struct v4l2_h264_dpb_entry& entry = v4l2_decode_param_.dpb[i++];
+    entry.buf_index = index;
+    entry.frame_num = pic->frame_num;
+    entry.pic_num = pic->pic_num;
+    entry.top_field_order_cnt = pic->top_field_order_cnt;
+    entry.bottom_field_order_cnt = pic->bottom_field_order_cnt;
+    entry.flags = (pic->ref ? V4L2_H264_DPB_ENTRY_FLAG_ACTIVE : 0) |
+                  (pic->long_term ? V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM : 0);
+  }
+}
+
+bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitFrameMetadata(
+    const H264SPS* sps,
+    const H264PPS* pps,
+    const H264DPB& dpb,
+    const H264Picture::Vector& ref_pic_listp0,
+    const H264Picture::Vector& ref_pic_listb0,
+    const H264Picture::Vector& ref_pic_listb1,
+    const scoped_refptr<H264Picture>& pic) {
+  struct v4l2_ext_control ctrl;
+  std::vector<struct v4l2_ext_control> ctrls;
+
+  struct v4l2_ctrl_h264_sps v4l2_sps;
+  memset(&v4l2_sps, 0, sizeof(v4l2_sps));
+  v4l2_sps.constraint_set_flags =
+      (sps->constraint_set0_flag ? V4L2_H264_SPS_CONSTRAINT_SET0_FLAG : 0) |
+      (sps->constraint_set1_flag ? V4L2_H264_SPS_CONSTRAINT_SET1_FLAG : 0) |
+      (sps->constraint_set2_flag ? V4L2_H264_SPS_CONSTRAINT_SET2_FLAG : 0) |
+      (sps->constraint_set3_flag ? V4L2_H264_SPS_CONSTRAINT_SET3_FLAG : 0) |
+      (sps->constraint_set4_flag ? V4L2_H264_SPS_CONSTRAINT_SET4_FLAG : 0) |
+      (sps->constraint_set5_flag ? V4L2_H264_SPS_CONSTRAINT_SET5_FLAG : 0);
+#define SPS_TO_V4L2SPS(a) v4l2_sps.a = sps->a
+  SPS_TO_V4L2SPS(profile_idc);
+  SPS_TO_V4L2SPS(level_idc);
+  SPS_TO_V4L2SPS(seq_parameter_set_id);
+  SPS_TO_V4L2SPS(chroma_format_idc);
+  SPS_TO_V4L2SPS(bit_depth_luma_minus8);
+  SPS_TO_V4L2SPS(bit_depth_chroma_minus8);
+  SPS_TO_V4L2SPS(log2_max_frame_num_minus4);
+  SPS_TO_V4L2SPS(pic_order_cnt_type);
+  SPS_TO_V4L2SPS(log2_max_pic_order_cnt_lsb_minus4);
+  SPS_TO_V4L2SPS(offset_for_non_ref_pic);
+  SPS_TO_V4L2SPS(offset_for_top_to_bottom_field);
+  SPS_TO_V4L2SPS(num_ref_frames_in_pic_order_cnt_cycle);
+
+  static_assert(arraysize(v4l2_sps.offset_for_ref_frame) ==
+                    arraysize(sps->offset_for_ref_frame),
+                "offset_for_ref_frame arrays must be same size");
+  for (size_t i = 0; i < arraysize(v4l2_sps.offset_for_ref_frame); ++i)
+    v4l2_sps.offset_for_ref_frame[i] = sps->offset_for_ref_frame[i];
+  SPS_TO_V4L2SPS(max_num_ref_frames);
+  SPS_TO_V4L2SPS(pic_width_in_mbs_minus1);
+  SPS_TO_V4L2SPS(pic_height_in_map_units_minus1);
+#undef SPS_TO_V4L2SPS
+
+#define SET_V4L2_SPS_FLAG_IF(cond, flag) \
+  v4l2_sps.flags |= ((sps->cond) ? (flag) : 0)
+  SET_V4L2_SPS_FLAG_IF(separate_colour_plane_flag,
+                       V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE);
+  SET_V4L2_SPS_FLAG_IF(qpprime_y_zero_transform_bypass_flag,
+                       V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS);
+  SET_V4L2_SPS_FLAG_IF(delta_pic_order_always_zero_flag,
+                       V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO);
+  SET_V4L2_SPS_FLAG_IF(gaps_in_frame_num_value_allowed_flag,
+                       V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED);
+  SET_V4L2_SPS_FLAG_IF(frame_mbs_only_flag, V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY);
+  SET_V4L2_SPS_FLAG_IF(mb_adaptive_frame_field_flag,
+                       V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
+  SET_V4L2_SPS_FLAG_IF(direct_8x8_inference_flag,
+                       V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE);
+#undef SET_V4L2_SPS_FLAG_IF
+  memset(&ctrl, 0, sizeof(ctrl));
+  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SPS;
+  ctrl.size = sizeof(v4l2_sps);
+  ctrl.p_h264_sps = &v4l2_sps;
+  ctrls.push_back(ctrl);
+
+  struct v4l2_ctrl_h264_pps v4l2_pps;
+  memset(&v4l2_pps, 0, sizeof(v4l2_pps));
+#define PPS_TO_V4L2PPS(a) v4l2_pps.a = pps->a
+  PPS_TO_V4L2PPS(pic_parameter_set_id);
+  PPS_TO_V4L2PPS(seq_parameter_set_id);
+  PPS_TO_V4L2PPS(num_slice_groups_minus1);
+  PPS_TO_V4L2PPS(num_ref_idx_l0_default_active_minus1);
+  PPS_TO_V4L2PPS(num_ref_idx_l1_default_active_minus1);
+  PPS_TO_V4L2PPS(weighted_bipred_idc);
+  PPS_TO_V4L2PPS(pic_init_qp_minus26);
+  PPS_TO_V4L2PPS(pic_init_qs_minus26);
+  PPS_TO_V4L2PPS(chroma_qp_index_offset);
+  PPS_TO_V4L2PPS(second_chroma_qp_index_offset);
+#undef PPS_TO_V4L2PPS
+
+#define SET_V4L2_PPS_FLAG_IF(cond, flag) \
+  v4l2_pps.flags |= ((pps->cond) ? (flag) : 0)
+  SET_V4L2_PPS_FLAG_IF(entropy_coding_mode_flag,
+                       V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE);
+  SET_V4L2_PPS_FLAG_IF(
+      bottom_field_pic_order_in_frame_present_flag,
+      V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT);
+  SET_V4L2_PPS_FLAG_IF(weighted_pred_flag, V4L2_H264_PPS_FLAG_WEIGHTED_PRED);
+  SET_V4L2_PPS_FLAG_IF(deblocking_filter_control_present_flag,
+                       V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT);
+  SET_V4L2_PPS_FLAG_IF(constrained_intra_pred_flag,
+                       V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED);
+  SET_V4L2_PPS_FLAG_IF(redundant_pic_cnt_present_flag,
+                       V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT);
+  SET_V4L2_PPS_FLAG_IF(transform_8x8_mode_flag,
+                       V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE);
+  SET_V4L2_PPS_FLAG_IF(pic_scaling_matrix_present_flag,
+                       V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT);
+#undef SET_V4L2_PPS_FLAG_IF
+  memset(&ctrl, 0, sizeof(ctrl));
+  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_PPS;
+  ctrl.size = sizeof(v4l2_pps);
+  ctrl.p_h264_pps = &v4l2_pps;
+  ctrls.push_back(ctrl);
+
+  struct v4l2_ctrl_h264_scaling_matrix v4l2_scaling_matrix;
+  memset(&v4l2_scaling_matrix, 0, sizeof(v4l2_scaling_matrix));
+
+  static_assert(arraysize(v4l2_scaling_matrix.scaling_list_4x4) <=
+                        arraysize(pps->scaling_list4x4) &&
+                    arraysize(v4l2_scaling_matrix.scaling_list_4x4[0]) <=
+                        arraysize(pps->scaling_list4x4[0]) &&
+                    arraysize(v4l2_scaling_matrix.scaling_list_8x8) <=
+                        arraysize(pps->scaling_list8x8) &&
+                    arraysize(v4l2_scaling_matrix.scaling_list_8x8[0]) <=
+                        arraysize(pps->scaling_list8x8[0]),
+                "scaling_lists must be of correct size");
+  static_assert(arraysize(v4l2_scaling_matrix.scaling_list_4x4) <=
+                        arraysize(sps->scaling_list4x4) &&
+                    arraysize(v4l2_scaling_matrix.scaling_list_4x4[0]) <=
+                        arraysize(sps->scaling_list4x4[0]) &&
+                    arraysize(v4l2_scaling_matrix.scaling_list_8x8) <=
+                        arraysize(sps->scaling_list8x8) &&
+                    arraysize(v4l2_scaling_matrix.scaling_list_8x8[0]) <=
+                        arraysize(sps->scaling_list8x8[0]),
+                "scaling_lists must be of correct size");
+
+  const auto* scaling_list4x4 = &sps->scaling_list4x4[0];
+  const auto* scaling_list8x8 = &sps->scaling_list8x8[0];
+  if (pps->pic_scaling_matrix_present_flag) {
+    scaling_list4x4 = &pps->scaling_list4x4[0];
+    scaling_list8x8 = &pps->scaling_list8x8[0];
+  }
+
+  for (size_t i = 0; i < arraysize(v4l2_scaling_matrix.scaling_list_4x4); ++i) {
+    for (size_t j = 0; j < arraysize(v4l2_scaling_matrix.scaling_list_4x4[i]);
+         ++j) {
+      v4l2_scaling_matrix.scaling_list_4x4[i][j] = scaling_list4x4[i][j];
+    }
+  }
+  for (size_t i = 0; i < arraysize(v4l2_scaling_matrix.scaling_list_8x8); ++i) {
+    for (size_t j = 0; j < arraysize(v4l2_scaling_matrix.scaling_list_8x8[i]);
+         ++j) {
+      v4l2_scaling_matrix.scaling_list_8x8[i][j] = scaling_list8x8[i][j];
+    }
+  }
+
+  memset(&ctrl, 0, sizeof(ctrl));
+  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX;
+  ctrl.size = sizeof(v4l2_scaling_matrix);
+  ctrl.p_h264_scal_mtrx = &v4l2_scaling_matrix;
+  ctrls.push_back(ctrl);
+
+  scoped_refptr<V4L2DecodeSurface> dec_surface =
+      H264PictureToV4L2DecodeSurface(pic);
+
+  struct v4l2_ext_controls ext_ctrls;
+  memset(&ext_ctrls, 0, sizeof(ext_ctrls));
+  ext_ctrls.count = ctrls.size();
+  ext_ctrls.controls = &ctrls[0];
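+  // The config store associates this control set with this surface's buffers,
+  // so the driver applies these parameters only when decoding this frame.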
+  ext_ctrls.config_store = dec_surface->config_store();
+  v4l2_dec_->SubmitExtControls(&ext_ctrls);
+
+  H264PictureListToDPBIndicesList(ref_pic_listp0,
+                                  v4l2_decode_param_.ref_pic_list_p0);
+  H264PictureListToDPBIndicesList(ref_pic_listb0,
+                                  v4l2_decode_param_.ref_pic_list_b0);
+  H264PictureListToDPBIndicesList(ref_pic_listb1,
+                                  v4l2_decode_param_.ref_pic_list_b1);
+
+  std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;
+  H264DPBToV4L2DPB(dpb, &ref_surfaces);
+  dec_surface->SetReferenceSurfaces(ref_surfaces);
+
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitSlice(
+    const H264PPS* pps,
+    const H264SliceHeader* slice_hdr,
+    const H264Picture::Vector& ref_pic_list0,
+    const H264Picture::Vector& ref_pic_list1,
+    const scoped_refptr<H264Picture>& pic,
+    const uint8_t* data,
+    size_t size) {
+  if (num_slices_ == kMaxSlices) {
+    LOGF(ERROR) << "Over limit of supported slices per frame";
+    return false;
+  }
+
+  struct v4l2_ctrl_h264_slice_param& v4l2_slice_param =
+      v4l2_slice_params_[num_slices_++];
+  memset(&v4l2_slice_param, 0, sizeof(v4l2_slice_param));
+
+  v4l2_slice_param.size = size;
+#define SHDR_TO_V4L2SPARM(a) v4l2_slice_param.a = slice_hdr->a
+  SHDR_TO_V4L2SPARM(header_bit_size);
+  SHDR_TO_V4L2SPARM(first_mb_in_slice);
+  SHDR_TO_V4L2SPARM(slice_type);
+  SHDR_TO_V4L2SPARM(pic_parameter_set_id);
+  SHDR_TO_V4L2SPARM(colour_plane_id);
+  SHDR_TO_V4L2SPARM(frame_num);
+  SHDR_TO_V4L2SPARM(idr_pic_id);
+  SHDR_TO_V4L2SPARM(pic_order_cnt_lsb);
+  SHDR_TO_V4L2SPARM(delta_pic_order_cnt_bottom);
+  SHDR_TO_V4L2SPARM(delta_pic_order_cnt0);
+  SHDR_TO_V4L2SPARM(delta_pic_order_cnt1);
+  SHDR_TO_V4L2SPARM(redundant_pic_cnt);
+  SHDR_TO_V4L2SPARM(dec_ref_pic_marking_bit_size);
+  SHDR_TO_V4L2SPARM(cabac_init_idc);
+  SHDR_TO_V4L2SPARM(slice_qp_delta);
+  SHDR_TO_V4L2SPARM(slice_qs_delta);
+  SHDR_TO_V4L2SPARM(disable_deblocking_filter_idc);
+  SHDR_TO_V4L2SPARM(slice_alpha_c0_offset_div2);
+  SHDR_TO_V4L2SPARM(slice_beta_offset_div2);
+  SHDR_TO_V4L2SPARM(num_ref_idx_l0_active_minus1);
+  SHDR_TO_V4L2SPARM(num_ref_idx_l1_active_minus1);
+  SHDR_TO_V4L2SPARM(pic_order_cnt_bit_size);
+#undef SHDR_TO_V4L2SPARM
+
+#define SET_V4L2_SPARM_FLAG_IF(cond, flag) \
+  v4l2_slice_param.flags |= ((slice_hdr->cond) ? (flag) : 0)
+  SET_V4L2_SPARM_FLAG_IF(field_pic_flag, V4L2_SLICE_FLAG_FIELD_PIC);
+  SET_V4L2_SPARM_FLAG_IF(bottom_field_flag, V4L2_SLICE_FLAG_BOTTOM_FIELD);
+  SET_V4L2_SPARM_FLAG_IF(direct_spatial_mv_pred_flag,
+                         V4L2_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED);
+  SET_V4L2_SPARM_FLAG_IF(sp_for_switch_flag, V4L2_SLICE_FLAG_SP_FOR_SWITCH);
+#undef SET_V4L2_SPARM_FLAG_IF
+
+  struct v4l2_h264_pred_weight_table* pred_weight_table =
+      &v4l2_slice_param.pred_weight_table;
+
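+  // Per the H.264 spec, an explicit prediction weight table is present only
+  // for P/SP slices with weighted_pred_flag set, or for B slices with
+  // weighted_bipred_idc == 1.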
+  if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) &&
+       pps->weighted_pred_flag) ||
+      (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) {
+    pred_weight_table->luma_log2_weight_denom =
+        slice_hdr->luma_log2_weight_denom;
+    pred_weight_table->chroma_log2_weight_denom =
+        slice_hdr->chroma_log2_weight_denom;
+
+    struct v4l2_h264_weight_factors* factorsl0 =
+        &pred_weight_table->weight_factors[0];
+
+    for (int i = 0; i < 32; ++i) {
+      factorsl0->luma_weight[i] =
+          slice_hdr->pred_weight_table_l0.luma_weight[i];
+      factorsl0->luma_offset[i] =
+          slice_hdr->pred_weight_table_l0.luma_offset[i];
+
+      for (int j = 0; j < 2; ++j) {
+        factorsl0->chroma_weight[i][j] =
+            slice_hdr->pred_weight_table_l0.chroma_weight[i][j];
+        factorsl0->chroma_offset[i][j] =
+            slice_hdr->pred_weight_table_l0.chroma_offset[i][j];
+      }
+    }
+
+    if (slice_hdr->IsBSlice()) {
+      struct v4l2_h264_weight_factors* factorsl1 =
+          &pred_weight_table->weight_factors[1];
+
+      for (int i = 0; i < 32; ++i) {
+        factorsl1->luma_weight[i] =
+            slice_hdr->pred_weight_table_l1.luma_weight[i];
+        factorsl1->luma_offset[i] =
+            slice_hdr->pred_weight_table_l1.luma_offset[i];
+
+        for (int j = 0; j < 2; ++j) {
+          factorsl1->chroma_weight[i][j] =
+              slice_hdr->pred_weight_table_l1.chroma_weight[i][j];
+          factorsl1->chroma_offset[i][j] =
+              slice_hdr->pred_weight_table_l1.chroma_offset[i][j];
+        }
+      }
+    }
+  }
+
+  H264PictureListToDPBIndicesList(ref_pic_list0,
+                                  v4l2_slice_param.ref_pic_list0);
+  H264PictureListToDPBIndicesList(ref_pic_list1,
+                                  v4l2_slice_param.ref_pic_list1);
+
+  scoped_refptr<V4L2DecodeSurface> dec_surface =
+      H264PictureToV4L2DecodeSurface(pic);
+
+  v4l2_decode_param_.nal_ref_idc = slice_hdr->nal_ref_idc;
+
+  // TODO(posciak): Don't add start code back here, but have it passed from
+  // the parser.
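+  // Prepend a three-byte Annex B start code (00 00 01): the memset zeroes
+  // bytes 0-1, byte 2 is set to 0x01, and the slice payload follows at byte 3.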
+  size_t data_copy_size = size + 3;
+  std::unique_ptr<uint8_t[]> data_copy(new uint8_t[data_copy_size]);
+  memset(data_copy.get(), 0, data_copy_size);
+  data_copy[2] = 0x01;
+  memcpy(data_copy.get() + 3, data, size);
+  return v4l2_dec_->SubmitSlice(dec_surface->input_record(), data_copy.get(),
+                                data_copy_size);
+}
+
+bool V4L2SliceVideoDecodeAccelerator::SubmitSlice(int index,
+                                                  const uint8_t* data,
+                                                  size_t size) {
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  InputRecord& input_record = input_buffer_map_[index];
+
+  if (input_record.bytes_used + size > input_record.length) {
+    DVLOGF(1) << "Input buffer too small";
+    return false;
+  }
+
+  memcpy(static_cast<uint8_t*>(input_record.address) + input_record.bytes_used,
+         data, size);
+  input_record.bytes_used += size;
+
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::SubmitExtControls(
+    struct v4l2_ext_controls* ext_ctrls) {
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  DCHECK_GT(ext_ctrls->config_store, 0u);
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_EXT_CTRLS, ext_ctrls);
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::GetExtControls(
+    struct v4l2_ext_controls* ext_ctrls) {
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  DCHECK_GT(ext_ctrls->config_store, 0u);
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_G_EXT_CTRLS, ext_ctrls);
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::IsCtrlExposed(uint32_t ctrl_id) {
+  struct v4l2_queryctrl query_ctrl;
+  memset(&query_ctrl, 0, sizeof(query_ctrl));
+  query_ctrl.id = ctrl_id;
+
+  return (device_->Ioctl(VIDIOC_QUERYCTRL, &query_ctrl) == 0);
+}
+
+bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitDecode(
+    const scoped_refptr<H264Picture>& pic) {
+  scoped_refptr<V4L2DecodeSurface> dec_surface =
+      H264PictureToV4L2DecodeSurface(pic);
+
+  v4l2_decode_param_.num_slices = num_slices_;
+  v4l2_decode_param_.idr_pic_flag = pic->idr;
+  v4l2_decode_param_.top_field_order_cnt = pic->top_field_order_cnt;
+  v4l2_decode_param_.bottom_field_order_cnt = pic->bottom_field_order_cnt;
+
+  struct v4l2_ext_control ctrl;
+  std::vector<struct v4l2_ext_control> ctrls;
+
+  memset(&ctrl, 0, sizeof(ctrl));
+  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM;
+  ctrl.size = sizeof(v4l2_slice_params_);
+  ctrl.p_h264_slice_param = v4l2_slice_params_;
+  ctrls.push_back(ctrl);
+
+  memset(&ctrl, 0, sizeof(ctrl));
+  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAM;
+  ctrl.size = sizeof(v4l2_decode_param_);
+  ctrl.p_h264_decode_param = &v4l2_decode_param_;
+  ctrls.push_back(ctrl);
+
+  struct v4l2_ext_controls ext_ctrls;
+  memset(&ext_ctrls, 0, sizeof(ext_ctrls));
+  ext_ctrls.count = ctrls.size();
+  ext_ctrls.controls = &ctrls[0];
+  ext_ctrls.config_store = dec_surface->config_store();
+  if (!v4l2_dec_->SubmitExtControls(&ext_ctrls))
+    return false;
+
+  Reset();
+
+  v4l2_dec_->DecodeSurface(dec_surface);
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::OutputPicture(
+    const scoped_refptr<H264Picture>& pic) {
+  scoped_refptr<V4L2DecodeSurface> dec_surface =
+      H264PictureToV4L2DecodeSurface(pic);
+  v4l2_dec_->SurfaceReady(dec_surface);
+  return true;
+}
+
+void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::Reset() {
+  num_slices_ = 0;
+  memset(&v4l2_decode_param_, 0, sizeof(v4l2_decode_param_));
+  memset(&v4l2_slice_params_, 0, sizeof(v4l2_slice_params_));
+}
+
+scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
+V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::
+    H264PictureToV4L2DecodeSurface(const scoped_refptr<H264Picture>& pic) {
+  V4L2H264Picture* v4l2_pic = pic->AsV4L2H264Picture();
+  CHECK(v4l2_pic);
+  return v4l2_pic->dec_surface();
+}
+
+V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::V4L2VP8Accelerator(
+    V4L2SliceVideoDecodeAccelerator* v4l2_dec)
+    : v4l2_dec_(v4l2_dec) {
+  DCHECK(v4l2_dec_);
+}
+
+V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::~V4L2VP8Accelerator() {}
+
+scoped_refptr<VP8Picture>
+V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::CreateVP8Picture() {
+  scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
+  if (!dec_surface)
+    return nullptr;
+
+  return new V4L2VP8Picture(dec_surface);
+}
+
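+// Copy between two fixed-size arrays, statically asserting that they are of
+// identical size so mismatched layouts fail at compile time.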
+#define ARRAY_MEMCPY_CHECKED(to, from)                               \
+  do {                                                               \
+    static_assert(sizeof(to) == sizeof(from),                        \
+                  #from " and " #to " arrays must be of same size"); \
+    memcpy(to, from, sizeof(to));                                    \
+  } while (0)
+
+static void FillV4L2SegmentationHeader(
+    const Vp8SegmentationHeader& vp8_sgmnt_hdr,
+    struct v4l2_vp8_sgmnt_hdr* v4l2_sgmnt_hdr) {
+#define SET_V4L2_SGMNT_HDR_FLAG_IF(cond, flag) \
+  v4l2_sgmnt_hdr->flags |= ((vp8_sgmnt_hdr.cond) ? (flag) : 0)
+  SET_V4L2_SGMNT_HDR_FLAG_IF(segmentation_enabled,
+                             V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED);
+  SET_V4L2_SGMNT_HDR_FLAG_IF(update_mb_segmentation_map,
+                             V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP);
+  SET_V4L2_SGMNT_HDR_FLAG_IF(update_segment_feature_data,
+                             V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA);
+#undef SET_V4L2_SGMNT_HDR_FLAG_IF
+  v4l2_sgmnt_hdr->segment_feature_mode = vp8_sgmnt_hdr.segment_feature_mode;
+
+  ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->quant_update,
+                       vp8_sgmnt_hdr.quantizer_update_value);
+  ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->lf_update,
+                       vp8_sgmnt_hdr.lf_update_value);
+  ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->segment_probs,
+                       vp8_sgmnt_hdr.segment_prob);
+}
+
+static void FillV4L2LoopfilterHeader(
+    const Vp8LoopFilterHeader& vp8_loopfilter_hdr,
+    struct v4l2_vp8_loopfilter_hdr* v4l2_lf_hdr) {
+#define SET_V4L2_LF_HDR_FLAG_IF(cond, flag) \
+  v4l2_lf_hdr->flags |= ((vp8_loopfilter_hdr.cond) ? (flag) : 0)
+  SET_V4L2_LF_HDR_FLAG_IF(loop_filter_adj_enable, V4L2_VP8_LF_HDR_ADJ_ENABLE);
+  SET_V4L2_LF_HDR_FLAG_IF(mode_ref_lf_delta_update,
+                          V4L2_VP8_LF_HDR_DELTA_UPDATE);
+#undef SET_V4L2_LF_HDR_FLAG_IF
+
+#define LF_HDR_TO_V4L2_LF_HDR(a) v4l2_lf_hdr->a = vp8_loopfilter_hdr.a;
+  LF_HDR_TO_V4L2_LF_HDR(type);
+  LF_HDR_TO_V4L2_LF_HDR(level);
+  LF_HDR_TO_V4L2_LF_HDR(sharpness_level);
+#undef LF_HDR_TO_V4L2_LF_HDR
+
+  ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->ref_frm_delta_magnitude,
+                       vp8_loopfilter_hdr.ref_frame_delta);
+  ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->mb_mode_delta_magnitude,
+                       vp8_loopfilter_hdr.mb_mode_delta);
+}
+
+static void FillV4L2QuantizationHeader(
+    const Vp8QuantizationHeader& vp8_quant_hdr,
+    struct v4l2_vp8_quantization_hdr* v4l2_quant_hdr) {
+  v4l2_quant_hdr->y_ac_qi = vp8_quant_hdr.y_ac_qi;
+  v4l2_quant_hdr->y_dc_delta = vp8_quant_hdr.y_dc_delta;
+  v4l2_quant_hdr->y2_dc_delta = vp8_quant_hdr.y2_dc_delta;
+  v4l2_quant_hdr->y2_ac_delta = vp8_quant_hdr.y2_ac_delta;
+  v4l2_quant_hdr->uv_dc_delta = vp8_quant_hdr.uv_dc_delta;
+  v4l2_quant_hdr->uv_ac_delta = vp8_quant_hdr.uv_ac_delta;
+}
+
+static void FillV4L2Vp8EntropyHeader(
+    const Vp8EntropyHeader& vp8_entropy_hdr,
+    struct v4l2_vp8_entropy_hdr* v4l2_entropy_hdr) {
+  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->coeff_probs,
+                       vp8_entropy_hdr.coeff_probs);
+  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->y_mode_probs,
+                       vp8_entropy_hdr.y_mode_probs);
+  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->uv_mode_probs,
+                       vp8_entropy_hdr.uv_mode_probs);
+  ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->mv_probs, vp8_entropy_hdr.mv_probs);
+}
+
+bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::SubmitDecode(
+    const scoped_refptr<VP8Picture>& pic,
+    const Vp8FrameHeader* frame_hdr,
+    const scoped_refptr<VP8Picture>& last_frame,
+    const scoped_refptr<VP8Picture>& golden_frame,
+    const scoped_refptr<VP8Picture>& alt_frame) {
+  struct v4l2_ctrl_vp8_frame_hdr v4l2_frame_hdr;
+  memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr));
+
+#define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a
+  FHDR_TO_V4L2_FHDR(key_frame);
+  FHDR_TO_V4L2_FHDR(version);
+  FHDR_TO_V4L2_FHDR(width);
+  FHDR_TO_V4L2_FHDR(horizontal_scale);
+  FHDR_TO_V4L2_FHDR(height);
+  FHDR_TO_V4L2_FHDR(vertical_scale);
+  FHDR_TO_V4L2_FHDR(sign_bias_golden);
+  FHDR_TO_V4L2_FHDR(sign_bias_alternate);
+  FHDR_TO_V4L2_FHDR(prob_skip_false);
+  FHDR_TO_V4L2_FHDR(prob_intra);
+  FHDR_TO_V4L2_FHDR(prob_last);
+  FHDR_TO_V4L2_FHDR(prob_gf);
+  FHDR_TO_V4L2_FHDR(bool_dec_range);
+  FHDR_TO_V4L2_FHDR(bool_dec_value);
+  FHDR_TO_V4L2_FHDR(bool_dec_count);
+#undef FHDR_TO_V4L2_FHDR
+
+#define SET_V4L2_FRM_HDR_FLAG_IF(cond, flag) \
+  v4l2_frame_hdr.flags |= ((frame_hdr->cond) ? (flag) : 0)
+  SET_V4L2_FRM_HDR_FLAG_IF(is_experimental,
+                           V4L2_VP8_FRAME_HDR_FLAG_EXPERIMENTAL);
+  SET_V4L2_FRM_HDR_FLAG_IF(show_frame, V4L2_VP8_FRAME_HDR_FLAG_SHOW_FRAME);
+  SET_V4L2_FRM_HDR_FLAG_IF(mb_no_skip_coeff,
+                           V4L2_VP8_FRAME_HDR_FLAG_MB_NO_SKIP_COEFF);
+#undef SET_V4L2_FRM_HDR_FLAG_IF
+
+  FillV4L2SegmentationHeader(frame_hdr->segmentation_hdr,
+                             &v4l2_frame_hdr.sgmnt_hdr);
+
+  FillV4L2LoopfilterHeader(frame_hdr->loopfilter_hdr, &v4l2_frame_hdr.lf_hdr);
+
+  FillV4L2QuantizationHeader(frame_hdr->quantization_hdr,
+                             &v4l2_frame_hdr.quant_hdr);
+
+  FillV4L2Vp8EntropyHeader(frame_hdr->entropy_hdr, &v4l2_frame_hdr.entropy_hdr);
+
+  v4l2_frame_hdr.first_part_size =
+      base::checked_cast<__u32>(frame_hdr->first_part_size);
+  v4l2_frame_hdr.first_part_offset =
+      base::checked_cast<__u32>(frame_hdr->first_part_offset);
+  v4l2_frame_hdr.macroblock_bit_offset =
+      base::checked_cast<__u32>(frame_hdr->macroblock_bit_offset);
+  v4l2_frame_hdr.num_dct_parts = frame_hdr->num_of_dct_partitions;
+
+  static_assert(arraysize(v4l2_frame_hdr.dct_part_sizes) ==
+                    arraysize(frame_hdr->dct_partition_sizes),
+                "DCT partition size arrays must have equal number of elements");
+  for (size_t i = 0; i < frame_hdr->num_of_dct_partitions &&
+                     i < arraysize(v4l2_frame_hdr.dct_part_sizes);
+       ++i)
+    v4l2_frame_hdr.dct_part_sizes[i] = frame_hdr->dct_partition_sizes[i];
+
+  scoped_refptr<V4L2DecodeSurface> dec_surface =
+      VP8PictureToV4L2DecodeSurface(pic);
+  std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;
+
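+  // Map each VP8 reference (last/golden/alt) to the CAPTURE buffer index of
+  // its decode surface; VIDEO_MAX_FRAME marks an absent reference.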
+  if (last_frame) {
+    scoped_refptr<V4L2DecodeSurface> last_frame_surface =
+        VP8PictureToV4L2DecodeSurface(last_frame);
+    v4l2_frame_hdr.last_frame = last_frame_surface->output_record();
+    ref_surfaces.push_back(last_frame_surface);
+  } else {
+    v4l2_frame_hdr.last_frame = VIDEO_MAX_FRAME;
+  }
+
+  if (golden_frame) {
+    scoped_refptr<V4L2DecodeSurface> golden_frame_surface =
+        VP8PictureToV4L2DecodeSurface(golden_frame);
+    v4l2_frame_hdr.golden_frame = golden_frame_surface->output_record();
+    ref_surfaces.push_back(golden_frame_surface);
+  } else {
+    v4l2_frame_hdr.golden_frame = VIDEO_MAX_FRAME;
+  }
+
+  if (alt_frame) {
+    scoped_refptr<V4L2DecodeSurface> alt_frame_surface =
+        VP8PictureToV4L2DecodeSurface(alt_frame);
+    v4l2_frame_hdr.alt_frame = alt_frame_surface->output_record();
+    ref_surfaces.push_back(alt_frame_surface);
+  } else {
+    v4l2_frame_hdr.alt_frame = VIDEO_MAX_FRAME;
+  }
+
+  struct v4l2_ext_control ctrl;
+  memset(&ctrl, 0, sizeof(ctrl));
+  ctrl.id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR;
+  ctrl.size = sizeof(v4l2_frame_hdr);
+  ctrl.p_vp8_frame_hdr = &v4l2_frame_hdr;
+
+  struct v4l2_ext_controls ext_ctrls;
+  memset(&ext_ctrls, 0, sizeof(ext_ctrls));
+  ext_ctrls.count = 1;
+  ext_ctrls.controls = &ctrl;
+  ext_ctrls.config_store = dec_surface->config_store();
+
+  if (!v4l2_dec_->SubmitExtControls(&ext_ctrls))
+    return false;
+
+  dec_surface->SetReferenceSurfaces(ref_surfaces);
+
+  if (!v4l2_dec_->SubmitSlice(dec_surface->input_record(), frame_hdr->data,
+                              frame_hdr->frame_size))
+    return false;
+
+  v4l2_dec_->DecodeSurface(dec_surface);
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::OutputPicture(
+    const scoped_refptr<VP8Picture>& pic) {
+  scoped_refptr<V4L2DecodeSurface> dec_surface =
+      VP8PictureToV4L2DecodeSurface(pic);
+
+  v4l2_dec_->SurfaceReady(dec_surface);
+  return true;
+}
+
+scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
+V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::
+    VP8PictureToV4L2DecodeSurface(const scoped_refptr<VP8Picture>& pic) {
+  V4L2VP8Picture* v4l2_pic = pic->AsV4L2VP8Picture();
+  CHECK(v4l2_pic);
+  return v4l2_pic->dec_surface();
+}
+
+V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::V4L2VP9Accelerator(
+    V4L2SliceVideoDecodeAccelerator* v4l2_dec)
+    : v4l2_dec_(v4l2_dec) {
+  DCHECK(v4l2_dec_);
+
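+  // If the device exposes the VP9 entropy control, it relies on us for
+  // compressed header parsing: we must submit entropy contexts with each
+  // frame and read the adapted context back in GetFrameContext().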
+  device_needs_frame_context_ =
+      v4l2_dec_->IsCtrlExposed(V4L2_CID_MPEG_VIDEO_VP9_ENTROPY);
+  DVLOG_IF(1, device_needs_frame_context_)
+      << "Device requires frame context parsing";
+}
+
+V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::~V4L2VP9Accelerator() {}
+
+scoped_refptr<VP9Picture>
+V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::CreateVP9Picture() {
+  scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
+  if (!dec_surface)
+    return nullptr;
+
+  return new V4L2VP9Picture(dec_surface);
+}
+
+static void FillV4L2VP9LoopFilterParams(
+    const Vp9LoopFilterParams& vp9_lf_params,
+    struct v4l2_vp9_loop_filter_params* v4l2_lf_params) {
+#define SET_LF_PARAMS_FLAG_IF(cond, flag) \
+  v4l2_lf_params->flags |= ((vp9_lf_params.cond) ? (flag) : 0)
+  SET_LF_PARAMS_FLAG_IF(delta_enabled, V4L2_VP9_LOOP_FLTR_FLAG_DELTA_ENABLED);
+  SET_LF_PARAMS_FLAG_IF(delta_update, V4L2_VP9_LOOP_FLTR_FLAG_DELTA_UPDATE);
+#undef SET_LF_PARAMS_FLAG_IF
+
+  v4l2_lf_params->level = vp9_lf_params.level;
+  v4l2_lf_params->sharpness = vp9_lf_params.sharpness;
+
+  ARRAY_MEMCPY_CHECKED(v4l2_lf_params->deltas, vp9_lf_params.ref_deltas);
+  ARRAY_MEMCPY_CHECKED(v4l2_lf_params->mode_deltas, vp9_lf_params.mode_deltas);
+  ARRAY_MEMCPY_CHECKED(v4l2_lf_params->lvl_lookup, vp9_lf_params.lvl);
+}
+
+static void FillV4L2VP9QuantizationParams(
+    const Vp9QuantizationParams& vp9_quant_params,
+    struct v4l2_vp9_quantization_params* v4l2_q_params) {
+#define SET_Q_PARAMS_FLAG_IF(cond, flag) \
+  v4l2_q_params->flags |= ((vp9_quant_params.cond) ? (flag) : 0)
+  SET_Q_PARAMS_FLAG_IF(IsLossless(), V4L2_VP9_QUANT_PARAMS_FLAG_LOSSLESS);
+#undef SET_Q_PARAMS_FLAG_IF
+
+#define Q_PARAMS_TO_V4L2_Q_PARAMS(a) v4l2_q_params->a = vp9_quant_params.a
+  Q_PARAMS_TO_V4L2_Q_PARAMS(base_q_idx);
+  Q_PARAMS_TO_V4L2_Q_PARAMS(delta_q_y_dc);
+  Q_PARAMS_TO_V4L2_Q_PARAMS(delta_q_uv_dc);
+  Q_PARAMS_TO_V4L2_Q_PARAMS(delta_q_uv_ac);
+#undef Q_PARAMS_TO_V4L2_Q_PARAMS
+}
+
+static void FillV4L2VP9SegmentationParams(
+    const Vp9SegmentationParams& vp9_segm_params,
+    struct v4l2_vp9_segmentation_params* v4l2_segm_params) {
+#define SET_SEG_PARAMS_FLAG_IF(cond, flag) \
+  v4l2_segm_params->flags |= ((vp9_segm_params.cond) ? (flag) : 0)
+  SET_SEG_PARAMS_FLAG_IF(enabled, V4L2_VP9_SGMNT_PARAM_FLAG_ENABLED);
+  SET_SEG_PARAMS_FLAG_IF(update_map, V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_MAP);
+  SET_SEG_PARAMS_FLAG_IF(temporal_update,
+                         V4L2_VP9_SGMNT_PARAM_FLAG_TEMPORAL_UPDATE);
+  SET_SEG_PARAMS_FLAG_IF(update_data, V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_DATA);
+  SET_SEG_PARAMS_FLAG_IF(abs_or_delta_update,
+                         V4L2_VP9_SGMNT_PARAM_FLAG_ABS_OR_DELTA_UPDATE);
+#undef SET_SEG_PARAMS_FLAG_IF
+
+  ARRAY_MEMCPY_CHECKED(v4l2_segm_params->tree_probs,
+                       vp9_segm_params.tree_probs);
+  ARRAY_MEMCPY_CHECKED(v4l2_segm_params->pred_probs,
+                       vp9_segm_params.pred_probs);
+  ARRAY_MEMCPY_CHECKED(v4l2_segm_params->feature_data,
+                       vp9_segm_params.feature_data);
+
+  static_assert(arraysize(v4l2_segm_params->feature_enabled) ==
+                        arraysize(vp9_segm_params.feature_enabled) &&
+                    arraysize(v4l2_segm_params->feature_enabled[0]) ==
+                        arraysize(vp9_segm_params.feature_enabled[0]),
+                "feature_enabled arrays must be of same size");
+  for (size_t i = 0; i < arraysize(v4l2_segm_params->feature_enabled); ++i) {
+    for (size_t j = 0; j < arraysize(v4l2_segm_params->feature_enabled[i]);
+         ++j) {
+      v4l2_segm_params->feature_enabled[i][j] =
+          vp9_segm_params.feature_enabled[i][j];
+    }
+  }
+}
+
+static void FillV4L2Vp9EntropyContext(
+    const Vp9FrameContext& vp9_frame_ctx,
+    struct v4l2_vp9_entropy_ctx* v4l2_entropy_ctx) {
+#define ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(a) \
+  ARRAY_MEMCPY_CHECKED(v4l2_entropy_ctx->a, vp9_frame_ctx.a)
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(tx_probs_8x8);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(tx_probs_16x16);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(tx_probs_32x32);
+
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(coef_probs);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(skip_prob);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(inter_mode_probs);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(interp_filter_probs);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(is_inter_prob);
+
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(comp_mode_prob);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(single_ref_prob);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(comp_ref_prob);
+
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(y_mode_probs);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(uv_mode_probs);
+
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(partition_probs);
+
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_joint_probs);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_sign_prob);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_class_probs);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_class0_bit_prob);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_bits_prob);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_class0_fr_probs);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_fr_probs);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_class0_hp_prob);
+  ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_hp_prob);
+#undef ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR
+}
+
+bool V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::SubmitDecode(
+    const scoped_refptr<VP9Picture>& pic,
+    const Vp9SegmentationParams& segm_params,
+    const Vp9LoopFilterParams& lf_params,
+    const std::vector<scoped_refptr<VP9Picture>>& ref_pictures,
+    const base::Closure& done_cb) {
+  const Vp9FrameHeader* frame_hdr = pic->frame_hdr.get();
+  DCHECK(frame_hdr);
+
+  struct v4l2_ctrl_vp9_frame_hdr v4l2_frame_hdr;
+  memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr));
+
+#define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a
+  FHDR_TO_V4L2_FHDR(profile);
+  FHDR_TO_V4L2_FHDR(frame_type);
+
+  FHDR_TO_V4L2_FHDR(bit_depth);
+  FHDR_TO_V4L2_FHDR(color_range);
+  FHDR_TO_V4L2_FHDR(subsampling_x);
+  FHDR_TO_V4L2_FHDR(subsampling_y);
+
+  FHDR_TO_V4L2_FHDR(frame_width);
+  FHDR_TO_V4L2_FHDR(frame_height);
+  FHDR_TO_V4L2_FHDR(render_width);
+  FHDR_TO_V4L2_FHDR(render_height);
+
+  FHDR_TO_V4L2_FHDR(reset_frame_context);
+
+  FHDR_TO_V4L2_FHDR(interpolation_filter);
+  FHDR_TO_V4L2_FHDR(frame_context_idx);
+
+  FHDR_TO_V4L2_FHDR(tile_cols_log2);
+  FHDR_TO_V4L2_FHDR(tile_rows_log2);
+
+  FHDR_TO_V4L2_FHDR(header_size_in_bytes);
+#undef FHDR_TO_V4L2_FHDR
+  v4l2_frame_hdr.color_space = static_cast<uint8_t>(frame_hdr->color_space);
+
+  FillV4L2VP9QuantizationParams(frame_hdr->quant_params,
+                                &v4l2_frame_hdr.quant_params);
+
+#define SET_V4L2_FRM_HDR_FLAG_IF(cond, flag) \
+  v4l2_frame_hdr.flags |= ((frame_hdr->cond) ? (flag) : 0)
+  SET_V4L2_FRM_HDR_FLAG_IF(show_frame, V4L2_VP9_FRAME_HDR_FLAG_SHOW_FRAME);
+  SET_V4L2_FRM_HDR_FLAG_IF(error_resilient_mode,
+                           V4L2_VP9_FRAME_HDR_FLAG_ERR_RES);
+  SET_V4L2_FRM_HDR_FLAG_IF(intra_only, V4L2_VP9_FRAME_HDR_FLAG_FRAME_INTRA);
+  SET_V4L2_FRM_HDR_FLAG_IF(allow_high_precision_mv,
+                           V4L2_VP9_FRAME_HDR_ALLOW_HIGH_PREC_MV);
+  SET_V4L2_FRM_HDR_FLAG_IF(refresh_frame_context,
+                           V4L2_VP9_FRAME_HDR_REFRESH_FRAME_CTX);
+  SET_V4L2_FRM_HDR_FLAG_IF(frame_parallel_decoding_mode,
+                           V4L2_VP9_FRAME_HDR_PARALLEL_DEC_MODE);
+#undef SET_V4L2_FRM_HDR_FLAG_IF
+
+  FillV4L2VP9LoopFilterParams(lf_params, &v4l2_frame_hdr.lf_params);
+  FillV4L2VP9SegmentationParams(segm_params, &v4l2_frame_hdr.sgmnt_params);
+
+  std::vector<struct v4l2_ext_control> ctrls;
+
+  struct v4l2_ext_control ctrl;
+  memset(&ctrl, 0, sizeof(ctrl));
+  ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_FRAME_HDR;
+  ctrl.size = sizeof(v4l2_frame_hdr);
+  ctrl.p_vp9_frame_hdr = &v4l2_frame_hdr;
+  ctrls.push_back(ctrl);
+
+  struct v4l2_ctrl_vp9_decode_param v4l2_decode_param;
+  memset(&v4l2_decode_param, 0, sizeof(v4l2_decode_param));
+  DCHECK_EQ(ref_pictures.size(), arraysize(v4l2_decode_param.ref_frames));
+
+  std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;
+  for (size_t i = 0; i < ref_pictures.size(); ++i) {
+    if (ref_pictures[i]) {
+      scoped_refptr<V4L2DecodeSurface> ref_surface =
+          VP9PictureToV4L2DecodeSurface(ref_pictures[i]);
+
+      v4l2_decode_param.ref_frames[i] = ref_surface->output_record();
+      ref_surfaces.push_back(ref_surface);
+    } else {
+      v4l2_decode_param.ref_frames[i] = VIDEO_MAX_FRAME;
+    }
+  }
+
+  static_assert(arraysize(v4l2_decode_param.active_ref_frames) ==
+                    arraysize(frame_hdr->ref_frame_idx),
+                "active reference frame array sizes mismatch");
+
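+  // ref_frame_idx selects, for each of the three active references
+  // (LAST/GOLDEN/ALTREF), which of the reference slots above it uses.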
+  for (size_t i = 0; i < arraysize(frame_hdr->ref_frame_idx); ++i) {
+    uint8_t idx = frame_hdr->ref_frame_idx[i];
+    if (idx >= ref_pictures.size())
+      return false;
+
+    struct v4l2_vp9_reference_frame* v4l2_ref_frame =
+        &v4l2_decode_param.active_ref_frames[i];
+
+    scoped_refptr<VP9Picture> ref_pic = ref_pictures[idx];
+    if (ref_pic) {
+      scoped_refptr<V4L2DecodeSurface> ref_surface =
+          VP9PictureToV4L2DecodeSurface(ref_pic);
+      v4l2_ref_frame->buf_index = ref_surface->output_record();
+#define REF_TO_V4L2_REF(a) v4l2_ref_frame->a = ref_pic->frame_hdr->a
+      REF_TO_V4L2_REF(frame_width);
+      REF_TO_V4L2_REF(frame_height);
+      REF_TO_V4L2_REF(bit_depth);
+      REF_TO_V4L2_REF(subsampling_x);
+      REF_TO_V4L2_REF(subsampling_y);
+#undef REF_TO_V4L2_REF
+    } else {
+      v4l2_ref_frame->buf_index = VIDEO_MAX_FRAME;
+    }
+  }
+
+  memset(&ctrl, 0, sizeof(ctrl));
+  ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_DECODE_PARAM;
+  ctrl.size = sizeof(v4l2_decode_param);
+  ctrl.p_vp9_decode_param = &v4l2_decode_param;
+  ctrls.push_back(ctrl);
+
+  // Defined outside of the if() clause below as it must remain valid until
+  // the call to SubmitExtControls().
+  struct v4l2_ctrl_vp9_entropy v4l2_entropy;
+  if (device_needs_frame_context_) {
+    memset(&v4l2_entropy, 0, sizeof(v4l2_entropy));
+    FillV4L2Vp9EntropyContext(frame_hdr->initial_frame_context,
+                              &v4l2_entropy.initial_entropy_ctx);
+    FillV4L2Vp9EntropyContext(frame_hdr->frame_context,
+                              &v4l2_entropy.current_entropy_ctx);
+    v4l2_entropy.tx_mode = frame_hdr->compressed_header.tx_mode;
+    v4l2_entropy.reference_mode = frame_hdr->compressed_header.reference_mode;
+
+    memset(&ctrl, 0, sizeof(ctrl));
+    ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_ENTROPY;
+    ctrl.size = sizeof(v4l2_entropy);
+    ctrl.p_vp9_entropy = &v4l2_entropy;
+    ctrls.push_back(ctrl);
+  }
+
+  scoped_refptr<V4L2DecodeSurface> dec_surface =
+      VP9PictureToV4L2DecodeSurface(pic);
+
+  struct v4l2_ext_controls ext_ctrls;
+  memset(&ext_ctrls, 0, sizeof(ext_ctrls));
+  ext_ctrls.count = ctrls.size();
+  ext_ctrls.controls = &ctrls[0];
+  ext_ctrls.config_store = dec_surface->config_store();
+  if (!v4l2_dec_->SubmitExtControls(&ext_ctrls))
+    return false;
+
+  dec_surface->SetReferenceSurfaces(ref_surfaces);
+  dec_surface->SetDecodeDoneCallback(done_cb);
+
+  if (!v4l2_dec_->SubmitSlice(dec_surface->input_record(), frame_hdr->data,
+                              frame_hdr->frame_size))
+    return false;
+
+  v4l2_dec_->DecodeSurface(dec_surface);
+  return true;
+}
+
+bool V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::OutputPicture(
+    const scoped_refptr<VP9Picture>& pic) {
+  scoped_refptr<V4L2DecodeSurface> dec_surface =
+      VP9PictureToV4L2DecodeSurface(pic);
+
+  v4l2_dec_->SurfaceReady(dec_surface);
+  return true;
+}
+
+static void FillVp9FrameContext(struct v4l2_vp9_entropy_ctx& v4l2_entropy_ctx,
+                                Vp9FrameContext* vp9_frame_ctx) {
+#define ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(a) \
+  ARRAY_MEMCPY_CHECKED(vp9_frame_ctx->a, v4l2_entropy_ctx.a)
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(tx_probs_8x8);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(tx_probs_16x16);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(tx_probs_32x32);
+
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(coef_probs);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(skip_prob);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(inter_mode_probs);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(interp_filter_probs);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(is_inter_prob);
+
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(comp_mode_prob);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(single_ref_prob);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(comp_ref_prob);
+
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(y_mode_probs);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(uv_mode_probs);
+
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(partition_probs);
+
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_joint_probs);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_sign_prob);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_class_probs);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_class0_bit_prob);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_bits_prob);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_class0_fr_probs);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_fr_probs);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_class0_hp_prob);
+  ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_hp_prob);
+#undef ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX
+}
+
+bool V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::GetFrameContext(
+    const scoped_refptr<VP9Picture>& pic,
+    Vp9FrameContext* frame_ctx) {
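+  // Read back the entropy context the device adapted while decoding this
+  // frame, so the parser can keep its frame contexts in sync.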
+  struct v4l2_ctrl_vp9_entropy v4l2_entropy;
+  memset(&v4l2_entropy, 0, sizeof(v4l2_entropy));
+
+  struct v4l2_ext_control ctrl;
+  memset(&ctrl, 0, sizeof(ctrl));
+  ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_ENTROPY;
+  ctrl.size = sizeof(v4l2_entropy);
+  ctrl.p_vp9_entropy = &v4l2_entropy;
+
+  scoped_refptr<V4L2DecodeSurface> dec_surface =
+      VP9PictureToV4L2DecodeSurface(pic);
+
+  struct v4l2_ext_controls ext_ctrls;
+  memset(&ext_ctrls, 0, sizeof(ext_ctrls));
+  ext_ctrls.count = 1;
+  ext_ctrls.controls = &ctrl;
+  ext_ctrls.config_store = dec_surface->config_store();
+
+  if (!v4l2_dec_->GetExtControls(&ext_ctrls))
+    return false;
+
+  FillVp9FrameContext(v4l2_entropy.current_entropy_ctx, frame_ctx);
+  return true;
+}
+
+scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
+V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::
+    VP9PictureToV4L2DecodeSurface(const scoped_refptr<VP9Picture>& pic) {
+  V4L2VP9Picture* v4l2_pic = pic->AsV4L2VP9Picture();
+  CHECK(v4l2_pic);
+  return v4l2_pic->dec_surface();
+}
+
+void V4L2SliceVideoDecodeAccelerator::DecodeSurface(
+    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  DVLOGF(3) << "Submitting decode for surface: " << dec_surface->ToString();
+  Enqueue(dec_surface);
+}
+
+void V4L2SliceVideoDecodeAccelerator::SurfaceReady(
+    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  decoder_display_queue_.push(dec_surface);
+  TryOutputSurfaces();
+}
+
+void V4L2SliceVideoDecodeAccelerator::TryOutputSurfaces() {
+  while (!decoder_display_queue_.empty()) {
+    scoped_refptr<V4L2DecodeSurface> dec_surface =
+        decoder_display_queue_.front();
+
+    if (!dec_surface->decoded())
+      break;
+
+    decoder_display_queue_.pop();
+    OutputSurface(dec_surface);
+  }
+}
+
+void V4L2SliceVideoDecodeAccelerator::OutputSurface(
+    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+  OutputRecord& output_record =
+      output_buffer_map_[dec_surface->output_record()];
+
+  bool inserted =
+      surfaces_at_display_
+          .insert(std::make_pair(output_record.picture_id, dec_surface))
+          .second;
+  DCHECK(inserted);
+
+  DCHECK(!output_record.at_client);
+  DCHECK(!output_record.at_device);
+  DCHECK_NE(output_record.picture_id, -1);
+  output_record.at_client = true;
+
+  // TODO(posciak): Use visible size from decoder here instead
+  // (crbug.com/402760). Passing (0, 0) results in the client using the
+  // visible size extracted from the container instead.
+  Picture picture(output_record.picture_id, dec_surface->bitstream_id(),
+                  Rect(0, 0), false);
+  DVLOGF(3) << dec_surface->ToString()
+            << ", bitstream_id: " << picture.bitstream_buffer_id()
+            << ", picture_id: " << picture.picture_buffer_id();
+  pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
+  SendPictureReady();
+  output_record.cleared = true;
+}
+
+scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
+V4L2SliceVideoDecodeAccelerator::CreateSurface() {
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  DCHECK_EQ(state_, kDecoding);
+
+  if (free_input_buffers_.empty() || free_output_buffers_.empty())
+    return nullptr;
+
+  int input = free_input_buffers_.front();
+  free_input_buffers_.pop_front();
+  int output = free_output_buffers_.front();
+  free_output_buffers_.pop_front();
+
+  InputRecord& input_record = input_buffer_map_[input];
+  DCHECK_EQ(input_record.bytes_used, 0u);
+  DCHECK_EQ(input_record.input_id, -1);
+  DCHECK(decoder_current_bitstream_buffer_ != nullptr);
+  input_record.input_id = decoder_current_bitstream_buffer_->input_id;
+
+  scoped_refptr<V4L2DecodeSurface> dec_surface = new V4L2DecodeSurface(
+      decoder_current_bitstream_buffer_->input_id, input, output,
+      base::Bind(&V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer,
+                 base::Unretained(this)));
+
+  DVLOGF(4) << "Created surface " << input << " -> " << output;
+  return dec_surface;
+}
+
+void V4L2SliceVideoDecodeAccelerator::SendPictureReady() {
+  DVLOGF(3);
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  bool resetting_or_flushing = (decoder_resetting_ || decoder_flushing_);
+  while (!pending_picture_ready_.empty()) {
+    bool cleared = pending_picture_ready_.front().cleared;
+    const Picture& picture = pending_picture_ready_.front().picture;
+    if (cleared && picture_clearing_count_ == 0) {
+      DVLOGF(4) << "Posting picture ready to decode task runner for: "
+                << picture.picture_buffer_id();
+      // This picture is cleared. It can be posted to a thread other than
+      // the main GPU thread to reduce latency. This should be the common
+      // case once all pictures have been cleared at startup.
+      decode_task_runner_->PostTask(
+          FROM_HERE,
+          base::Bind(&Client::PictureReady, decode_client_, picture));
+      pending_picture_ready_.pop();
+    } else if (!cleared || resetting_or_flushing) {
+      DVLOGF(3) << "cleared=" << pending_picture_ready_.front().cleared
+                << ", decoder_resetting_=" << decoder_resetting_
+                << ", decoder_flushing_=" << decoder_flushing_
+                << ", picture_clearing_count_=" << picture_clearing_count_;
+      DVLOGF(4) << "Posting picture ready to GPU for: "
+                << picture.picture_buffer_id();
+      // If the picture is not cleared, post it to the child thread, because
+      // it has to be cleared there. A picture only needs to be cleared once.
+      // If the decoder is resetting or flushing, send all pictures to ensure
+      // every PictureReady arrives before the reset or flush completes.
+      child_task_runner_->PostTaskAndReply(
+          FROM_HERE, base::Bind(&Client::PictureReady, client_, picture),
+          // Unretained is safe. If Client::PictureReady gets to run, |this|
+          // is alive. Destroy() waits for the decoder thread to finish.
+          base::Bind(&V4L2SliceVideoDecodeAccelerator::PictureCleared,
+                     base::Unretained(this)));
+      picture_clearing_count_++;
+      pending_picture_ready_.pop();
+    } else {
+      // This picture is cleared. But some pictures are about to be cleared on
+      // the child thread. To preserve the order, do not send this until those
+      // pictures are cleared.
+      break;
+    }
+  }
+}
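+
+// Worked example of the ordering logic above (illustrative): with
+// pending_picture_ready_ = [cleared A, uncleared B, cleared C] and
+// picture_clearing_count_ == 0, A is posted to decode_task_runner_ and
+// popped; B is posted to child_task_runner_ for clearing and
+// picture_clearing_count_ becomes 1; C then takes the final branch and stays
+// queued until PictureCleared() drops the count back to 0, so the client
+// observes A, B, C in order.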
+
+void V4L2SliceVideoDecodeAccelerator::PictureCleared() {
+  DVLOGF(3) << "clearing count=" << picture_clearing_count_;
+  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+  DCHECK_GT(picture_clearing_count_, 0);
+  picture_clearing_count_--;
+  SendPictureReady();
+}
+
+bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+    const base::WeakPtr<Client>& decode_client,
+    const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+  decode_client_ = decode_client;
+  decode_task_runner_ = decode_task_runner;
+  return true;
+}
+
+// static
+VideoDecodeAccelerator::SupportedProfiles
+V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() {
+  scoped_refptr<V4L2Device> device(new V4L2Device());
+  if (!device)
+    return SupportedProfiles();
+
+  return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_),
+                                            supported_input_fourccs_);
+}
+
+}  // namespace media
diff --git a/vda/v4l2_slice_video_decode_accelerator.h b/vda/v4l2_slice_video_decode_accelerator.h
new file mode 100644
index 0000000..929066f
--- /dev/null
+++ b/vda/v4l2_slice_video_decode_accelerator.h
@@ -0,0 +1,434 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
+#define V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <queue>
+#include <utility>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "h264_decoder.h"
+#include "v4l2_device.h"
+#include "video_decode_accelerator.h"
+#include "videodev2.h"
+#include "vp8_decoder.h"
+#include "vp9_decoder.h"
+
+namespace media {
+
+// An implementation of VideoDecodeAccelerator that utilizes the V4L2 slice
+// level codec API for decoding. The slice level API provides only low-level
+// decoding functionality and requires userspace to parse the input stream
+// and manage decoder state across frames.
+class V4L2SliceVideoDecodeAccelerator : public VideoDecodeAccelerator {
+ public:
+  class V4L2DecodeSurface;
+
+  explicit V4L2SliceVideoDecodeAccelerator(
+      const scoped_refptr<V4L2Device>& device);
+  ~V4L2SliceVideoDecodeAccelerator() override;
+
+  // VideoDecodeAccelerator implementation.
+  bool Initialize(const Config& config, Client* client) override;
+  void Decode(const BitstreamBuffer& bitstream_buffer) override;
+  void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
+  void ImportBufferForPicture(
+      int32_t picture_buffer_id,
+      const std::vector<base::FileDescriptor>& dmabuf_fds) override;
+  void ReusePictureBuffer(int32_t picture_buffer_id) override;
+  void Flush() override;
+  void Reset() override;
+  void Destroy() override;
+  bool TryToSetupDecodeOnSeparateThread(
+      const base::WeakPtr<Client>& decode_client,
+      const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+      override;
+
+  static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
+
+ private:
+  class V4L2H264Accelerator;
+  class V4L2VP8Accelerator;
+  class V4L2VP9Accelerator;
+
+  // Record for input buffers.
+  struct InputRecord {
+    InputRecord();
+    int32_t input_id;
+    void* address;
+    size_t length;
+    size_t bytes_used;
+    bool at_device;
+  };
+
+  // Record for output buffers.
+  struct OutputRecord {
+    OutputRecord();
+    bool at_device;
+    bool at_client;
+    int32_t picture_id;
+    std::vector<base::ScopedFD> dmabuf_fds;
+    bool cleared;
+  };
+
+  // See http://crbug.com/255116.
+  // Input bitstream buffer size for up to 1080p streams.
+  const size_t kInputBufferMaxSizeFor1080p = 1024 * 1024;
+  // Input bitstream buffer size for up to 4k streams.
+  const size_t kInputBufferMaxSizeFor4k = 4 * kInputBufferMaxSizeFor1080p;
+  const size_t kNumInputBuffers = 16;
+
+  // Input format V4L2 fourccs this class supports.
+  static const uint32_t supported_input_fourccs_[];
+
+  //
+  // Below methods are used by accelerator implementations.
+  //
+  // Append slice data in |data| of size |size| to pending hardware
+  // input buffer with |index|. This buffer will be submitted for decode
+  // on the next DecodeSurface(). Return true on success.
+  bool SubmitSlice(int index, const uint8_t* data, size_t size);
+
+  // Submit controls in |ext_ctrls| to hardware. Return true on success.
+  bool SubmitExtControls(struct v4l2_ext_controls* ext_ctrls);
+
+  // Gets current control values for controls in |ext_ctrls| from the driver.
+  // Return true on success.
+  bool GetExtControls(struct v4l2_ext_controls* ext_ctrls);
+
+  // Return true if the driver exposes V4L2 control |ctrl_id|, false otherwise.
+  bool IsCtrlExposed(uint32_t ctrl_id);
+
+  // Decode of |dec_surface| is ready to be submitted and all codec-specific
+  // settings are set in hardware.
+  void DecodeSurface(const scoped_refptr<V4L2DecodeSurface>& dec_surface);
+
+  // |dec_surface| is ready to be output once its decode finishes.
+  // This can be called before the decode is actually done in hardware, and
+  // this method is responsible for maintaining the ordering, i.e. the
+  // surfaces will be output in the same order as the SurfaceReady() calls.
+  // To do so, the surfaces are put on decoder_display_queue_ and sent to
+  // output in that order once all preceding surfaces are sent.
+  void SurfaceReady(const scoped_refptr<V4L2DecodeSurface>& dec_surface);
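+
+  // A hedged sketch of how a codec accelerator is expected to drive the
+  // methods above for one frame (the surface accessors and slice variables
+  // are illustrative assumptions, not part of this interface):
+  //
+  //   scoped_refptr<V4L2DecodeSurface> surface = ...;
+  //   SubmitSlice(surface->input_record(), slice_data, slice_size);
+  //   SubmitExtControls(&ext_ctrls);  // codec-specific decode parameters
+  //   DecodeSurface(surface);         // queue for hardware decode
+  //   SurfaceReady(surface);          // schedule in-order output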
+
+  //
+  // Internal methods of this class.
+  //
+  // Recycle a V4L2 input buffer with |index| after dequeuing from device.
+  void ReuseInputBuffer(int index);
+
+  // Recycle V4L2 output buffer with |index|. Used as surface release callback.
+  void ReuseOutputBuffer(int index);
+
+  // Queue a |dec_surface| to device for decoding.
+  void Enqueue(const scoped_refptr<V4L2DecodeSurface>& dec_surface);
+
+  // Dequeue any V4L2 buffers available and process.
+  void Dequeue();
+
+  // V4L2 QBUF helpers.
+  bool EnqueueInputRecord(int index, uint32_t config_store);
+  bool EnqueueOutputRecord(int index);
+
+  // Set input and output formats in hardware.
+  bool SetupFormats();
+
+  // Create input and output buffers.
+  bool CreateInputBuffers();
+  bool CreateOutputBuffers();
+
+  // Destroy input buffers.
+  void DestroyInputBuffers();
+
+  // Destroy output buffers. If |dismiss| is true, also dismiss the
+  // associated PictureBuffers.
+  bool DestroyOutputs(bool dismiss);
+
+  // Used by DestroyOutputs.
+  bool DestroyOutputBuffers();
+
+  // Dismiss all |picture_buffer_ids| via Client::DismissPictureBuffer()
+  // and signal |done| after finishing.
+  void DismissPictures(const std::vector<int32_t>& picture_buffer_ids,
+                       base::WaitableEvent* done);
+
+  // Task to finish initialization on decoder_thread_.
+  void InitializeTask();
+
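+  // Notify the client of |error|.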
+  void NotifyError(Error error);
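+  // Task to tear down the decoder on decoder_thread_ during Destroy().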
+  void DestroyTask();
+
+  // Sets the state to kError and notifies client if needed.
+  void SetErrorState(Error error);
+
+  // Event handling. Events include flush, reset and resolution change and are
+  // processed while in kIdle state.
+
+  // Surface set change (resolution change) flow.
+  // If we have no surfaces allocated, start it immediately, otherwise mark
+  // ourselves as pending for surface set change.
+  void InitiateSurfaceSetChange();
+  // If a surface set change is pending and we are ready, stop the device,
+  // destroy outputs, releasing resources and dismissing pictures as required,
+  // and then start the flow to allocate a new surface set for the current
+  // resolution/DPB size, as provided by the decoder.
+  bool FinishSurfaceSetChange();
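+
+  // Illustrative sequence (an assumption based on the comments above):
+  //   InitiateSurfaceSetChange() -> mark surface_set_change_pending_;
+  //   FinishSurfaceSetChange()   -> once no surfaces remain at the device,
+  //       stop the poll thread, DestroyOutputs(true), request a new buffer
+  //       set via Client::ProvidePictureBuffers() and enter
+  //       kAwaitingPictureBuffers.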
+
+  // Flush flow when requested by client.
+  // When Flush() is called, it posts a FlushTask, which checks the input queue.
+  // If nothing is pending for decode on decoder_input_queue_, we call
+  // InitiateFlush() directly. Otherwise, we push a dummy BitstreamBufferRef
+  // onto the decoder_input_queue_ to schedule a flush. When we reach it later
+  // on, we call InitiateFlush() to perform it at the correct time.
+  void FlushTask();
+  // Tell the decoder to flush all frames, reset it and mark us as scheduled
+  // for flush, so that we can finish it once all pending decodes are finished.
+  void InitiateFlush();
+  // To be called if decoder_flushing_ is true. If not all pending frames are
+  // decoded, return false, requesting the caller to try again later.
+  // Otherwise perform the flush by sending all pending pictures to the
+  // client, notify it that the flush is finished, and return true, informing
+  // the caller that further progress can be made.
+  bool FinishFlush();
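+
+  // Illustrative sequence (an assumption based on the comments above):
+  //   Flush()         -> posts FlushTask() to decoder_thread_;
+  //   FlushTask()     -> InitiateFlush() now, or queues a dummy
+  //       BitstreamBufferRef (kFlushBufferId) to trigger it later;
+  //   InitiateFlush() -> flush decoder_, set decoder_flushing_;
+  //   FinishFlush()   -> retried until pending decodes are done, then the
+  //       client is notified that the flush is finished.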
+
+  // Reset flow when requested by client.
+  // Drop all inputs, reset the decoder and mark us as pending for reset.
+  void ResetTask();
+  // To be called if decoder_resetting_ is true. If not all pending frames are
+  // decoded, return false, requesting the caller to try again later.
+  // Otherwise perform the reset by dropping all pending outputs (the client
+  // is no longer interested in them), notify it that the reset is finished,
+  // and return true, informing the caller that further progress can be made.
+  bool FinishReset();
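+
+  // Illustrative sequence (an assumption, mirroring the flush flow):
+  //   Reset()       -> posts ResetTask() to decoder_thread_;
+  //   ResetTask()   -> drop decoder_input_queue_, reset decoder_, set
+  //       decoder_resetting_;
+  //   FinishReset() -> retried until pending decodes are done, drops pending
+  //       outputs, then the client is notified that the reset is finished.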
+
+  // Called when a new event becomes pending. Transitions us into the kIdle
+  // state (if not already in it), if possible. Also starts processing events.
+  void NewEventPending();
+
+  // Called after all events are processed successfully (i.e. all Finish*()
+  // methods return true) to return to decoding state.
+  bool FinishEventProcessing();
+
+  // Process pending events, if any.
+  void ProcessPendingEventsIfNeeded();
+
+  // Allocate V4L2 buffers and assign them to |buffers| provided by the client
+  // via AssignPictureBuffers() on decoder thread.
+  void AssignPictureBuffersTask(const std::vector<PictureBuffer>& buffers);
+
+  // Use the buffer backed by the dmabuf file descriptors in
+  // |passed_dmabuf_fds| for the OutputRecord associated with
+  // |picture_buffer_id|, taking ownership of the file descriptors.
+  void ImportBufferForPictureTask(
+      int32_t picture_buffer_id,
+      // TODO(posciak): (crbug.com/561749) we should normally be able to pass
+      // the vector by itself via std::move, but it's not possible to do this
+      // if this method is used as a callback.
+      std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds);
+
+  // Performed on decoder_thread_ as a consequence of poll() on
+  // device_poll_thread_ returning an event.
+  void ServiceDeviceTask();
+
+  // Schedule poll if we have any buffers queued and the poll thread
+  // is not stopped (on surface set change).
+  void SchedulePollIfNeeded();
+
+  // Attempt to start/stop device_poll_thread_.
+  bool StartDevicePoll();
+  bool StopDevicePoll(bool keep_input_state);
+
+  // Runs on device_poll_thread_ to wait for device events.
+  void DevicePollTask(bool poll_device);
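+
+  // Illustrative flow (an assumption based on the comments above):
+  // DevicePollTask() blocks in the device poll on device_poll_thread_, then
+  // posts ServiceDeviceTask() to decoder_thread_, which runs Dequeue() and
+  // SchedulePollIfNeeded() to re-arm the poll.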
+
+  enum State {
+    // We are in this state until Initialize() returns successfully.
+    // We can't post errors to the client in this state yet.
+    kUninitialized,
+    // Initialize() returned successfully.
+    kInitialized,
+    // This state allows making progress decoding more input stream.
+    kDecoding,
+    // Transitional state when we are not decoding any more stream, but are
+    // performing flush, reset, resolution change or are destroying ourselves.
+    kIdle,
+    // Requested new PictureBuffers via ProvidePictureBuffers(), awaiting
+    // AssignPictureBuffers().
+    kAwaitingPictureBuffers,
+    // Error state, set when sending NotifyError to client.
+    kError,
+  };
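+
+  // Assumed typical transitions, derived from the comments above:
+  //   kUninitialized -> kInitialized -> kDecoding;
+  //   kDecoding <-> kIdle for flush/reset/surface set change/destroy;
+  //   kIdle -> kAwaitingPictureBuffers -> kDecoding;
+  //   any state -> kError on error.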
+
+  // Buffer id for flush buffer, queued by FlushTask().
+  const int kFlushBufferId = -2;
+
+  // Handler for Decode() on decoder_thread_.
+  void DecodeTask(const BitstreamBuffer& bitstream_buffer);
+
+  // Schedule a new DecodeBufferTask if we are decoding.
+  void ScheduleDecodeBufferTaskIfNeeded();
+
+  // Main decoder loop. Keep decoding the current buffer in decoder_,
+  // requesting more stream via TrySetNewBistreamBuffer() when decoder_
+  // asks for it, and handle its other return values appropriately.
+  void DecodeBufferTask();
+
+  // Check decoder_input_queue_ for any available buffers to decode and
+  // set the decoder_current_bitstream_buffer_ to the next buffer if one is
+  // available, taking it off the queue. Also set the current stream pointer
+  // in decoder_, and return true.
+  // Return false if no buffers are pending on decoder_input_queue_.
+  bool TrySetNewBistreamBuffer();
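+
+  // A hedged sketch of the loop DecodeBufferTask() is expected to run (the
+  // AcceleratedVideoDecoder result names are assumptions):
+  //
+  //   while (state_ == kDecoding) {
+  //     switch (decoder_->Decode()) {
+  //       case AcceleratedVideoDecoder::kRanOutOfStreamData:
+  //         if (!TrySetNewBistreamBuffer())
+  //           return;  // wait for more input via Decode()
+  //         break;
+  //       case AcceleratedVideoDecoder::kAllocateNewSurfaces:
+  //         InitiateSurfaceSetChange();
+  //         return;
+  //       // ... remaining results handled analogously.
+  //     }
+  //   }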
+
+  // Handler for ReusePictureBuffer() on decoder_thread_: releases our
+  // reference to the surface associated with |picture_buffer_id| so its
+  // output buffer can be recycled.
+  void ReusePictureBufferTask(int32_t picture_buffer_id);
+
+  // Called to actually send |dec_surface| to the client, after it is decoded,
+  // preserving the order in which it was scheduled via SurfaceReady().
+  void OutputSurface(const scoped_refptr<V4L2DecodeSurface>& dec_surface);
+
+  // Goes over the |decoder_display_queue_| and sends all buffers from the
+  // front of the queue that are already decoded to the client, in order.
+  void TryOutputSurfaces();
+
+  // Creates a new decode surface or returns nullptr if one is not available.
+  scoped_refptr<V4L2DecodeSurface> CreateSurface();
+
+  // Send decoded pictures to PictureReady.
+  void SendPictureReady();
+
+  // Callback that indicates a picture has been cleared.
+  void PictureCleared();
+
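+  // Number of planes per input and output buffer, as negotiated with the
+  // device in SetupFormats().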
+  size_t input_planes_count_;
+  size_t output_planes_count_;
+
+  // GPU Child thread task runner.
+  const scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
+
+  // Task runner Decode() and PictureReady() run on.
+  scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner_;
+
+  // WeakPtr<> pointing to |this| for use in posting tasks from the decoder or
+  // device worker threads back to the child thread.
+  base::WeakPtr<V4L2SliceVideoDecodeAccelerator> weak_this_;
+
+  // To expose client callbacks from VideoDecodeAccelerator.
+  // NOTE: all calls to these objects *MUST* be executed on
+  // child_task_runner_.
+  std::unique_ptr<base::WeakPtrFactory<VideoDecodeAccelerator::Client>>
+      client_ptr_factory_;
+  base::WeakPtr<VideoDecodeAccelerator::Client> client_;
+  // Callbacks to |decode_client_| must be executed on |decode_task_runner_|.
+  base::WeakPtr<Client> decode_client_;
+
+  // V4L2 device in use.
+  scoped_refptr<V4L2Device> device_;
+
+  // Thread to communicate with the device on.
+  base::Thread decoder_thread_;
+  scoped_refptr<base::SingleThreadTaskRunner> decoder_thread_task_runner_;
+
+  // Thread used to poll the device for events.
+  base::Thread device_poll_thread_;
+
+  // Input queue state.
+  bool input_streamon_;
+  // Number of input buffers enqueued to the device.
+  int input_buffer_queued_count_;
+  // Input buffers ready to use; ordering does not matter.
+  std::list<int> free_input_buffers_;
+  // Mapping of int index to an input buffer record.
+  std::vector<InputRecord> input_buffer_map_;
+
+  // Output queue state.
+  bool output_streamon_;
+  // Number of output buffers enqueued to the device.
+  int output_buffer_queued_count_;
+  // Output buffers ready to use.
+  std::list<int> free_output_buffers_;
+  // Mapping of int index to an output buffer record.
+  std::vector<OutputRecord> output_buffer_map_;
+
+  VideoCodecProfile video_profile_;
+  uint32_t input_format_fourcc_;
+  uint32_t output_format_fourcc_;
+  Size visible_size_;
+  Size coded_size_;
+
+  struct BitstreamBufferRef;
+  // Input queue of stream buffers coming from the client.
+  std::queue<linked_ptr<BitstreamBufferRef>> decoder_input_queue_;
+  // BitstreamBuffer currently being processed.
+  std::unique_ptr<BitstreamBufferRef> decoder_current_bitstream_buffer_;
+
+  // Queue storing decode surfaces ready to be output as soon as they are
+  // decoded. The surfaces must be output in the order they are queued.
+  std::queue<scoped_refptr<V4L2DecodeSurface>> decoder_display_queue_;
+
+  // Decoder state.
+  State state_;
+
+  Config::OutputMode output_mode_;
+
+  // If any of these are true, we are waiting for the device to finish decoding
+  // all previously-queued frames, so we can finish the flush/reset/surface
+  // change flows. These can stack.
+  bool decoder_flushing_;
+  bool decoder_resetting_;
+  bool surface_set_change_pending_;
+
+  // Hardware accelerators.
+  // TODO(posciak): Try to have a superclass here if possible.
+  std::unique_ptr<V4L2H264Accelerator> h264_accelerator_;
+  std::unique_ptr<V4L2VP8Accelerator> vp8_accelerator_;
+  std::unique_ptr<V4L2VP9Accelerator> vp9_accelerator_;
+
+  // Codec-specific software decoder in use.
+  std::unique_ptr<AcceleratedVideoDecoder> decoder_;
+
+  // Surfaces queued to device to keep references to them while decoded.
+  using V4L2DecodeSurfaceByOutputId =
+      std::map<int, scoped_refptr<V4L2DecodeSurface>>;
+  V4L2DecodeSurfaceByOutputId surfaces_at_device_;
+
+  // Surfaces sent to client to keep references to them while displayed.
+  using V4L2DecodeSurfaceByPictureBufferId =
+      std::map<int32_t, scoped_refptr<V4L2DecodeSurface>>;
+  V4L2DecodeSurfaceByPictureBufferId surfaces_at_display_;
+
+  // Record for decoded pictures that can be sent to PictureReady.
+  struct PictureRecord {
+    PictureRecord(bool cleared, const Picture& picture);
+    ~PictureRecord();
+    bool cleared;  // Whether the texture is cleared and safe to render from.
+    Picture picture;  // The decoded picture.
+  };
+  // Pictures that are ready but not sent to PictureReady yet.
+  std::queue<PictureRecord> pending_picture_ready_;
+
+  // The number of pictures that are sent to PictureReady and will be cleared.
+  int picture_clearing_count_;
+
+  // The WeakPtrFactory for |weak_this_|.
+  base::WeakPtrFactory<V4L2SliceVideoDecodeAccelerator> weak_this_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(V4L2SliceVideoDecodeAccelerator);
+};
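+
+// A hedged usage sketch (client setup and the config/buffer variables are
+// assumptions, not part of this header):
+//
+//   scoped_refptr<V4L2Device> device(new V4L2Device());
+//   auto* vda = new V4L2SliceVideoDecodeAccelerator(device);
+//   if (vda->Initialize(config, client)) {
+//     vda->Decode(bitstream_buffer);  // repeatedly, as input arrives
+//     // The client receives ProvidePictureBuffers() and responds with
+//     // AssignPictureBuffers(); decoded frames arrive via PictureReady().
+//     vda->Flush();
+//   }
+//   vda->Destroy();  // self-deletes; do not use delete directly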
+
+}  // namespace media
+
+#endif  // V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/vda/videodev2.h b/vda/videodev2.h
index f00b71c..54dc343 100644
--- a/vda/videodev2.h
+++ b/vda/videodev2.h
@@ -69,7 +69,8 @@
 #include <linux/ioctl.h>
 #include <linux/types.h>
 #include <linux/v4l2-common.h>
-#include <linux/v4l2-controls.h>
+//#include <linux/v4l2-controls.h>
+#include <v4l2-controls.h>  // use local v4l2-controls.h file
 
 /*
  * Common stuff for both V4L1 and V4L2
diff --git a/vda/vp8_picture.cc b/vda/vp8_picture.cc
index 7c01a11..59938aa 100644
--- a/vda/vp8_picture.cc
+++ b/vda/vp8_picture.cc
@@ -10,4 +10,8 @@
 
 VP8Picture::~VP8Picture() {}
 
+V4L2VP8Picture* VP8Picture::AsV4L2VP8Picture() {
+  return nullptr;
+}
+
 }  // namespace media
diff --git a/vda/vp8_picture.h b/vda/vp8_picture.h
index b8e7417..eb253a4 100644
--- a/vda/vp8_picture.h
+++ b/vda/vp8_picture.h
@@ -10,10 +10,14 @@
 
 namespace media {
 
+class V4L2VP8Picture;
+
 class VP8Picture : public base::RefCounted<VP8Picture> {
  public:
   VP8Picture();
 
+  virtual V4L2VP8Picture* AsV4L2VP8Picture();
+
  protected:
   friend class base::RefCounted<VP8Picture>;
   virtual ~VP8Picture();
diff --git a/vda/vp9_picture.cc b/vda/vp9_picture.cc
index ed3c65a..a99427f 100644
--- a/vda/vp9_picture.cc
+++ b/vda/vp9_picture.cc
@@ -10,4 +10,8 @@
 
 VP9Picture::~VP9Picture() {}
 
+V4L2VP9Picture* VP9Picture::AsV4L2VP9Picture() {
+  return nullptr;
+}
+
 }  // namespace media
diff --git a/vda/vp9_picture.h b/vda/vp9_picture.h
index 2bd5bbc..23e299b 100644
--- a/vda/vp9_picture.h
+++ b/vda/vp9_picture.h
@@ -13,10 +13,14 @@
 
 namespace media {
 
+class V4L2VP9Picture;
+
 class VP9Picture : public base::RefCounted<VP9Picture> {
  public:
   VP9Picture();
 
+  virtual V4L2VP9Picture* AsV4L2VP9Picture();
+
   std::unique_ptr<Vp9FrameHeader> frame_hdr;
 
  protected: