Merge Android Pie into master

Bug: 112104996
Change-Id: I656c0ae0e8ee1a6207e55b48d3789dbe6c95a881
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..b043d46
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,33 @@
+---
+BasedOnStyle: Google
+AllowShortFunctionsOnASingleLine: Inline
+AllowShortIfStatementsOnASingleLine: true
+AllowShortLoopsOnASingleLine: true
+BinPackArguments: true
+BinPackParameters: true
+CommentPragmas: NOLINT:.*
+ContinuationIndentWidth: 8
+DerivePointerAlignment: false
+IndentWidth: 4
+PointerAlignment: Left
+TabWidth: 4
+
+# Deviations from the above file:
+# "Don't indent the section label"
+AccessModifierOffset: -4
+# "Each line of text in your code should be at most 100 columns long."
+ColumnLimit: 100
+# "Constructor initializer lists can be all on one line or with subsequent
+# lines indented eight spaces.". clang-format does not support having the colon
+# on the same line as the constructor function name, so this is the best
+# approximation of that rule, which makes all entries in the list (except the
+# first one) have an eight space indentation.
+ConstructorInitializerIndentWidth: 6
+# There is nothing in go/droidcppstyle about case labels, but there seems to be
+# more code that does not indent the case labels in frameworks/base.
+IndentCaseLabels: false
+# There have been some bugs in which subsequent formatting operations introduce
+# weird comment jumps.
+ReflowComments: false
+# Android does support C++11 now.
+Standard: Cpp11
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..db4d39f
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,32 @@
+cc_library_shared {
+    name: "libv4l2_codec2_arcva_factory",
+    vendor_available: true,
+    product_variables: {
+        arc: {
+            srcs: ["C2ArcVideoAcceleratorFactory.cpp"],
+
+            shared_libs: [
+                "libarcbridge",
+                "libarcbridgeservice",
+                "libarcvideobridge",
+                "libbinder",
+                "libchrome",
+                "liblog",
+                "libmojo",
+                "libutils",
+            ],
+
+            // -Wno-unused-parameter is needed for libchrome/base code
+            cflags: [
+                "-Wall",
+                "-Werror",
+                "-Wno-unused-parameter",
+                "-std=c++14",
+            ],
+        },
+    },
+    clang: true,
+    export_include_dirs: [
+        "include",
+    ],
+}
diff --git a/Android.mk b/Android.mk
index 3208f8c..bff4409 100644
--- a/Android.mk
+++ b/Android.mk
@@ -1,4 +1,67 @@
+# Build only if both hardware/google/av and device/google/cheets2/codec2 are
+# visible; otherwise, don't build any target under this repository.
+ifneq (,$(findstring hardware/google/av,$(PRODUCT_SOONG_NAMESPACES)))
+ifneq (,$(findstring device/google/cheets2/codec2,$(PRODUCT_SOONG_NAMESPACES)))
+
 LOCAL_PATH := $(call my-dir)
 include $(CLEAR_VARS)
 
-include $(LOCAL_PATH)/vda/Android.mk
+LOCAL_SRC_FILES:= \
+        C2VDAComponent.cpp \
+        C2VDAAdaptor.cpp   \
+
+LOCAL_C_INCLUDES += \
+        $(TOP)/device/google/cheets2/codec2/vdastore/include \
+        $(TOP)/external/libchrome \
+        $(TOP)/external/gtest/include \
+        $(TOP)/external/v4l2_codec2/include \
+        $(TOP)/external/v4l2_codec2/vda \
+        $(TOP)/frameworks/av/media/libstagefright/include \
+        $(TOP)/hardware/google/av/codec2/include \
+        $(TOP)/hardware/google/av/codec2/vndk/include \
+        $(TOP)/hardware/google/av/media/codecs/base/include \
+
+LOCAL_MODULE:= libv4l2_codec2
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libbinder \
+                          libchrome \
+                          liblog \
+                          libmedia \
+                          libstagefright \
+                          libstagefright_codec2 \
+                          libstagefright_codec2_vndk \
+                          libstagefright_simple_c2component \
+                          libstagefright_foundation \
+                          libutils \
+                          libv4l2_codec2_vda \
+                          libvda_c2componentstore \
+
+# -Wno-unused-parameter is needed for libchrome/base code
+LOCAL_CFLAGS += -Werror -Wall -Wno-unused-parameter -std=c++14
+LOCAL_CFLAGS += -Wno-unused-lambda-capture -Wno-unknown-warning-option
+LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
+
+LOCAL_LDFLAGS := -Wl,-Bsymbolic
+
+# Build C2VDAAdaptorProxy only for ARC++ case.
+ifneq (,$(findstring cheets_,$(TARGET_PRODUCT)))
+LOCAL_CFLAGS += -DV4L2_CODEC2_ARC
+LOCAL_SRC_FILES += \
+                   C2VDAAdaptorProxy.cpp \
+
+LOCAL_SRC_FILES := $(filter-out C2VDAAdaptor.cpp, $(LOCAL_SRC_FILES))
+LOCAL_SHARED_LIBRARIES += libarcbridge \
+                          libarcbridgeservice \
+                          libmojo \
+                          libv4l2_codec2_arcva_factory \
+
+endif # ifneq (,$(findstring cheets_,$(TARGET_PRODUCT)))
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
+endif  #ifneq (,$(findstring device/google/cheets2/codec2,$(PRODUCT_SOONG_NAMESPACES)))
+endif  #ifneq (,$(findstring hardware/google/av,$(PRODUCT_SOONG_NAMESPACES)))
diff --git a/C2ArcVideoAcceleratorFactory.cpp b/C2ArcVideoAcceleratorFactory.cpp
new file mode 100644
index 0000000..07997d1
--- /dev/null
+++ b/C2ArcVideoAcceleratorFactory.cpp
@@ -0,0 +1,87 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "C2ArcVideoAcceleratorFactory"
+
+#include <C2ArcVideoAcceleratorFactory.h>
+
+#include <base/bind.h>
+#include <binder/IServiceManager.h>
+#include <mojo/edk/embedder/embedder.h>
+#include <mojo/public/cpp/bindings/interface_request.h>
+#include <mojo/public/cpp/system/handle.h>
+#include <utils/Log.h>
+
+namespace android {
+
+ANDROID_SINGLETON_STATIC_INSTANCE(C2ArcVideoAcceleratorFactory)
+
+bool C2ArcVideoAcceleratorFactory::createVideoDecodeAccelerator(
+        ::arc::mojom::VideoDecodeAcceleratorRequest request) {
+    if (!mRemoteFactory) {
+        ALOGE("Factory is not ready");
+        return false;
+    }
+    mRemoteFactory->CreateDecodeAccelerator(std::move(request));
+    return true;
+}
+
+bool C2ArcVideoAcceleratorFactory::createVideoEncodeAccelerator(
+        ::arc::mojom::VideoEncodeAcceleratorRequest request) {
+    if (!mRemoteFactory) {
+        ALOGE("Factory is not ready");
+        return false;
+    }
+    mRemoteFactory->CreateEncodeAccelerator(std::move(request));
+    return true;
+}
+
+bool C2ArcVideoAcceleratorFactory::createVideoProtectedBufferAllocator(
+        ::arc::mojom::VideoProtectedBufferAllocatorRequest request) {
+    if (!mRemoteFactory) {
+        ALOGE("Factory is not ready");
+        return false;
+    }
+    mRemoteFactory->CreateProtectedBufferAllocator(std::move(request));
+    return true;
+}
+
+int32_t C2ArcVideoAcceleratorFactory::hostVersion() const {
+    return mHostVersion;
+}
+
+C2ArcVideoAcceleratorFactory::C2ArcVideoAcceleratorFactory() : mHostVersion(0) {
+    sp<IBinder> binder =
+            defaultServiceManager()->getService(String16("android.os.IArcVideoBridge"));
+    if (binder == nullptr) {
+        ALOGE("Failed to find IArcVideoBridge service");
+        return;
+    }
+    mArcVideoBridge = interface_cast<IArcVideoBridge>(binder);
+    mHostVersion = mArcVideoBridge->hostVersion();
+    if (mHostVersion < 4) {
+        ALOGW("HostVersion(%d) is outdated", mHostVersion);
+        return;
+    }
+
+    ALOGV("HostVersion: %d", mHostVersion);
+
+    ::arc::MojoBootstrapResult bootstrapResult =
+            mArcVideoBridge->bootstrapVideoAcceleratorFactory();
+    if (!bootstrapResult.is_valid()) {
+        ALOGE("bootstrapVideoAcceleratorFactory returns invalid result");
+        return;
+    }
+    mojo::edk::ScopedPlatformHandle handle(
+            mojo::edk::PlatformHandle(bootstrapResult.releaseFd().release()));
+    ALOGV("SetParentPipeHandle(fd=%d)", handle.get().handle);
+    mojo::edk::SetParentPipeHandle(std::move(handle));
+    mojo::ScopedMessagePipeHandle server_pipe =
+            mojo::edk::CreateChildMessagePipe(bootstrapResult.releaseToken());
+    mRemoteFactory.Bind(mojo::InterfacePtrInfo<::arc::mojom::VideoAcceleratorFactory>(
+            std::move(server_pipe), 7u));
+}
+
+}  // namespace android
diff --git a/C2VDAAdaptor.cpp b/C2VDAAdaptor.cpp
new file mode 100644
index 0000000..3a960d9
--- /dev/null
+++ b/C2VDAAdaptor.cpp
@@ -0,0 +1,212 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2VDAAdaptor"
+
+#include <C2VDAAdaptor.h>
+
+#include <bitstream_buffer.h>
+#include <native_pixmap_handle.h>
+#include <v4l2_device.h>
+#include <v4l2_slice_video_decode_accelerator.h>
+#include <video_pixel_format.h>
+#include <videodev2.h>
+
+#include <utils/Log.h>
+
+namespace android {
+
+constexpr SupportedPixelFormat kSupportedPixelFormats[] = {
+        // {mCrcb, mSemiplanar, mPixelFormat}
+        {false, true, HalPixelFormat::NV12},
+        {true, false, HalPixelFormat::YV12},
+        // Add more buffer formats when needed
+};
+
+C2VDAAdaptor::C2VDAAdaptor() : mNumOutputBuffers(0u) {}
+
+C2VDAAdaptor::~C2VDAAdaptor() {
+    if (mVDA) {
+        destroy();
+    }
+}
+
+VideoDecodeAcceleratorAdaptor::Result C2VDAAdaptor::initialize(
+        media::VideoCodecProfile profile, bool secureMode,
+        VideoDecodeAcceleratorAdaptor::Client* client) {
+    // TODO: use secureMode here, or ignore?
+    if (mVDA) {
+        ALOGE("Re-initialize() is not allowed");
+        return ILLEGAL_STATE;
+    }
+
+    media::VideoDecodeAccelerator::Config config;
+    config.profile = profile;
+    config.output_mode = media::VideoDecodeAccelerator::Config::OutputMode::IMPORT;
+
+    // TODO(johnylin): may need to implement factory to create VDA if there are multiple VDA
+    // implementations in the future.
+    scoped_refptr<media::V4L2Device> device = new media::V4L2Device();
+    std::unique_ptr<media::VideoDecodeAccelerator> vda(
+            new media::V4L2SliceVideoDecodeAccelerator(device));
+    if (!vda->Initialize(config, this)) {
+        ALOGE("Failed to initialize VDA");
+        return PLATFORM_FAILURE;
+    }
+
+    mVDA = std::move(vda);
+    mClient = client;
+
+    return SUCCESS;
+}
+
+void C2VDAAdaptor::decode(int32_t bitstreamId, int ashmemFd, off_t offset, uint32_t bytesUsed) {
+    CHECK(mVDA);
+    mVDA->Decode(media::BitstreamBuffer(bitstreamId, base::SharedMemoryHandle(ashmemFd, true),
+                                        bytesUsed, offset));
+}
+
+void C2VDAAdaptor::assignPictureBuffers(uint32_t numOutputBuffers) {
+    CHECK(mVDA);
+    std::vector<media::PictureBuffer> buffers;
+    for (uint32_t id = 0; id < numOutputBuffers; ++id) {
+        buffers.push_back(media::PictureBuffer(static_cast<int32_t>(id), mPictureSize));
+    }
+    mVDA->AssignPictureBuffers(buffers);
+    mNumOutputBuffers = numOutputBuffers;
+}
+
+void C2VDAAdaptor::importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format,
+                                          int dmabufFd,
+                                          const std::vector<VideoFramePlane>& planes) {
+    CHECK(mVDA);
+    CHECK_LT(pictureBufferId, static_cast<int32_t>(mNumOutputBuffers));
+
+    media::VideoPixelFormat pixelFormat;
+    switch (format) {
+        case HalPixelFormat::YV12:
+            pixelFormat = media::PIXEL_FORMAT_YV12;
+            break;
+        case HalPixelFormat::NV12:
+            pixelFormat = media::PIXEL_FORMAT_NV12;
+            break;
+        default:
+            LOG_ALWAYS_FATAL("Unsupported format: 0x%x", format);
+            return;
+    }
+
+    media::NativePixmapHandle handle;
+    handle.fds.emplace_back(base::FileDescriptor(dmabufFd, true));
+    for (const auto& plane : planes) {
+        handle.planes.emplace_back(plane.mStride, plane.mOffset, 0, 0);
+    }
+    mVDA->ImportBufferForPicture(pictureBufferId, pixelFormat, handle);
+}
+
+void C2VDAAdaptor::reusePictureBuffer(int32_t pictureBufferId) {
+    CHECK(mVDA);
+    CHECK_LT(pictureBufferId, static_cast<int32_t>(mNumOutputBuffers));
+
+    mVDA->ReusePictureBuffer(pictureBufferId);
+}
+
+void C2VDAAdaptor::flush() {
+    CHECK(mVDA);
+    mVDA->Flush();
+}
+
+void C2VDAAdaptor::reset() {
+    CHECK(mVDA);
+    mVDA->Reset();
+}
+
+void C2VDAAdaptor::destroy() {
+    mVDA.reset(nullptr);
+    mNumOutputBuffers = 0u;
+    mPictureSize = media::Size();
+}
+
+//static
+media::VideoDecodeAccelerator::SupportedProfiles C2VDAAdaptor::GetSupportedProfiles(
+        uint32_t inputFormatFourcc) {
+    media::VideoDecodeAccelerator::SupportedProfiles supportedProfiles;
+    auto allProfiles = media::V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles();
+    bool isSliceBased = (inputFormatFourcc == V4L2_PIX_FMT_H264_SLICE) ||
+                        (inputFormatFourcc == V4L2_PIX_FMT_VP8_FRAME) ||
+                        (inputFormatFourcc == V4L2_PIX_FMT_VP9_FRAME);
+    for (const auto& profile : allProfiles) {
+        if (inputFormatFourcc ==
+            media::V4L2Device::VideoCodecProfileToV4L2PixFmt(profile.profile, isSliceBased)) {
+            supportedProfiles.push_back(profile);
+        }
+    }
+    return supportedProfiles;
+}
+
+//static
+HalPixelFormat C2VDAAdaptor::ResolveBufferFormat(bool crcb, bool semiplanar) {
+    auto value = std::find_if(std::begin(kSupportedPixelFormats), std::end(kSupportedPixelFormats),
+                              [crcb, semiplanar](const struct SupportedPixelFormat& f) {
+                                  return f.mCrcb == crcb && f.mSemiplanar == semiplanar;
+                              });
+    LOG_ALWAYS_FATAL_IF(value == std::end(kSupportedPixelFormats),
+                        "Unsupported pixel format: (crcb=%d, semiplanar=%d)", crcb, semiplanar);
+    return value->mPixelFormat;
+}
+
+void C2VDAAdaptor::ProvidePictureBuffers(uint32_t requested_num_of_buffers,
+                                         media::VideoPixelFormat output_format,
+                                         const media::Size& dimensions) {
+    // per change ag/3262504, output_format from VDA is no longer used, component side always
+    // allocates graphic buffers for flexible YUV format.
+    (void)output_format;
+
+    mClient->providePictureBuffers(requested_num_of_buffers, dimensions);
+    mPictureSize = dimensions;
+}
+
+void C2VDAAdaptor::DismissPictureBuffer(int32_t picture_buffer_id) {
+    mClient->dismissPictureBuffer(picture_buffer_id);
+}
+
+void C2VDAAdaptor::PictureReady(const media::Picture& picture) {
+    mClient->pictureReady(picture.picture_buffer_id(), picture.bitstream_buffer_id(),
+                          picture.visible_rect());
+}
+
+void C2VDAAdaptor::NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) {
+    mClient->notifyEndOfBitstreamBuffer(bitstream_buffer_id);
+}
+
+void C2VDAAdaptor::NotifyFlushDone() {
+    mClient->notifyFlushDone();
+}
+
+void C2VDAAdaptor::NotifyResetDone() {
+    mClient->notifyResetDone();
+}
+
+static VideoDecodeAcceleratorAdaptor::Result convertErrorCode(
+        media::VideoDecodeAccelerator::Error error) {
+    switch (error) {
+    case media::VideoDecodeAccelerator::ILLEGAL_STATE:
+        return VideoDecodeAcceleratorAdaptor::ILLEGAL_STATE;
+    case media::VideoDecodeAccelerator::INVALID_ARGUMENT:
+        return VideoDecodeAcceleratorAdaptor::INVALID_ARGUMENT;
+    case media::VideoDecodeAccelerator::UNREADABLE_INPUT:
+        return VideoDecodeAcceleratorAdaptor::UNREADABLE_INPUT;
+    case media::VideoDecodeAccelerator::PLATFORM_FAILURE:
+        return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
+    default:
+        ALOGE("Unknown error code: %d", static_cast<int>(error));
+        return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
+    }
+}
+
+void C2VDAAdaptor::NotifyError(media::VideoDecodeAccelerator::Error error) {
+    mClient->notifyError(convertErrorCode(error));
+}
+
+}  // namespace android
diff --git a/C2VDAAdaptorProxy.cpp b/C2VDAAdaptorProxy.cpp
new file mode 100644
index 0000000..2c44e6b
--- /dev/null
+++ b/C2VDAAdaptorProxy.cpp
@@ -0,0 +1,344 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "C2VDAAdaptorProxy"
+
+#include <C2ArcVideoAcceleratorFactory.h>
+#include <C2VDAAdaptorProxy.h>
+
+#include <videodev2.h>
+
+#include <arc/MojoProcessSupport.h>
+#include <arc/MojoThread.h>
+#include <base/bind.h>
+#include <binder/IServiceManager.h>
+#include <mojo/edk/embedder/embedder.h>
+#include <mojo/public/cpp/system/handle.h>
+#include <utils/Log.h>
+
+namespace mojo {
+template <>
+struct TypeConverter<::arc::VideoFramePlane, android::VideoFramePlane> {
+    static ::arc::VideoFramePlane Convert(const android::VideoFramePlane& plane) {
+        return ::arc::VideoFramePlane{static_cast<int32_t>(plane.mOffset),
+                                      static_cast<int32_t>(plane.mStride)};
+    }
+};
+}  // namespace mojo
+
+namespace android {
+namespace arc {
+constexpr SupportedPixelFormat kSupportedPixelFormats[] = {
+        // {mCrcb, mSemiplanar, mPixelFormat}
+        {false, true, HalPixelFormat::NV12},
+        {true, false, HalPixelFormat::YV12},
+        // Add more buffer formats when needed
+};
+
+C2VDAAdaptorProxy::C2VDAAdaptorProxy()
+      : C2VDAAdaptorProxy(::arc::MojoProcessSupport::getLeakyInstance()) {}
+
+C2VDAAdaptorProxy::C2VDAAdaptorProxy(::arc::MojoProcessSupport* mojoProcessSupport)
+      : mClient(nullptr),
+        mMojoTaskRunner(mojoProcessSupport->mojo_thread().getTaskRunner()),
+        mBinding(this),
+        mRelay(new ::arc::CancellationRelay()) {}
+
+C2VDAAdaptorProxy::~C2VDAAdaptorProxy() {}
+
+void C2VDAAdaptorProxy::onConnectionError(const std::string& pipeName) {
+    ALOGE("onConnectionError (%s)", pipeName.c_str());
+    mRelay->cancel();
+    NotifyError(::arc::mojom::VideoDecodeAccelerator::Result::PLATFORM_FAILURE);
+}
+
+bool C2VDAAdaptorProxy::establishChannel() {
+    ALOGV("establishChannel");
+    auto future = ::arc::Future<bool>::make_shared(mRelay);
+    mMojoTaskRunner->PostTask(FROM_HERE,
+                              base::Bind(&C2VDAAdaptorProxy::establishChannelOnMojoThread,
+                                         base::Unretained(this), future));
+    return future->wait() && future->get();
+}
+
+void C2VDAAdaptorProxy::establishChannelOnMojoThread(std::shared_ptr<::arc::Future<bool>> future) {
+    C2ArcVideoAcceleratorFactory& factory = ::android::C2ArcVideoAcceleratorFactory::getInstance();
+
+    if (!factory.createVideoDecodeAccelerator(mojo::MakeRequest(&mVDAPtr))) {
+        future->set(false);
+        return;
+    }
+    mVDAPtr.set_connection_error_handler(base::Bind(&C2VDAAdaptorProxy::onConnectionError,
+                                                    base::Unretained(this),
+                                                    std::string("mVDAPtr (vda pipe)")));
+    mVDAPtr.QueryVersion(base::Bind(&C2VDAAdaptorProxy::onVersionReady, base::Unretained(this),
+                                    std::move(future)));
+}
+
+void C2VDAAdaptorProxy::onVersionReady(std::shared_ptr<::arc::Future<bool>> future, uint32_t version) {
+    ALOGI("VideoDecodeAccelerator ready (version=%d)", version);
+
+    future->set(true);
+}
+
+void C2VDAAdaptorProxy::ProvidePictureBuffers(::arc::mojom::PictureBufferFormatPtr format) {
+    ALOGV("ProvidePictureBuffers");
+    mClient->providePictureBuffers(
+            format->min_num_buffers,
+            media::Size(format->coded_size.width(), format->coded_size.height()));
+}
+void C2VDAAdaptorProxy::PictureReady(::arc::mojom::PicturePtr picture) {
+    ALOGV("PictureReady");
+    const auto& rect = picture->crop_rect;
+    mClient->pictureReady(picture->picture_buffer_id, picture->bitstream_id,
+                          media::Rect(rect.x(), rect.y(), rect.right(), rect.bottom()));
+}
+
+static VideoDecodeAcceleratorAdaptor::Result convertErrorCode(
+        ::arc::mojom::VideoDecodeAccelerator::Result error) {
+    switch (error) {
+    case ::arc::mojom::VideoDecodeAccelerator::Result::ILLEGAL_STATE:
+        return VideoDecodeAcceleratorAdaptor::ILLEGAL_STATE;
+    case ::arc::mojom::VideoDecodeAccelerator::Result::INVALID_ARGUMENT:
+        return VideoDecodeAcceleratorAdaptor::INVALID_ARGUMENT;
+    case ::arc::mojom::VideoDecodeAccelerator::Result::UNREADABLE_INPUT:
+        return VideoDecodeAcceleratorAdaptor::UNREADABLE_INPUT;
+    case ::arc::mojom::VideoDecodeAccelerator::Result::PLATFORM_FAILURE:
+        return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
+    case ::arc::mojom::VideoDecodeAccelerator::Result::INSUFFICIENT_RESOURCES:
+        return VideoDecodeAcceleratorAdaptor::INSUFFICIENT_RESOURCES;
+
+    default:
+        ALOGE("Unknown error code: %d", static_cast<int>(error));
+        return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
+    }
+}
+
+void C2VDAAdaptorProxy::NotifyError(::arc::mojom::VideoDecodeAccelerator::Result error) {
+    ALOGE("NotifyError %d", static_cast<int>(error));
+    mClient->notifyError(convertErrorCode(error));
+}
+
+void C2VDAAdaptorProxy::NotifyEndOfBitstreamBuffer(int32_t bitstream_id) {
+    ALOGV("NotifyEndOfBitstreamBuffer");
+    mClient->notifyEndOfBitstreamBuffer(bitstream_id);
+}
+
+void C2VDAAdaptorProxy::NotifyResetDone(::arc::mojom::VideoDecodeAccelerator::Result result) {
+    ALOGV("NotifyResetDone");
+    if (result != ::arc::mojom::VideoDecodeAccelerator::Result::SUCCESS) {
+        ALOGE("Reset is done incorrectly.");
+        NotifyError(result);
+        return;
+    }
+    mClient->notifyResetDone();
+}
+
+void C2VDAAdaptorProxy::NotifyFlushDone(::arc::mojom::VideoDecodeAccelerator::Result result) {
+    ALOGV("NotifyFlushDone");
+    if (result == ::arc::mojom::VideoDecodeAccelerator::Result::CANCELLED) {
+        // Flush is cancelled by a succeeding Reset(). A client expects this behavior.
+        ALOGE("Flush is canceled.");
+        return;
+    }
+    if (result != ::arc::mojom::VideoDecodeAccelerator::Result::SUCCESS) {
+        ALOGE("Flush is done incorrectly.");
+        NotifyError(result);
+        return;
+    }
+    mClient->notifyFlushDone();
+}
+
+//static
+media::VideoDecodeAccelerator::SupportedProfiles C2VDAAdaptorProxy::GetSupportedProfiles(
+        uint32_t inputFormatFourcc) {
+    media::VideoDecodeAccelerator::SupportedProfiles profiles(1);
+    profiles[0].min_resolution = media::Size(16, 16);
+    profiles[0].max_resolution = media::Size(4096, 4096);
+    switch (inputFormatFourcc) {
+    case V4L2_PIX_FMT_H264:
+    case V4L2_PIX_FMT_H264_SLICE:
+        profiles[0].profile = media::H264PROFILE_MAIN;
+        break;
+    case V4L2_PIX_FMT_VP8:
+    case V4L2_PIX_FMT_VP8_FRAME:
+        profiles[0].profile = media::VP8PROFILE_ANY;
+        break;
+    case V4L2_PIX_FMT_VP9:
+    case V4L2_PIX_FMT_VP9_FRAME:
+        profiles[0].profile = media::VP9PROFILE_PROFILE0;
+        break;
+    default:
+        ALOGE("Unknown formatfourcc: %d", inputFormatFourcc);
+        return {};
+    }
+    return profiles;
+}
+
+//static
+HalPixelFormat C2VDAAdaptorProxy::ResolveBufferFormat(bool crcb, bool semiplanar) {
+    auto value = std::find_if(std::begin(kSupportedPixelFormats), std::end(kSupportedPixelFormats),
+                              [crcb, semiplanar](const struct SupportedPixelFormat& f) {
+                                  return f.mCrcb == crcb && f.mSemiplanar == semiplanar;
+                              });
+    LOG_ALWAYS_FATAL_IF(value == std::end(kSupportedPixelFormats),
+                        "Unsupported pixel format: (crcb=%d, semiplanar=%d)", crcb, semiplanar);
+    return value->mPixelFormat;
+}
+
+VideoDecodeAcceleratorAdaptor::Result C2VDAAdaptorProxy::initialize(
+        media::VideoCodecProfile profile, bool secureMode,
+        VideoDecodeAcceleratorAdaptor::Client* client) {
+    ALOGV("initialize(profile=%d, secureMode=%d)", static_cast<int>(profile),
+          static_cast<int>(secureMode));
+    DCHECK(client);
+    DCHECK(!mClient);
+    mClient = client;
+
+    if (!establishChannel()) {
+        ALOGE("establishChannel failed");
+        return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
+    }
+
+    auto future = ::arc::Future<::arc::mojom::VideoDecodeAccelerator::Result>::make_shared(mRelay);
+    mMojoTaskRunner->PostTask(FROM_HERE, base::Bind(&C2VDAAdaptorProxy::initializeOnMojoThread,
+                                                    base::Unretained(this), profile, secureMode,
+                                                    ::arc::FutureCallback(future)));
+
+    if (!future->wait()) {
+        ALOGE("Connection lost");
+        return VideoDecodeAcceleratorAdaptor::PLATFORM_FAILURE;
+    }
+    return static_cast<VideoDecodeAcceleratorAdaptor::Result>(future->get());
+}
+
+void C2VDAAdaptorProxy::initializeOnMojoThread(
+        const media::VideoCodecProfile profile, const bool secureMode,
+        const ::arc::mojom::VideoDecodeAccelerator::InitializeCallback& cb) {
+    // base::Unretained is safe because we own |mBinding|.
+    auto client = mBinding.CreateInterfacePtrAndBind();
+    mBinding.set_connection_error_handler(base::Bind(&C2VDAAdaptorProxy::onConnectionError,
+                                                     base::Unretained(this),
+                                                     std::string("mBinding (client pipe)")));
+
+    ::arc::mojom::VideoDecodeAcceleratorConfigPtr arcConfig =
+            ::arc::mojom::VideoDecodeAcceleratorConfig::New();
+    arcConfig->secure_mode = secureMode;
+    arcConfig->profile = static_cast<::arc::mojom::VideoCodecProfile>(profile);
+    mVDAPtr->Initialize(std::move(arcConfig), std::move(client), cb);
+}
+
+void C2VDAAdaptorProxy::decode(int32_t bitstreamId, int handleFd, off_t offset, uint32_t size) {
+    ALOGV("decode");
+    mMojoTaskRunner->PostTask(
+            FROM_HERE, base::Bind(&C2VDAAdaptorProxy::decodeOnMojoThread, base::Unretained(this),
+                                  bitstreamId, handleFd, offset, size));
+}
+
+void C2VDAAdaptorProxy::decodeOnMojoThread(int32_t bitstreamId, int handleFd, off_t offset,
+                                           uint32_t size) {
+    MojoHandle wrappedHandle;
+    MojoResult wrapResult = mojo::edk::CreatePlatformHandleWrapper(
+            mojo::edk::ScopedPlatformHandle(mojo::edk::PlatformHandle(handleFd)), &wrappedHandle);
+    if (wrapResult != MOJO_RESULT_OK) {
+        ALOGE("failed to wrap handle: %d", static_cast<int>(wrapResult));
+        NotifyError(::arc::mojom::VideoDecodeAccelerator::Result::PLATFORM_FAILURE);
+        return;
+    }
+    auto bufferPtr = ::arc::mojom::BitstreamBuffer::New();
+    bufferPtr->bitstream_id = bitstreamId;
+    bufferPtr->handle_fd = mojo::ScopedHandle(mojo::Handle(wrappedHandle));
+    bufferPtr->offset = offset;
+    bufferPtr->bytes_used = size;
+    mVDAPtr->Decode(std::move(bufferPtr));
+}
+
+void C2VDAAdaptorProxy::assignPictureBuffers(uint32_t numOutputBuffers) {
+    ALOGV("assignPictureBuffers: %d", numOutputBuffers);
+    mMojoTaskRunner->PostTask(FROM_HERE,
+                              base::Bind(&C2VDAAdaptorProxy::assignPictureBuffersOnMojoThread,
+                                         base::Unretained(this), numOutputBuffers));
+}
+
+void C2VDAAdaptorProxy::assignPictureBuffersOnMojoThread(uint32_t numOutputBuffers) {
+    mVDAPtr->AssignPictureBuffers(numOutputBuffers);
+}
+
+void C2VDAAdaptorProxy::importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format,
+                                               int handleFd,
+                                               const std::vector<VideoFramePlane>& planes) {
+    ALOGV("importBufferForPicture");
+    mMojoTaskRunner->PostTask(
+            FROM_HERE,
+            base::Bind(&C2VDAAdaptorProxy::importBufferForPictureOnMojoThread,
+                       base::Unretained(this), pictureBufferId, format, handleFd, planes));
+}
+
+void C2VDAAdaptorProxy::importBufferForPictureOnMojoThread(
+        int32_t pictureBufferId, HalPixelFormat format, int handleFd,
+        const std::vector<VideoFramePlane>& planes) {
+    MojoHandle wrappedHandle;
+    MojoResult wrapResult = mojo::edk::CreatePlatformHandleWrapper(
+            mojo::edk::ScopedPlatformHandle(mojo::edk::PlatformHandle(handleFd)), &wrappedHandle);
+    if (wrapResult != MOJO_RESULT_OK) {
+        ALOGE("failed to wrap handle: %d", static_cast<int>(wrapResult));
+        NotifyError(::arc::mojom::VideoDecodeAccelerator::Result::PLATFORM_FAILURE);
+        return;
+    }
+
+    mVDAPtr->ImportBufferForPicture(pictureBufferId,
+                                    static_cast<::arc::mojom::HalPixelFormat>(format),
+                                    mojo::ScopedHandle(mojo::Handle(wrappedHandle)),
+                                    mojo::ConvertTo<std::vector<::arc::VideoFramePlane>>(planes));
+}
+
+void C2VDAAdaptorProxy::reusePictureBuffer(int32_t pictureBufferId) {
+    ALOGV("reusePictureBuffer: %d", pictureBufferId);
+    mMojoTaskRunner->PostTask(FROM_HERE,
+                              base::Bind(&C2VDAAdaptorProxy::reusePictureBufferOnMojoThread,
+                                         base::Unretained(this), pictureBufferId));
+}
+
+void C2VDAAdaptorProxy::reusePictureBufferOnMojoThread(int32_t pictureBufferId) {
+    mVDAPtr->ReusePictureBuffer(pictureBufferId);
+}
+
+void C2VDAAdaptorProxy::flush() {
+    ALOGV("flush");
+    mMojoTaskRunner->PostTask(
+            FROM_HERE, base::Bind(&C2VDAAdaptorProxy::flushOnMojoThread, base::Unretained(this)));
+}
+
+void C2VDAAdaptorProxy::flushOnMojoThread() {
+    mVDAPtr->Flush(base::Bind(&C2VDAAdaptorProxy::NotifyFlushDone, base::Unretained(this)));
+}
+
+void C2VDAAdaptorProxy::reset() {
+    ALOGV("reset");
+    mMojoTaskRunner->PostTask(
+            FROM_HERE, base::Bind(&C2VDAAdaptorProxy::resetOnMojoThread, base::Unretained(this)));
+}
+
+void C2VDAAdaptorProxy::resetOnMojoThread() {
+    mVDAPtr->Reset(base::Bind(&C2VDAAdaptorProxy::NotifyResetDone, base::Unretained(this)));
+}
+
+void C2VDAAdaptorProxy::destroy() {
+    ALOGV("destroy");
+    ::arc::Future<void> future;
+    ::arc::PostTaskAndSetFutureWithResult(
+            mMojoTaskRunner.get(), FROM_HERE,
+            base::Bind(&C2VDAAdaptorProxy::closeChannelOnMojoThread, base::Unretained(this)),
+            &future);
+    future.get();
+}
+
+void C2VDAAdaptorProxy::closeChannelOnMojoThread() {
+    if (mBinding.is_bound()) mBinding.Close();
+    mVDAPtr.reset();
+}
+
+}  // namespace arc
+}  // namespace android
diff --git a/C2VDAComponent.cpp b/C2VDAComponent.cpp
new file mode 100644
index 0000000..2b3abde
--- /dev/null
+++ b/C2VDAComponent.cpp
@@ -0,0 +1,1132 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2VDAComponent"
+
+#ifdef V4L2_CODEC2_ARC
+#include <C2VDAAdaptorProxy.h>
+#else
+#include <C2VDAAdaptor.h>
+#endif
+
+#define __C2_GENERATE_GLOBAL_VARS__
+#include <C2VDAComponent.h>
+#include <C2VDASupport.h>  // to getParamReflector from vda store
+
+#include <videodev2.h>
+
+#include <C2ComponentFactory.h>
+#include <C2PlatformSupport.h>
+
+#include <base/bind.h>
+#include <base/bind_helpers.h>
+
+#include <media/stagefright/MediaDefs.h>
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include <inttypes.h>
+#include <string.h>
+#include <algorithm>
+
+// Marks a deliberately-unused value to silence compiler warnings; the do/while
+// wrapper makes it a single statement usable anywhere.
+#define UNUSED(expr)  \
+    do {              \
+        (void)(expr); \
+    } while (0)
+
+namespace android {
+
+namespace {
+
+// Mask against 30 bits to avoid (undefined) wraparound on signed integer.
+int32_t frameIndexToBitstreamId(c2_cntr64_t frameIndex) {
+    return static_cast<int32_t>(frameIndex.peeku() & 0x3FFFFFFF);
+}
+
+// Use basic graphic block pool/allocator as default.
+const C2BlockPool::local_id_t kDefaultOutputBlockPool = C2BlockPool::BASIC_GRAPHIC;
+
+// Registered component names; the name selects the input codec in IntfImpl.
+const C2String kH264DecoderName = "c2.vda.avc.decoder";
+const C2String kVP8DecoderName = "c2.vda.vp8.decoder";
+const C2String kVP9DecoderName = "c2.vda.vp9.decoder";
+
+}  // namespace
+
+// Builds the component's parameter interface. Maps the component |name| to an
+// input MIME type and V4L2 fourcc, queries the VDA for supported profiles
+// (mInitStatus becomes C2_BAD_VALUE if the name is unknown or no profile is
+// supported), and declares the stream/port parameters exposed to clients.
+C2VDAComponent::IntfImpl::IntfImpl(C2String name, const std::shared_ptr<C2ReflectorHelper>& helper)
+      : C2InterfaceHelper(helper), mInitStatus(C2_OK) {
+    setDerivedInstance(this);
+
+    // TODO(johnylin): use factory function to determine whether the V4L2 stream or slice API is
+    //                 used.
+    uint32_t inputFormatFourcc;
+    char inputMime[128];
+    if (name == kH264DecoderName) {
+        strcpy(inputMime, MEDIA_MIMETYPE_VIDEO_AVC);
+        inputFormatFourcc = V4L2_PIX_FMT_H264_SLICE;
+    } else if (name == kVP8DecoderName) {
+        strcpy(inputMime, MEDIA_MIMETYPE_VIDEO_VP8);
+        inputFormatFourcc = V4L2_PIX_FMT_VP8_FRAME;
+    } else if (name == kVP9DecoderName) {
+        strcpy(inputMime, MEDIA_MIMETYPE_VIDEO_VP9);
+        inputFormatFourcc = V4L2_PIX_FMT_VP9_FRAME;
+    } else {
+        ALOGE("Invalid component name: %s", name.c_str());
+        mInitStatus = C2_BAD_VALUE;
+        return;
+    }
+    // Get supported profiles from VDA.
+    // TODO: re-think the suitable method of getting supported profiles for both pure Android and
+    //       ARC++.
+    media::VideoDecodeAccelerator::SupportedProfiles supportedProfiles;
+#ifdef V4L2_CODEC2_ARC
+    supportedProfiles = arc::C2VDAAdaptorProxy::GetSupportedProfiles(inputFormatFourcc);
+#else
+    supportedProfiles = C2VDAAdaptor::GetSupportedProfiles(inputFormatFourcc);
+#endif
+    if (supportedProfiles.empty()) {
+        ALOGE("No supported profile from input format: %u", inputFormatFourcc);
+        mInitStatus = C2_BAD_VALUE;
+        return;
+    }
+
+    // Only the first reported profile and its resolution range are used here.
+    mCodecProfile = supportedProfiles[0].profile;
+
+    auto minSize = supportedProfiles[0].min_resolution;
+    auto maxSize = supportedProfiles[0].max_resolution;
+
+    // Input port carries compressed bitstream; output port carries raw video.
+    addParameter(
+            DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                    .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2FormatCompressed))
+                    .build());
+
+    addParameter(DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                         .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2FormatVideo))
+                         .build());
+
+    addParameter(
+            DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                    .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(inputMime))
+                    .build());
+
+    addParameter(DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                         .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
+                                 MEDIA_MIMETYPE_VIDEO_RAW))
+                         .build());
+
+    // Validates each proposed picture-size field against its declared range.
+    struct LocalSetter {
+        static C2R SizeSetter(bool mayBlock, C2P<C2StreamPictureSizeInfo::output>& videoSize) {
+            (void)mayBlock;
+            // TODO: maybe apply block limit?
+            return videoSize.F(videoSize.v.width)
+                    .validatePossible(videoSize.v.width)
+                    .plus(videoSize.F(videoSize.v.height).validatePossible(videoSize.v.height));
+        }
+    };
+
+    addParameter(DefineParam(mSize, C2_PARAMKEY_STREAM_PICTURE_SIZE)
+                         .withDefault(new C2StreamPictureSizeInfo::output(0u, 176, 144))
+                         .withFields({
+                                 C2F(mSize, width).inRange(minSize.width(), maxSize.width(), 16),
+                                 C2F(mSize, height).inRange(minSize.height(), maxSize.height(), 16),
+                         })
+                         .withSetter(LocalSetter::SizeSetter)
+                         .build());
+
+    // Advertise allocators: ION for input linear buffers, gralloc for output
+    // graphic buffers.
+    C2Allocator::id_t inputAllocators[] = {C2PlatformAllocatorStore::ION};
+    C2Allocator::id_t outputAllocators[] = {C2PlatformAllocatorStore::GRALLOC};
+
+    addParameter(
+            DefineParam(mInputAllocatorIds, C2_PARAMKEY_INPUT_ALLOCATORS)
+                    .withConstValue(C2PortAllocatorsTuning::input::AllocShared(inputAllocators))
+                    .build());
+
+    addParameter(
+            DefineParam(mOutputAllocatorIds, C2_PARAMKEY_OUTPUT_ALLOCATORS)
+                    .withConstValue(C2PortAllocatorsTuning::output::AllocShared(outputAllocators))
+                    .build());
+
+    C2BlockPool::local_id_t outputBlockPools[] = {kDefaultOutputBlockPool};
+
+    // The client may override the output block pool id; see getBlockPoolId().
+    addParameter(
+            DefineParam(mOutputBlockPoolIds, C2_PARAMKEY_OUTPUT_BLOCK_POOLS)
+                    .withDefault(C2PortBlockPoolsTuning::output::AllocShared(outputBlockPools))
+                    .withFields({C2F(mOutputBlockPoolIds, m.values[0]).any(),
+                                 C2F(mOutputBlockPoolIds, m.values).inRange(0, 1)})
+                    .withSetter(Setter<C2PortBlockPoolsTuning::output>::NonStrictValuesWithNoDeps)
+                    .build());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Returns silently if the component is already in the ERROR state; otherwise
+// asserts it is exactly in state |x|. For use in component-thread handlers.
+#define EXPECT_STATE_OR_RETURN_ON_ERROR(x)                    \
+    do {                                                      \
+        if (mComponentState == ComponentState::ERROR) return; \
+        CHECK_EQ(mComponentState, ComponentState::x);         \
+    } while (0)
+
+// Returns silently if the component is in the ERROR state; otherwise asserts
+// it has been initialized (any state other than UNINITIALIZED).
+#define EXPECT_RUNNING_OR_RETURN_ON_ERROR()                       \
+    do {                                                          \
+        if (mComponentState == ComponentState::ERROR) return;     \
+        CHECK_NE(mComponentState, ComponentState::UNINITIALIZED); \
+    } while (0)
+
+// C2Buffer wrapping a graphic block cropped to |visibleRect|. Runs |releaseCB|
+// on destruction so the component learns when the client releases the buffer
+// and can recycle the underlying picture buffer.
+class C2VDAGraphicBuffer : public C2Buffer {
+public:
+    C2VDAGraphicBuffer(const std::shared_ptr<C2GraphicBlock>& block, const media::Rect& visibleRect,
+                       const base::Closure& releaseCB);
+    ~C2VDAGraphicBuffer() override;
+
+private:
+    base::Closure mReleaseCB;  // invoked from the destructor if non-null
+};
+
+C2VDAGraphicBuffer::C2VDAGraphicBuffer(const std::shared_ptr<C2GraphicBlock>& block,
+                                       const media::Rect& visibleRect,
+                                       const base::Closure& releaseCB)
+      : C2Buffer({block->share(C2Rect(visibleRect.width(), visibleRect.height()), C2Fence())}),
+        mReleaseCB(releaseCB) {}
+
+C2VDAGraphicBuffer::~C2VDAGraphicBuffer() {
+    // Notify the owner (C2VDAComponent::returnOutputBuffer) that the client no
+    // longer holds this buffer.
+    if (!mReleaseCB.is_null()) {
+        mReleaseCB.Run();
+    }
+}
+
+// Plain value holder for an output-format announcement from the VDA: pixel
+// format, the minimum buffer count the VDA requires, coded size, and crop.
+C2VDAComponent::VideoFormat::VideoFormat(HalPixelFormat pixelFormat, uint32_t minNumBuffers,
+                                         media::Size codedSize, media::Rect visibleRect)
+      : mPixelFormat(pixelFormat),
+        mMinNumBuffers(minNumBuffers),
+        mCodedSize(codedSize),
+        mVisibleRect(visibleRect) {}
+
+// Constructs the component and starts its private thread. On interface-init
+// or thread-start failure the component remains in State::UNLOADED; otherwise
+// it transitions to State::LOADED.
+C2VDAComponent::C2VDAComponent(C2String name, c2_node_id_t id,
+                               const std::shared_ptr<C2ReflectorHelper>& helper)
+      : mIntfImpl(std::make_shared<IntfImpl>(name, helper)),
+        mIntf(std::make_shared<SimpleInterface<IntfImpl>>(name.c_str(), id, mIntfImpl)),
+        mThread("C2VDAComponentThread"),
+        mVDAInitResult(VideoDecodeAcceleratorAdaptor::Result::ILLEGAL_STATE),
+        mComponentState(ComponentState::UNINITIALIZED),
+        mDrainWithEOS(false),
+        mLastOutputTimestamp(-1),
+        mCodecProfile(media::VIDEO_CODEC_PROFILE_UNKNOWN),
+        mState(State::UNLOADED),
+        mWeakThisFactory(this) {
+    // TODO(johnylin): the client may need to know if init is failed.
+    if (mIntfImpl->status() != C2_OK) {
+        ALOGE("Component interface init failed (err code = %d)", mIntfImpl->status());
+        return;
+    }
+    if (!mThread.Start()) {
+        ALOGE("Component thread failed to start.");
+        return;
+    }
+    mTaskRunner = mThread.task_runner();
+    mState.store(State::LOADED);
+}
+
+// Destructor requires the component to be back in LOADED state (i.e. stopped).
+// Tears down the VDA on the component thread before joining it.
+C2VDAComponent::~C2VDAComponent() {
+    CHECK_EQ(mState.load(), State::LOADED);
+
+    if (mThread.IsRunning()) {
+        mTaskRunner->PostTask(FROM_HERE,
+                              base::Bind(&C2VDAComponent::onDestroy, base::Unretained(this)));
+        mThread.Stop();
+    }
+}
+
+// Component-thread task: releases the VDA adaptor if one exists.
+void C2VDAComponent::onDestroy() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onDestroy");
+    if (mVDAAdaptor.get()) {
+        mVDAAdaptor->destroy();
+        mVDAAdaptor.reset(nullptr);
+    }
+}
+
+// Component-thread task: creates the VDA adaptor (proxy variant under ARC) and
+// initializes it with |profile|. On success the component state becomes
+// STARTED. |done| is signaled in all cases; the caller (start()) then checks
+// mVDAInitResult.
+void C2VDAComponent::onStart(media::VideoCodecProfile profile, base::WaitableEvent* done) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onStart");
+    CHECK_EQ(mComponentState, ComponentState::UNINITIALIZED);
+
+#ifdef V4L2_CODEC2_ARC
+    mVDAAdaptor.reset(new arc::C2VDAAdaptorProxy());
+#else
+    mVDAAdaptor.reset(new C2VDAAdaptor());
+#endif
+
+    // TODO: Set secureMode value dynamically.
+    bool secureMode = false;
+    mVDAInitResult = mVDAAdaptor->initialize(profile, secureMode, this);
+    if (mVDAInitResult == VideoDecodeAcceleratorAdaptor::Result::SUCCESS) {
+        mComponentState = ComponentState::STARTED;
+    }
+
+    done->Signal();
+}
+
+// Component-thread task: enqueues one work item. A work flagged END_OF_STREAM
+// is tagged DRAIN_COMPONENT_WITH_EOS so a VDA flush is triggered once its
+// input has been submitted. Dequeue is driven by a separately posted task.
+void C2VDAComponent::onQueueWork(std::unique_ptr<C2Work> work) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onQueueWork: flags=0x%x, index=%llu, timestamp=%llu", work->input.flags,
+          work->input.ordinal.frameIndex.peekull(), work->input.ordinal.timestamp.peekull());
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+    // It is illegal for client to put new works while component is still flushing.
+    CHECK_NE(mComponentState, ComponentState::FLUSHING);
+
+    uint32_t drainMode = NO_DRAIN;
+    if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
+        drainMode = DRAIN_COMPONENT_WITH_EOS;
+    }
+    mQueue.push({std::move(work), drainMode});
+    // TODO(johnylin): set a maximum size of mQueue and check if mQueue is already full.
+
+    mTaskRunner->PostTask(FROM_HERE,
+                          base::Bind(&C2VDAComponent::onDequeueWork, base::Unretained(this)));
+}
+
+// Component-thread task: pops one work from mQueue, sends its input to the VDA
+// (unless it is a zero-size dummy), moves the work to mPendingWorks, and
+// re-posts itself while more works remain. Dequeueing pauses while DRAINING;
+// onDrainDone() restarts it.
+void C2VDAComponent::onDequeueWork() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onDequeueWork");
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+    if (mQueue.empty()) {
+        return;
+    }
+    if (mComponentState == ComponentState::DRAINING) {
+        ALOGV("Temporarily stop dequeueing works since component is draining.");
+        return;
+    }
+    if (mComponentState != ComponentState::STARTED) {
+        ALOGE("Work queue should be empty if the component is not in STARTED state.");
+        return;
+    }
+
+    // Dequeue a work from mQueue.
+    std::unique_ptr<C2Work> work(std::move(mQueue.front().mWork));
+    auto drainMode = mQueue.front().mDrainMode;
+    mQueue.pop();
+
+    CHECK_EQ(work->input.buffers.size(), 1u);
+    C2ConstLinearBlock linearBlock = work->input.buffers.front()->data().linearBlocks().front();
+    // linearBlock.size() == 0 means this is a dummy work. No decode needed.
+    if (linearBlock.size() > 0) {
+        // Send input buffer to VDA for decode.
+        // Use frameIndex as bitstreamId.
+        int32_t bitstreamId = frameIndexToBitstreamId(work->input.ordinal.frameIndex);
+        sendInputBufferToAccelerator(linearBlock, bitstreamId);
+    }
+
+    // Pre-fill the (single) worklet; output buffers are attached later in
+    // onOutputBufferDone().
+    CHECK_EQ(work->worklets.size(), 1u);
+    work->worklets.front()->output.flags = static_cast<C2FrameData::flags_t>(0);
+    work->worklets.front()->output.buffers.clear();
+    work->worklets.front()->output.ordinal = work->input.ordinal;
+
+    if (drainMode != NO_DRAIN) {
+        mVDAAdaptor->flush();
+        mComponentState = ComponentState::DRAINING;
+        mDrainWithEOS = drainMode == DRAIN_COMPONENT_WITH_EOS;
+    }
+
+    // Put work to mPendingWorks.
+    mPendingWorks.emplace_back(std::move(work));
+
+    if (!mQueue.empty()) {
+        mTaskRunner->PostTask(FROM_HERE,
+                              base::Bind(&C2VDAComponent::onDequeueWork, base::Unretained(this)));
+    }
+}
+
+// Component-thread task: the VDA has consumed the input identified by
+// |bitstreamId|. Releases the work's input C2Buffer and reports any works that
+// are now fully finished.
+void C2VDAComponent::onInputBufferDone(int32_t bitstreamId) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onInputBufferDone: bitstream id=%d", bitstreamId);
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    C2Work* work = getPendingWorkByBitstreamId(bitstreamId);
+    if (!work) {
+        reportError(C2_CORRUPTED);
+        return;
+    }
+
+    // When the work is done, the input buffer shall be reset by component.
+    work->input.buffers.front().reset();
+
+    reportFinishedWorkIfAny();
+}
+
+// This is used as callback while output buffer is released by client.
+// TODO(johnylin): consider to use C2Buffer::registerOnDestroyNotify instead
+// May run on any thread (it is invoked from C2VDAGraphicBuffer's destructor),
+// so it only posts to the component thread.
+void C2VDAComponent::returnOutputBuffer(int32_t pictureBufferId) {
+    mTaskRunner->PostTask(FROM_HERE, base::Bind(&C2VDAComponent::onOutputBufferReturned,
+                                                base::Unretained(this), pictureBufferId));
+}
+
+// Component-thread task: the client released the output buffer identified by
+// |pictureBufferId|. Either resumes a deferred output-format change or hands
+// the buffer straight back to the VDA for reuse.
+void C2VDAComponent::onOutputBufferReturned(int32_t pictureBufferId) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onOutputBufferReturned: picture id=%d", pictureBufferId);
+    if (mComponentState == ComponentState::UNINITIALIZED) {
+        // Output buffer is returned from client after component is stopped. Just let the buffer be
+        // released.
+        return;
+    }
+
+    // TODO(johnylin): when buffer is returned, we should confirm that output format is not changed
+    //                 yet. If changed, just let the buffer be released.
+    GraphicBlockInfo* info = getGraphicBlockById(pictureBufferId);
+    if (!info) {
+        reportError(C2_CORRUPTED);
+        return;
+    }
+    CHECK_EQ(info->mState, GraphicBlockInfo::State::OWNED_BY_CLIENT);
+    info->mState = GraphicBlockInfo::State::OWNED_BY_COMPONENT;
+
+    if (mPendingOutputFormat) {
+        tryChangeOutputFormat();
+    } else {
+        sendOutputBufferToAccelerator(info);
+    }
+}
+
+// Component-thread task: the VDA decoded picture |pictureBufferId| for input
+// |bitstreamId|. Wraps the graphic block in a C2VDAGraphicBuffer (whose
+// destructor recycles the picture buffer via returnOutputBuffer) and attaches
+// it to the matching pending work.
+void C2VDAComponent::onOutputBufferDone(int32_t pictureBufferId, int32_t bitstreamId) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onOutputBufferDone: picture id=%d, bitstream id=%d", pictureBufferId, bitstreamId);
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    C2Work* work = getPendingWorkByBitstreamId(bitstreamId);
+    if (!work) {
+        reportError(C2_CORRUPTED);
+        return;
+    }
+    GraphicBlockInfo* info = getGraphicBlockById(pictureBufferId);
+    if (!info) {
+        reportError(C2_CORRUPTED);
+        return;
+    }
+    CHECK_EQ(info->mState, GraphicBlockInfo::State::OWNED_BY_ACCELERATOR);
+    // Output buffer will be passed to client soon along with mListener->onWorkDone_nb().
+    info->mState = GraphicBlockInfo::State::OWNED_BY_CLIENT;
+
+    // Attach output buffer to the work corresponded to bitstreamId.
+    work->worklets.front()->output.buffers.emplace_back(std::make_shared<C2VDAGraphicBuffer>(
+            info->mGraphicBlock, mOutputFormat.mVisibleRect,
+            base::Bind(&C2VDAComponent::returnOutputBuffer, mWeakThisFactory.GetWeakPtr(),
+                       pictureBufferId)));
+
+    // Enforce (via CHECK) that output timestamps never decrease within a stream.
+    // TODO: this does not work for timestamps as they can wrap around
+    int64_t currentTimestamp = base::checked_cast<int64_t>(work->input.ordinal.timestamp.peek());
+    CHECK_GE(currentTimestamp, mLastOutputTimestamp);
+    mLastOutputTimestamp = currentTimestamp;
+
+    reportFinishedWorkIfAny();
+}
+
+// Component-thread task: handles a drain request. If works are still queued,
+// the drain is deferred by tagging the last queued work; if works are only
+// pending in the VDA, the flush is issued immediately; otherwise it is a no-op.
+void C2VDAComponent::onDrain(uint32_t drainMode) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onDrain: mode = %u", drainMode);
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    if (!mQueue.empty()) {
+        // Mark last queued work as "drain-till-here" by setting drainMode. Do not change drainMode
+        // if last work already has one.
+        if (mQueue.back().mDrainMode == NO_DRAIN) {
+            mQueue.back().mDrainMode = drainMode;
+        }
+    } else if (!mPendingWorks.empty()) {
+        // Neglect drain request if component is not in STARTED mode. Otherwise, enters DRAINING
+        // mode and signal VDA flush immediately.
+        if (mComponentState == ComponentState::STARTED) {
+            mVDAAdaptor->flush();
+            mComponentState = ComponentState::DRAINING;
+            mDrainWithEOS = drainMode == DRAIN_COMPONENT_WITH_EOS;
+        } else {
+            ALOGV("Neglect drain. Component in state: %d", mComponentState);
+        }
+    } else {
+        // Do nothing.
+        ALOGV("No buffers in VDA, drain takes no effect.");
+    }
+}
+
+// Component-thread task: the VDA finished flushing. Leaves DRAINING, reports
+// the EOS work if this drain carried one, and restarts work dequeueing.
+void C2VDAComponent::onDrainDone() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onDrainDone");
+    if (mComponentState == ComponentState::DRAINING) {
+        mComponentState = ComponentState::STARTED;
+    } else if (mComponentState == ComponentState::STOPPING) {
+        // The client signaled stop right before the VDA notified drain done.
+        // Let the stop process continue.
+        return;
+    } else {
+        ALOGE("Unexpected state while onDrainDone(). State=%d", mComponentState);
+        reportError(C2_BAD_STATE);
+        return;
+    }
+
+    if (mDrainWithEOS) {
+        // Return EOS work.
+        reportEOSWork();
+    }
+    // mPendingWorks must be empty after draining is finished.
+    CHECK(mPendingWorks.empty());
+
+    // Last stream is finished. Reset the timestamp record.
+    mLastOutputTimestamp = -1;
+
+    // Work dequeueing was stopped while component draining. Restart it.
+    mTaskRunner->PostTask(FROM_HERE,
+                          base::Bind(&C2VDAComponent::onDequeueWork, base::Unretained(this)));
+}
+
+// Component-thread task: starts a flush. Issues a VDA reset and moves all
+// still-queued works to mPendingWorks so they can be reported as abandoned
+// once the reset completes (onResetDone -> onFlushDone).
+void C2VDAComponent::onFlush() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onFlush");
+    if (mComponentState == ComponentState::FLUSHING) {
+        return;  // Ignore other flush request when component is flushing.
+    }
+    EXPECT_STATE_OR_RETURN_ON_ERROR(STARTED);
+
+    mVDAAdaptor->reset();
+    // Pop all works in mQueue and put into mPendingWorks.
+    while (!mQueue.empty()) {
+        mPendingWorks.emplace_back(std::move(mQueue.front().mWork));
+        mQueue.pop();
+    }
+    mComponentState = ComponentState::FLUSHING;
+}
+
+// Component-thread task: starts the stop sequence. Reuses an in-flight flush
+// reset if one exists; the final teardown happens in onStopDone() when the VDA
+// reports the reset complete.
+void C2VDAComponent::onStop(base::WaitableEvent* done) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onStop");
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    // Do not request VDA reset again before the previous one is done. If reset is already sent by
+    // onFlush(), just regard the following NotifyResetDone callback as for stopping.
+    if (mComponentState != ComponentState::FLUSHING) {
+        mVDAAdaptor->reset();
+    }
+
+    // Pop all works in mQueue and put into mPendingWorks.
+    while (!mQueue.empty()) {
+        mPendingWorks.emplace_back(std::move(mQueue.front().mWork));
+        mQueue.pop();
+    }
+
+    mStopDoneEvent = done;  // store the done event, which should be signaled in onStopDone().
+    mComponentState = ComponentState::STOPPING;
+}
+
+// Component-thread task: the VDA reset completed. A reset is requested either
+// by onFlush() or onStop(); dispatch to the matching completion handler.
+void C2VDAComponent::onResetDone() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    if (mComponentState == ComponentState::ERROR) {
+        return;
+    }
+    if (mComponentState == ComponentState::FLUSHING) {
+        onFlushDone();
+    } else if (mComponentState == ComponentState::STOPPING) {
+        onStopDone();
+    } else {
+        reportError(C2_CORRUPTED);
+    }
+}
+
+// Completes a flush: reports all pending works as abandoned and returns the
+// component to STARTED.
+void C2VDAComponent::onFlushDone() {
+    ALOGV("onFlushDone");
+    reportAbandonedWorks();
+    // Reset the timestamp record.
+    mLastOutputTimestamp = -1;
+    mComponentState = ComponentState::STARTED;
+}
+
+// Completes a stop: releases the block pool, abandons pending works, destroys
+// the VDA adaptor and graphic blocks, then signals the event the blocked
+// stop() caller is waiting on. Returns the component to UNINITIALIZED.
+void C2VDAComponent::onStopDone() {
+    ALOGV("onStopDone");
+    CHECK(mStopDoneEvent);
+
+    // Release the graphic block allocator object.
+    mOutputBlockPool.reset();
+
+    // TODO(johnylin): At this moment, there may be C2Buffer still owned by client, do we need to
+    // do something for them?
+    reportAbandonedWorks();
+    mPendingOutputFormat.reset();
+    mLastOutputTimestamp = -1;
+    if (mVDAAdaptor.get()) {
+        mVDAAdaptor->destroy();
+        mVDAAdaptor.reset(nullptr);
+    }
+
+    mGraphicBlocks.clear();
+
+    mStopDoneEvent->Signal();
+    mStopDoneEvent = nullptr;
+    mComponentState = ComponentState::UNINITIALIZED;
+}
+
+// Sets the work-done listener. Only accepted while the component is in LOADED
+// state; returns C2_BAD_STATE otherwise.
+c2_status_t C2VDAComponent::setListener_vb(const std::shared_ptr<C2Component::Listener>& listener,
+                                           c2_blocking_t mayBlock) {
+    UNUSED(mayBlock);
+    // TODO(johnylin): API says this method must be supported in all states, however I'm quite not
+    //                 sure what is the use case.
+    if (mState.load() != State::LOADED) {
+        return C2_BAD_STATE;
+    }
+    mListener = listener;
+    return C2_OK;
+}
+
+// Dups the input block's fd and submits it to the VDA for decoding under
+// |bitstreamId|. NOTE(review): the dup'd fd's ownership presumably transfers
+// to the adaptor (which should close it) — confirm against the adaptor API.
+void C2VDAComponent::sendInputBufferToAccelerator(const C2ConstLinearBlock& input,
+                                                  int32_t bitstreamId) {
+    ALOGV("sendInputBufferToAccelerator");
+    int dupFd = dup(input.handle()->data[0]);
+    if (dupFd < 0) {
+        ALOGE("Failed to dup(%d) input buffer (bitstreamId=%d), errno=%d", input.handle()->data[0],
+              bitstreamId, errno);
+        reportError(C2_CORRUPTED);
+        return;
+    }
+    ALOGV("Decode bitstream ID: %d, offset: %u size: %u", bitstreamId, input.offset(),
+          input.size());
+    mVDAAdaptor->decode(bitstreamId, dupFd, input.offset(), input.size());
+}
+
+// Linear-searches mPendingWorks for the work whose frameIndex maps to
+// |bitstreamId|. Returns nullptr (after logging) if not found.
+C2Work* C2VDAComponent::getPendingWorkByBitstreamId(int32_t bitstreamId) {
+    auto workIter = std::find_if(mPendingWorks.begin(), mPendingWorks.end(),
+                                 [bitstreamId](const std::unique_ptr<C2Work>& w) {
+                                     return frameIndexToBitstreamId(w->input.ordinal.frameIndex) ==
+                                            bitstreamId;
+                                 });
+
+    if (workIter == mPendingWorks.end()) {
+        ALOGE("Can't find pending work by bitstream ID: %d", bitstreamId);
+        return nullptr;
+    }
+    return workIter->get();
+}
+
+// Returns the graphic-block record for |blockId|, or nullptr if the id is out
+// of range. Block ids are indices into mGraphicBlocks (see appendOutputBuffer).
+C2VDAComponent::GraphicBlockInfo* C2VDAComponent::getGraphicBlockById(int32_t blockId) {
+    if (blockId < 0 || blockId >= static_cast<int32_t>(mGraphicBlocks.size())) {
+        ALOGE("getGraphicBlockById failed: id=%d", blockId);
+        return nullptr;
+    }
+    return &mGraphicBlocks[blockId];
+}
+
+// Component-thread task: the VDA announced a new output format. Reclaims
+// buffers still held by the accelerator, records the pending format, and
+// attempts the switch (which may wait for client-held buffers).
+void C2VDAComponent::onOutputFormatChanged(std::unique_ptr<VideoFormat> format) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onOutputFormatChanged");
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    ALOGV("New output format(pixel_format=0x%x, min_num_buffers=%u, coded_size=%s, crop_rect=%s)",
+          static_cast<uint32_t>(format->mPixelFormat), format->mMinNumBuffers,
+          format->mCodedSize.ToString().c_str(), format->mVisibleRect.ToString().c_str());
+
+    for (auto& info : mGraphicBlocks) {
+        if (info.mState == GraphicBlockInfo::State::OWNED_BY_ACCELERATOR)
+            info.mState = GraphicBlockInfo::State::OWNED_BY_COMPONENT;
+    }
+
+    CHECK(!mPendingOutputFormat);
+    mPendingOutputFormat = std::move(format);
+    tryChangeOutputFormat();
+}
+
+// Applies mPendingOutputFormat once no output buffer is held by the client:
+// adopts the new format fields, reallocates the buffer set from the block
+// pool, and hands every buffer to the VDA. Re-entered from
+// onOutputBufferReturned() while client-held buffers remain outstanding.
+void C2VDAComponent::tryChangeOutputFormat() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("tryChangeOutputFormat");
+    CHECK(mPendingOutputFormat);
+
+    // Change the output format only after all output buffers are returned
+    // from clients.
+    // TODO(johnylin): don't need to wait for new proposed buffer flow.
+    for (const auto& info : mGraphicBlocks) {
+        if (info.mState == GraphicBlockInfo::State::OWNED_BY_CLIENT) {
+            ALOGV("wait buffer: %d for output format change", info.mBlockId);
+            return;
+        }
+    }
+
+    // Only YCbCr_420_888 output is supported by this component.
+    CHECK_EQ(mPendingOutputFormat->mPixelFormat, HalPixelFormat::YCbCr_420_888);
+
+    mOutputFormat.mPixelFormat = mPendingOutputFormat->mPixelFormat;
+    mOutputFormat.mMinNumBuffers = mPendingOutputFormat->mMinNumBuffers;
+    mOutputFormat.mCodedSize = mPendingOutputFormat->mCodedSize;
+
+    setOutputFormatCrop(mPendingOutputFormat->mVisibleRect);
+
+    c2_status_t err = allocateBuffersFromBlockAllocator(
+            mPendingOutputFormat->mCodedSize,
+            static_cast<uint32_t>(mPendingOutputFormat->mPixelFormat));
+    if (err != C2_OK) {
+        reportError(err);
+        return;
+    }
+
+    for (auto& info : mGraphicBlocks) {
+        sendOutputBufferToAccelerator(&info);
+    }
+    mPendingOutputFormat.reset();
+}
+
+// Allocates the full output-buffer set (VDA minimum + kDpbOutputBufferExtraCount)
+// from the client-configured C2 block pool and registers each block via
+// appendOutputBuffer(). On any allocation failure the set is cleared, the
+// error is reported, and the error code is returned.
+c2_status_t C2VDAComponent::allocateBuffersFromBlockAllocator(const media::Size& size,
+                                                              uint32_t pixelFormat) {
+    ALOGV("allocateBuffersFromBlockAllocator(%s, 0x%x)", size.ToString().c_str(), pixelFormat);
+
+    size_t bufferCount = mOutputFormat.mMinNumBuffers + kDpbOutputBufferExtraCount;
+
+    // Allocate the output buffers.
+    mVDAAdaptor->assignPictureBuffers(bufferCount);
+
+    // Get block pool ID configured from the client.
+    auto poolId = mIntfImpl->getBlockPoolId();
+    ALOGI("Using C2BlockPool ID = %" PRIu64 " for allocating output buffers", poolId);
+    c2_status_t err;
+    // Re-fetch the pool only if none is cached or the configured id changed.
+    if (!mOutputBlockPool || mOutputBlockPool->getLocalId() != poolId) {
+        err = GetCodec2BlockPool(poolId, shared_from_this(), &mOutputBlockPool);
+        if (err != C2_OK) {
+            ALOGE("Graphic block allocator is invalid");
+            reportError(err);
+            return err;
+        }
+    }
+
+    mGraphicBlocks.clear();
+    for (size_t i = 0; i < bufferCount; ++i) {
+        std::shared_ptr<C2GraphicBlock> block;
+        C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, 0};
+        err = mOutputBlockPool->fetchGraphicBlock(size.width(), size.height(), pixelFormat, usage,
+                                                  &block);
+        if (err != C2_OK) {
+            mGraphicBlocks.clear();
+            ALOGE("failed to allocate buffer: %d", err);
+            reportError(err);
+            return err;
+        }
+        appendOutputBuffer(std::move(block));
+    }
+    mOutputFormat.mMinNumBuffers = bufferCount;
+    return C2_OK;
+}
+
+// Registers |block| as the next output buffer: maps it to inspect the plane
+// layout, derives per-plane offsets/strides, detects YCrCb ordering and
+// semi-planar layout to resolve the HAL pixel format, dups the buffer fd for
+// handoff to the VDA, and appends the resulting GraphicBlockInfo (its mBlockId
+// is the index in mGraphicBlocks).
+void C2VDAComponent::appendOutputBuffer(std::shared_ptr<C2GraphicBlock> block) {
+    GraphicBlockInfo info;
+    info.mBlockId = static_cast<int32_t>(mGraphicBlocks.size());
+    info.mGraphicBlock = std::move(block);
+
+    C2ConstGraphicBlock constBlock = info.mGraphicBlock->share(
+            C2Rect(info.mGraphicBlock->width(), info.mGraphicBlock->height()), C2Fence());
+
+    const C2GraphicView& view = constBlock.map().get();
+    const uint8_t* const* data = view.data();
+    CHECK_NE(data, nullptr);
+    const C2PlanarLayout& layout = view.layout();
+
+    ALOGV("allocate graphic buffer: %p, id: %d, size: %dx%d", info.mGraphicBlock->handle(),
+          info.mBlockId, info.mGraphicBlock->width(), info.mGraphicBlock->height());
+
+    // get offset from data pointers
+    uint32_t offsets[C2PlanarLayout::MAX_NUM_PLANES];
+    auto baseAddress = reinterpret_cast<intptr_t>(data[0]);
+    for (uint32_t i = 0; i < layout.numPlanes; ++i) {
+        auto planeAddress = reinterpret_cast<intptr_t>(data[i]);
+        offsets[i] = static_cast<uint32_t>(planeAddress - baseAddress);
+    }
+
+    // If the V plane precedes the U plane in memory, this is a YCrCb layout;
+    // swap the offsets so they are always passed in U-then-V order.
+    bool crcb = false;
+    if (layout.numPlanes == 3 &&
+        offsets[C2PlanarLayout::PLANE_U] > offsets[C2PlanarLayout::PLANE_V]) {
+        // YCrCb format
+        std::swap(offsets[C2PlanarLayout::PLANE_U], offsets[C2PlanarLayout::PLANE_V]);
+        crcb = true;
+    }
+
+    // Interleaved chroma (column increment 2) means a semi-planar layout; the
+    // chroma planes are then described as a single plane.
+    bool semiplanar = false;
+    uint32_t passedNumPlanes = layout.numPlanes;
+    if (layout.planes[C2PlanarLayout::PLANE_U].colInc == 2) {  // chroma_step
+        // Semi-planar format
+        passedNumPlanes--;
+        semiplanar = true;
+    }
+
+    for (uint32_t i = 0; i < passedNumPlanes; ++i) {
+        ALOGV("plane %u: stride: %d, offset: %u", i, layout.planes[i].rowInc, offsets[i]);
+    }
+#ifdef V4L2_CODEC2_ARC
+    info.mPixelFormat = arc::C2VDAAdaptorProxy::ResolveBufferFormat(crcb, semiplanar);
+#else
+    info.mPixelFormat = C2VDAAdaptor::ResolveBufferFormat(crcb, semiplanar);
+#endif
+    ALOGV("HAL pixel format: 0x%x", static_cast<uint32_t>(info.mPixelFormat));
+
+    // Dup the fd so the VDA gets its own reference to the buffer.
+    base::ScopedFD passedHandle(dup(info.mGraphicBlock->handle()->data[0]));
+    if (!passedHandle.is_valid()) {
+        ALOGE("Failed to dup(%d), errno=%d", info.mGraphicBlock->handle()->data[0], errno);
+        reportError(C2_CORRUPTED);
+        return;
+    }
+    std::vector<VideoFramePlane> passedPlanes;
+    for (uint32_t i = 0; i < passedNumPlanes; ++i) {
+        CHECK_GT(layout.planes[i].rowInc, 0);
+        passedPlanes.push_back({offsets[i], static_cast<uint32_t>(layout.planes[i].rowInc)});
+    }
+    info.mHandle = std::move(passedHandle);
+    info.mPlanes = std::move(passedPlanes);
+
+    mGraphicBlocks.push_back(std::move(info));
+}
+
+// Hands one output buffer to the VDA: first time as an import (the dup'd fd in
+// mHandle is released to the VDA), afterwards as a plain reuse by id.
+void C2VDAComponent::sendOutputBufferToAccelerator(GraphicBlockInfo* info) {
+    ALOGV("sendOutputBufferToAccelerator index=%d", info->mBlockId);
+    CHECK_EQ(info->mState, GraphicBlockInfo::State::OWNED_BY_COMPONENT);
+    info->mState = GraphicBlockInfo::State::OWNED_BY_ACCELERATOR;
+
+    // is_valid() is true for the first time the buffer is passed to VDA. In that case, VDA needs to
+    // import the buffer first.
+    if (info->mHandle.is_valid()) {
+        mVDAAdaptor->importBufferForPicture(info->mBlockId, info->mPixelFormat,
+                                            info->mHandle.release(), info->mPlanes);
+    } else {
+        mVDAAdaptor->reusePictureBuffer(info->mBlockId);
+    }
+}
+
+// Component-thread task: the VDA reported a new visible (crop) rectangle for
+// the current output format. Must not race a pending format change.
+void C2VDAComponent::onVisibleRectChanged(const media::Rect& cropRect) {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    ALOGV("onVisibleRectChanged");
+    EXPECT_RUNNING_OR_RETURN_ON_ERROR();
+
+    // We should make sure there is no pending output format change. That is, the input cropRect is
+    // corresponding to current output format.
+    CHECK(mPendingOutputFormat == nullptr);
+    setOutputFormatCrop(cropRect);
+}
+
+// Records |cropRect| as the visible rect applied to every output buffer
+// handed to the framework (see C2VDAGraphicBuffer construction).
+void C2VDAComponent::setOutputFormatCrop(const media::Rect& cropRect) {
+    ALOGV("setOutputFormatCrop(%dx%d)", cropRect.width(), cropRect.height());
+    // This visible rect should be set as crop window for each C2ConstGraphicBlock passed to
+    // framework.
+    mOutputFormat.mVisibleRect = cropRect;
+}
+
+c2_status_t C2VDAComponent::queue_nb(std::list<std::unique_ptr<C2Work>>* const items) {
+    if (mState.load() != State::RUNNING) {
+        return C2_BAD_STATE;
+    }
+    while (!items->empty()) {
+        mTaskRunner->PostTask(FROM_HERE,
+                              base::Bind(&C2VDAComponent::onQueueWork, base::Unretained(this),
+                                         base::Passed(&items->front())));
+        items->pop_front();
+    }
+    return C2_OK;
+}
+
+// Work announcement is part of the tunneling API, which this component does not
+// implement; every announced outline is ignored.
+c2_status_t C2VDAComponent::announce_nb(const std::vector<C2WorkOutline>& items) {
+    (void)items;
+    return C2_OMITTED;  // Tunneling is not supported by now
+}
+
+// Semi-blocking flush: posts onFlush to the component thread and returns immediately.
+// Only FLUSH_COMPONENT mode is supported; requires RUNNING state.
+c2_status_t C2VDAComponent::flush_sm(flush_mode_t mode,
+                                     std::list<std::unique_ptr<C2Work>>* const flushedWork) {
+    if (mode != FLUSH_COMPONENT) {
+        return C2_OMITTED;  // Tunneling is not supported by now
+    }
+    if (mState.load() != State::RUNNING) {
+        return C2_BAD_STATE;
+    }
+    mTaskRunner->PostTask(FROM_HERE, base::Bind(&C2VDAComponent::onFlush, base::Unretained(this)));
+    // Instead of |flushedWork|, abandoned works will be returned via onWorkDone_nb() callback.
+    return C2_OK;
+}
+
+// Non-blocking drain request: posts onDrain with the requested mode to the component
+// thread. Only the non-tunneled drain modes are supported; requires RUNNING state.
+c2_status_t C2VDAComponent::drain_nb(drain_mode_t mode) {
+    if (mode != DRAIN_COMPONENT_WITH_EOS && mode != DRAIN_COMPONENT_NO_EOS) {
+        return C2_OMITTED;  // Tunneling is not supported by now
+    }
+    if (mState.load() != State::RUNNING) {
+        return C2_BAD_STATE;
+    }
+    mTaskRunner->PostTask(FROM_HERE, base::Bind(&C2VDAComponent::onDrain, base::Unretained(this),
+                                                static_cast<uint32_t>(mode)));
+    return C2_OK;
+}
+
+// Synchronously starts the component: posts onStart to the component thread and blocks on
+// |done| until VDA initialization completes. Only valid from LOADED; transitions to
+// RUNNING on success, or returns C2_CORRUPTED if the VDA failed to initialize.
+c2_status_t C2VDAComponent::start() {
+    // Use mStartStopLock to block other asynchronously start/stop calls.
+    std::lock_guard<std::mutex> lock(mStartStopLock);
+
+    if (mState.load() != State::LOADED) {
+        return C2_BAD_STATE;  // start() is only supported when component is in LOADED state.
+    }
+
+    mCodecProfile = mIntfImpl->getCodecProfile();
+    ALOGI("get parameter: mCodecProfile = %d", static_cast<int>(mCodecProfile));
+
+    // Block this (caller) thread until the component thread signals |done|.
+    base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+                             base::WaitableEvent::InitialState::NOT_SIGNALED);
+    mTaskRunner->PostTask(FROM_HERE, base::Bind(&C2VDAComponent::onStart, base::Unretained(this),
+                                                mCodecProfile, &done));
+    done.Wait();
+    if (mVDAInitResult != VideoDecodeAcceleratorAdaptor::Result::SUCCESS) {
+        ALOGE("Failed to start component due to VDA error: %d", static_cast<int>(mVDAInitResult));
+        return C2_CORRUPTED;
+    }
+    mState.store(State::RUNNING);
+    return C2_OK;
+}
+
+// Synchronously stops the component: posts onStop to the component thread, waits for it
+// to finish, then transitions back to LOADED. A no-op unless currently RUNNING or ERROR.
+c2_status_t C2VDAComponent::stop() {
+    // Use mStartStopLock to block other asynchronously start/stop calls.
+    std::lock_guard<std::mutex> lock(mStartStopLock);
+
+    auto state = mState.load();
+    if (!(state == State::RUNNING || state == State::ERROR)) {
+        return C2_OK;  // Component is already in stopped state.
+    }
+
+    // Block this (caller) thread until the component thread signals |done|.
+    base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+                             base::WaitableEvent::InitialState::NOT_SIGNALED);
+    mTaskRunner->PostTask(FROM_HERE,
+                          base::Bind(&C2VDAComponent::onStop, base::Unretained(this), &done));
+    done.Wait();
+    mState.store(State::LOADED);
+    return C2_OK;
+}
+
+// Resets the component. Currently implemented as stop(); the TODOs below describe how a
+// true reset should eventually differ.
+c2_status_t C2VDAComponent::reset() {
+    return stop();
+    // TODO(johnylin): reset is different than stop that it could be called in any state.
+    // TODO(johnylin): when reset is called, set ComponentInterface to default values.
+}
+
+// Releases the component; currently delegates to reset().
+c2_status_t C2VDAComponent::release() {
+    return reset();
+}
+
+// Returns the component's parameter interface.
+std::shared_ptr<C2ComponentInterface> C2VDAComponent::intf() {
+    return mIntf;
+}
+
+// VDA callback: the accelerator requests at least |minNumBuffers| output buffers of
+// |codedSize|. Hops to the component thread via PostTask to apply the format change.
+void C2VDAComponent::providePictureBuffers(uint32_t minNumBuffers, const media::Size& codedSize) {
+    // Always use flexible pixel 420 format YCbCr_420_888 in Android.
+    // Uses coded size for crop rect while it is not available.
+    auto format = std::make_unique<VideoFormat>(HalPixelFormat::YCbCr_420_888, minNumBuffers,
+                                                codedSize, media::Rect(codedSize));
+
+    // Set mRequestedVisibleRect to default.
+    mRequestedVisibleRect = media::Rect();
+
+    mTaskRunner->PostTask(FROM_HERE, base::Bind(&C2VDAComponent::onOutputFormatChanged,
+                                                base::Unretained(this), base::Passed(&format)));
+}
+
+// VDA callback for dismissing a picture buffer. Intentionally a no-op.
+void C2VDAComponent::dismissPictureBuffer(int32_t pictureBufferId) {
+    (void)pictureBufferId;
+}
+
+// VDA callback: a decoded picture is ready. If the crop rect changed since the last
+// picture, a visible-rect update is posted to the component thread first, followed by the
+// output-buffer-done notification (task order preserves this sequencing).
+void C2VDAComponent::pictureReady(int32_t pictureBufferId, int32_t bitstreamId,
+                                  const media::Rect& cropRect) {
+    UNUSED(pictureBufferId);
+    UNUSED(bitstreamId);
+
+    if (mRequestedVisibleRect != cropRect) {
+        mRequestedVisibleRect = cropRect;
+        mTaskRunner->PostTask(FROM_HERE, base::Bind(&C2VDAComponent::onVisibleRectChanged,
+                                                    base::Unretained(this), cropRect));
+    }
+
+    mTaskRunner->PostTask(FROM_HERE,
+                          base::Bind(&C2VDAComponent::onOutputBufferDone, base::Unretained(this),
+                                     pictureBufferId, bitstreamId));
+}
+
+// VDA callback: the input bitstream buffer identified by |bitstreamId| has been consumed.
+// Hops to the component thread to mark the matching input buffer as returned.
+void C2VDAComponent::notifyEndOfBitstreamBuffer(int32_t bitstreamId) {
+    mTaskRunner->PostTask(
+            FROM_HERE,
+            base::Bind(&C2VDAComponent::onInputBufferDone, base::Unretained(this), bitstreamId));
+}
+
+// VDA callback: the accelerator finished flushing. Note this maps to the component's
+// drain completion handler (onDrainDone), not its flush path.
+void C2VDAComponent::notifyFlushDone() {
+    mTaskRunner->PostTask(FROM_HERE, base::Bind(&C2VDAComponent::onDrainDone,
+                                                base::Unretained(this)));
+}
+
+// VDA callback: the accelerator finished resetting; forward to the component thread.
+void C2VDAComponent::notifyResetDone() {
+    mTaskRunner->PostTask(FROM_HERE, base::Bind(&C2VDAComponent::onResetDone,
+                                                base::Unretained(this)));
+}
+
+void C2VDAComponent::notifyError(VideoDecodeAcceleratorAdaptor::Result error) {
+    ALOGE("Got notifyError from VDA error=%d", error);
+    c2_status_t err;
+    switch (error) {
+    case VideoDecodeAcceleratorAdaptor::Result::ILLEGAL_STATE:
+        err = C2_BAD_STATE;
+        break;
+    case VideoDecodeAcceleratorAdaptor::Result::INVALID_ARGUMENT:
+    case VideoDecodeAcceleratorAdaptor::Result::UNREADABLE_INPUT:
+        err = C2_BAD_VALUE;
+        break;
+    case VideoDecodeAcceleratorAdaptor::Result::PLATFORM_FAILURE:
+        err = C2_CORRUPTED;
+        break;
+    case VideoDecodeAcceleratorAdaptor::Result::INSUFFICIENT_RESOURCES:
+        err = C2_NO_MEMORY;
+        break;
+    case VideoDecodeAcceleratorAdaptor::Result::SUCCESS:
+        ALOGE("Shouldn't get SUCCESS err code in NotifyError(). Skip it...");
+        return;
+    }
+    reportError(err);
+}
+
+// Scans mPendingWorks (on the component thread) and reports every fully-finished work to
+// the listener in one onWorkDone_nb() batch.
+void C2VDAComponent::reportFinishedWorkIfAny() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    std::list<std::unique_ptr<C2Work>> finishedWorks;
+
+    // Work should be reported as done if both input and output buffer are returned by VDA.
+
+    // Note that not every input buffer has matched output (ex. CSD header for H.264).
+    // However, the timestamp is guaranteed to be monotonic increasing for buffers in display order.
+    // That is, since VDA output is in display order, if we get a returned output with timestamp T,
+    // it implies all works with timestamp <= T are done.
+    // EOS work will not be reported here. reportEOSWork() does it.
+    auto iter = mPendingWorks.begin();
+    while (iter != mPendingWorks.end()) {
+        if (isWorkDone(iter->get())) {
+            iter->get()->result = C2_OK;
+            iter->get()->workletsProcessed = static_cast<uint32_t>(iter->get()->worklets.size());
+            // erase() returns the next iterator, so the loop continues correctly after removal.
+            finishedWorks.emplace_back(std::move(*iter));
+            iter = mPendingWorks.erase(iter);
+        } else {
+            ++iter;
+        }
+    }
+
+    if (!finishedWorks.empty()) {
+        mListener->onWorkDone_nb(shared_from_this(), std::move(finishedWorks));
+    }
+}
+
+// Returns true when |work| is fully finished: its input buffer has been returned by VDA
+// and, by output-timestamp monotonicity, its output (if any) has been produced. The EOS
+// work is deliberately excluded; reportEOSWork() handles it.
+bool C2VDAComponent::isWorkDone(const C2Work* work) const {
+    if (work->input.buffers.front()) {
+        // Input buffer is still owned by VDA.
+        // This condition could also recognize dummy EOS work since it won't get
+        // onInputBufferDone(), input buffer won't be reset until reportEOSWork().
+        return false;
+    }
+    if (mComponentState == ComponentState::DRAINING && mDrainWithEOS &&
+        mPendingWorks.size() == 1u) {
+        // If component is in DRAINING state and mDrainWithEOS is true. The last returned work
+        // should be marked EOS flag and returned by reportEOSWork() instead.
+        return false;
+    }
+    if (mLastOutputTimestamp < 0) {
+        return false;  // No output buffer is returned yet.
+    }
+    if (work->input.ordinal.timestamp > static_cast<uint64_t>(mLastOutputTimestamp)) {
+        return false;  // Output buffer is not returned by VDA yet.
+    }
+    return true;  // Output buffer is returned, or it has no related output buffer.
+}
+
+// Reports the final (EOS) work to the listener with FLAG_END_OF_STREAM set. Expects it to
+// be the only entry left in mPendingWorks; anything else is treated as a corruption error.
+void C2VDAComponent::reportEOSWork() {
+    ALOGV("reportEOSWork");
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    // In this moment all works prior to EOS work should be done and returned to listener.
+    if (mPendingWorks.size() != 1u) {  // only EOS work left
+        ALOGE("It shouldn't have remaining works in mPendingWorks except EOS work.");
+        reportError(C2_CORRUPTED);
+        return;
+    }
+
+    std::unique_ptr<C2Work> eosWork(std::move(mPendingWorks.front()));
+    mPendingWorks.pop_front();
+    // The component is responsible for resetting the input buffer before returning work.
+    eosWork->input.buffers.front().reset();
+    eosWork->result = C2_OK;
+    eosWork->workletsProcessed = static_cast<uint32_t>(eosWork->worklets.size());
+    eosWork->worklets.front()->output.flags = C2FrameData::FLAG_END_OF_STREAM;
+
+    std::list<std::unique_ptr<C2Work>> finishedWorks;
+    finishedWorks.emplace_back(std::move(eosWork));
+    mListener->onWorkDone_nb(shared_from_this(), std::move(finishedWorks));
+}
+
+// Returns every pending work to the listener with result C2_NOT_FOUND (e.g. on flush),
+// emptying mPendingWorks. Runs on the component thread.
+void C2VDAComponent::reportAbandonedWorks() {
+    DCHECK(mTaskRunner->BelongsToCurrentThread());
+    std::list<std::unique_ptr<C2Work>> abandonedWorks;
+
+    while (!mPendingWorks.empty()) {
+        std::unique_ptr<C2Work> work(std::move(mPendingWorks.front()));
+        mPendingWorks.pop_front();
+
+        // TODO: correlate the definition of flushed work result to framework.
+        work->result = C2_NOT_FOUND;
+        // When the work is abandoned, the input.buffers.front() shall reset by component.
+        work->input.buffers.front().reset();
+        abandonedWorks.emplace_back(std::move(work));
+    }
+
+    if (!abandonedWorks.empty()) {
+        mListener->onWorkDone_nb(shared_from_this(), std::move(abandonedWorks));
+    }
+}
+
+void C2VDAComponent::reportError(c2_status_t error) {
+    mListener->onError_nb(shared_from_this(), static_cast<uint32_t>(error));
+}
+
+// Factory creating C2VDAComponent instances (and their interfaces) for one decoder name.
+// The parameter reflector is shared with the C2VDA component store.
+class C2VDAComponentFactory : public C2ComponentFactory {
+public:
+    // explicit: a decoder-name string should never implicitly convert to a factory.
+    explicit C2VDAComponentFactory(C2String decoderName)
+          : mDecoderName(decoderName),
+            mReflector(std::static_pointer_cast<C2ReflectorHelper>(
+                    GetCodec2VDAComponentStore()->getParamReflector())) {}
+
+    // Creates a full decoder component. |deleter| is unused; the default deleter applies.
+    c2_status_t createComponent(c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+                                ComponentDeleter deleter) override {
+        UNUSED(deleter);
+        *component = std::shared_ptr<C2Component>(new C2VDAComponent(mDecoderName, id, mReflector));
+        return C2_OK;
+    }
+    // Creates only the parameter interface (no decoder is instantiated).
+    c2_status_t createInterface(c2_node_id_t id,
+                                std::shared_ptr<C2ComponentInterface>* const interface,
+                                InterfaceDeleter deleter) override {
+        UNUSED(deleter);
+        *interface =
+                std::shared_ptr<C2ComponentInterface>(new SimpleInterface<C2VDAComponent::IntfImpl>(
+                        mDecoderName.c_str(), id,
+                        std::make_shared<C2VDAComponent::IntfImpl>(mDecoderName, mReflector)));
+        return C2_OK;
+    }
+    ~C2VDAComponentFactory() override = default;
+
+private:
+    const C2String mDecoderName;
+    std::shared_ptr<C2ReflectorHelper> mReflector;
+};
+}  // namespace android
+
+// C entry point used by the component-store loader to create the H.264 factory.
+extern "C" ::C2ComponentFactory* CreateC2VDAH264Factory() {
+    ALOGV("in %s", __func__);
+    auto* factory = new ::android::C2VDAComponentFactory(android::kH264DecoderName);
+    return factory;
+}
+
+// C entry point: destroys a factory created by CreateC2VDAH264Factory.
+extern "C" void DestroyC2VDAH264Factory(::C2ComponentFactory* factory) {
+    ALOGV("in %s", __func__);
+    delete factory;  // delete on nullptr is a safe no-op
+}
+
+// C entry point used by the component-store loader to create the VP8 factory.
+extern "C" ::C2ComponentFactory* CreateC2VDAVP8Factory() {
+    ALOGV("in %s", __func__);
+    auto* factory = new ::android::C2VDAComponentFactory(android::kVP8DecoderName);
+    return factory;
+}
+
+// C entry point: destroys a factory created by CreateC2VDAVP8Factory.
+extern "C" void DestroyC2VDAVP8Factory(::C2ComponentFactory* factory) {
+    ALOGV("in %s", __func__);
+    delete factory;  // delete on nullptr is a safe no-op
+}
+
+// C entry point used by the component-store loader to create the VP9 factory.
+extern "C" ::C2ComponentFactory* CreateC2VDAVP9Factory() {
+    ALOGV("in %s", __func__);
+    auto* factory = new ::android::C2VDAComponentFactory(android::kVP9DecoderName);
+    return factory;
+}
+
+// C entry point: destroys a factory created by CreateC2VDAVP9Factory.
+extern "C" void DestroyC2VDAVP9Factory(::C2ComponentFactory* factory) {
+    ALOGV("in %s", __func__);
+    delete factory;  // delete on nullptr is a safe no-op
+}
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
new file mode 100644
index 0000000..cbdf0f1
--- /dev/null
+++ b/PREUPLOAD.cfg
@@ -0,0 +1,5 @@
+[Builtin Hooks]
+clang_format = true
+
+[Builtin Hooks Options]
+clang_format = --commit ${PREUPLOAD_COMMIT} --style file --extensions h,cpp
diff --git a/cmds/Android.mk b/cmds/Android.mk
new file mode 100644
index 0000000..6cc84ae
--- /dev/null
+++ b/cmds/Android.mk
@@ -0,0 +1,42 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+        codec2.cpp \
+
+LOCAL_C_INCLUDES += \
+        $(TOP)/external/libchrome \
+        $(TOP)/external/gtest/include \
+        $(TOP)/external/v4l2_codec2/include \
+        $(TOP)/external/v4l2_codec2/vda \
+        $(TOP)/frameworks/av/media/libstagefright/include \
+        $(TOP)/frameworks/native/include \
+        $(TOP)/hardware/google/av/codec2/include \
+        $(TOP)/hardware/google/av/codec2/vndk/include \
+        $(TOP)/hardware/google/av/media/codecs/base/include \
+
+LOCAL_MODULE := v4l2_codec2_testapp
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libbinder \
+                          libchrome \
+                          libcutils \
+                          libgui \
+                          liblog \
+                          libmedia \
+                          libmediaextractor \
+                          libstagefright \
+                          libstagefright_codec2 \
+                          libstagefright_foundation \
+                          libstagefright_codec2_vndk \
+                          libui \
+                          libutils \
+                          libv4l2_codec2 \
+                          libv4l2_codec2_vda \
+                          android.hardware.media.bufferpool@1.0 \
+
+# -Wno-unused-parameter is needed for libchrome/base codes
+LOCAL_CFLAGS += -Werror -Wall -Wno-unused-parameter
+LOCAL_CLANG := true
+
+include $(BUILD_EXECUTABLE)
diff --git a/cmds/codec2.cpp b/cmds/codec2.cpp
new file mode 100644
index 0000000..9458bd3
--- /dev/null
+++ b/cmds/codec2.cpp
@@ -0,0 +1,466 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "codec2"
+
+#include <C2VDAComponent.h>
+
+#include <C2Buffer.h>
+#include <C2BufferPriv.h>
+#include <C2Component.h>
+#include <C2PlatformSupport.h>
+#include <C2Work.h>
+#include <SimpleC2Interface.h>
+
+#include <binder/IServiceManager.h>
+#include <binder/ProcessState.h>
+#include <gui/GLConsumer.h>
+#include <gui/IProducerListener.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <media/DataSource.h>
+#include <media/ICrypto.h>
+#include <media/IMediaHTTPService.h>
+#include <media/MediaExtractor.h>
+#include <media/MediaSource.h>
+#include <media/stagefright/DataSourceFactory.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaExtractorFactory.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <thread>
+
+using namespace android;
+using namespace std::chrono_literals;
+
+namespace {
+
+// Component names; kComponentName below selects which decoder the test app exercises.
+const std::string kH264DecoderName = "c2.vda.avc.decoder";
+const std::string kVP8DecoderName = "c2.vda.vp8.decoder";
+const std::string kVP9DecoderName = "c2.vda.vp9.decoder";
+
+// Display surface dimensions; must match the clip being played (see alternates below).
+const int kWidth = 416;
+const int kHeight = 240;  // BigBuckBunny.mp4
+//const int kWidth = 560;
+//const int kHeight = 320;  // small.mp4
+const std::string kComponentName = kH264DecoderName;
+
+// C2Buffer wrapper exposing a whole C2LinearBlock (its offset..size range) as a single
+// linear block with no fence attached.
+class C2VDALinearBuffer : public C2Buffer {
+public:
+    explicit C2VDALinearBuffer(const std::shared_ptr<C2LinearBlock>& block)
+          : C2Buffer({block->share(block->offset(), block->size(), ::C2Fence())}) {}
+};
+
+class Listener;
+
+// Minimal test-app player: pulls access units from an IMediaSource, feeds them to a
+// C2VDAComponent, and renders the decoded buffers onto a SurfaceFlinger surface.
+class SimplePlayer {
+public:
+    SimplePlayer();
+    ~SimplePlayer();
+
+    // C2Component::Listener callbacks, forwarded here by the Listener helper class.
+    void onWorkDone(std::weak_ptr<C2Component> component,
+                    std::list<std::unique_ptr<C2Work>> workItems);
+    void onTripped(std::weak_ptr<C2Component> component,
+                   std::vector<std::shared_ptr<C2SettingResult>> settingResult);
+    void onError(std::weak_ptr<C2Component> component, uint32_t errorCode);
+
+    // Decodes and displays every sample of |source|; blocks until EOS.
+    status_t play(const sp<IMediaSource>& source);
+
+private:
+    typedef std::unique_lock<std::mutex> ULock;
+
+    enum {
+        kInputBufferCount = 8,
+        kDefaultInputBufferSize = 1024 * 1024,
+    };
+
+    std::shared_ptr<Listener> mListener;
+
+    sp<IProducerListener> mProducerListener;
+
+    // Allocators
+    std::shared_ptr<C2Allocator> mLinearAlloc;
+    std::shared_ptr<C2BlockPool> mLinearBlockPool;
+
+    // Free C2Work objects available for new input (guarded by mQueueLock).
+    std::mutex mQueueLock;
+    std::condition_variable mQueueCondition;
+    std::list<std::unique_ptr<C2Work>> mWorkQueue;
+
+    // Works returned by the component, pending display (guarded by mProcessedLock).
+    std::mutex mProcessedLock;
+    std::condition_variable mProcessedCondition;
+    std::list<std::unique_ptr<C2Work>> mProcessedWork;
+
+    sp<Surface> mSurface;
+    sp<SurfaceComposerClient> mComposerClient;
+    sp<SurfaceControl> mControl;
+};
+
+// Thin adapter forwarding C2Component::Listener callbacks to SimplePlayer.
+// NOTE(review): holds a raw back-pointer; assumes |thiz| outlives the component the
+// listener is attached to — holds true here since SimplePlayer owns both.
+class Listener : public C2Component::Listener {
+public:
+    explicit Listener(SimplePlayer* thiz) : mThis(thiz) {}
+    virtual ~Listener() = default;
+
+    virtual void onWorkDone_nb(std::weak_ptr<C2Component> component,
+                               std::list<std::unique_ptr<C2Work>> workItems) override {
+        mThis->onWorkDone(component, std::move(workItems));
+    }
+
+    virtual void onTripped_nb(
+            std::weak_ptr<C2Component> component,
+            std::vector<std::shared_ptr<C2SettingResult>> settingResult) override {
+        mThis->onTripped(component, settingResult);
+    }
+
+    virtual void onError_nb(std::weak_ptr<C2Component> component, uint32_t errorCode) override {
+        mThis->onError(component, errorCode);
+    }
+
+private:
+    SimplePlayer* const mThis;
+};
+
+// Sets up the linear allocator/block pool and a SurfaceFlinger surface for display.
+SimplePlayer::SimplePlayer()
+      : mListener(new Listener(this)),
+        mProducerListener(new DummyProducerListener),
+        mComposerClient(new SurfaceComposerClient) {
+    CHECK_EQ(mComposerClient->initCheck(), OK);
+
+    std::shared_ptr<C2AllocatorStore> store = GetCodec2PlatformAllocatorStore();
+    CHECK_EQ(store->fetchAllocator(C2AllocatorStore::DEFAULT_LINEAR, &mLinearAlloc), C2_OK);
+
+    mLinearBlockPool = std::make_shared<C2BasicLinearBlockPool>(mLinearAlloc);
+
+    mControl = mComposerClient->createSurface(String8("A Surface"), kWidth, kHeight,
+                                              HAL_PIXEL_FORMAT_YV12);
+
+    CHECK(mControl != nullptr);
+    CHECK(mControl->isValid());
+
+    // Put the surface on top (INT_MAX z-order) so the decoded video is visible.
+    SurfaceComposerClient::Transaction{}.setLayer(mControl, INT_MAX).show(mControl).apply();
+
+    mSurface = mControl->getSurface();
+    CHECK(mSurface != nullptr);
+    mSurface->connect(NATIVE_WINDOW_API_CPU, mProducerListener);
+}
+
+// Tears down the composer client connection (and with it, the surface).
+SimplePlayer::~SimplePlayer() {
+    mComposerClient->dispose();
+}
+
+// Listener callback: moves every returned work item onto the processed queue in one O(1)
+// splice, then wakes any thread blocked waiting for processed work.
+void SimplePlayer::onWorkDone(std::weak_ptr<C2Component> component,
+                              std::list<std::unique_ptr<C2Work>> workItems) {
+    (void)component;
+    ULock guard(mProcessedLock);
+    mProcessedWork.splice(mProcessedWork.end(), workItems);
+    mProcessedCondition.notify_all();
+}
+
+// Listener callback for tripped settings. Not handled by this test app yet.
+void SimplePlayer::onTripped(std::weak_ptr<C2Component> component,
+                             std::vector<std::shared_ptr<C2SettingResult>> settingResult) {
+    // TODO: react to the tripped setting results.
+    (void)settingResult;
+    (void)component;
+}
+
+// Listener callback for component errors. Not handled by this test app yet.
+void SimplePlayer::onError(std::weak_ptr<C2Component> component, uint32_t errorCode) {
+    // TODO: surface the error to the user / abort playback.
+    (void)errorCode;
+    (void)component;
+}
+
+// Decodes every access unit from |source| through a C2VDAComponent and displays the
+// decoded frames on mSurface. Blocks until the EOS work is returned. Returns OK on
+// success, or the source's start() error.
+status_t SimplePlayer::play(const sp<IMediaSource>& source) {
+    // H.264 requires the codec-specific data (csd-0/csd-1) to be queued before frames.
+    std::deque<sp<ABuffer>> csds;
+    if (kComponentName == kH264DecoderName) {
+        sp<AMessage> format;
+        (void)convertMetaDataToMessage(source->getFormat(), &format);
+
+        csds.resize(2);
+        format->findBuffer("csd-0", &csds[0]);
+        format->findBuffer("csd-1", &csds[1]);
+    }
+
+    status_t err = source->start();
+
+    if (err != OK) {
+        ALOGE("source returned error %d (0x%08x)", err, err);
+        fprintf(stderr, "source returned error %d (0x%08x)\n", err, err);
+        return err;
+    }
+
+    std::shared_ptr<C2Component> component(std::make_shared<C2VDAComponent>(
+            kComponentName, 0, std::make_shared<C2ReflectorHelper>()));
+
+    component->setListener_vb(mListener, C2_DONT_BLOCK);
+    // Configure the output port to allocate from the basic graphic block pool.
+    std::unique_ptr<C2PortBlockPoolsTuning::output> pools =
+            C2PortBlockPoolsTuning::output::AllocUnique(
+                    {static_cast<uint64_t>(C2BlockPool::BASIC_GRAPHIC)});
+    std::vector<std::unique_ptr<C2SettingResult>> result;
+    (void)component->intf()->config_vb({pools.get()}, C2_DONT_BLOCK, &result);
+    component->start();
+
+    mProcessedWork.clear();
+    // Seed the free-work queue; C2Work objects are recycled between producer and consumer.
+    for (int i = 0; i < kInputBufferCount; ++i) {
+        mWorkQueue.emplace_back(new C2Work);
+    }
+
+    std::atomic_bool running(true);
+    // Consumer thread: waits for processed works, queues decoded graphic buffers onto the
+    // surface for display, and recycles the C2Work objects. Exits after the EOS work.
+    std::thread surfaceThread([this, &running]() {
+        const sp<IGraphicBufferProducer>& igbp = mSurface->getIGraphicBufferProducer();
+        std::vector<std::shared_ptr<C2Buffer>> pendingDisplayBuffers;
+        pendingDisplayBuffers.resize(BufferQueue::NUM_BUFFER_SLOTS);
+        while (running) {
+            std::unique_ptr<C2Work> work;
+            {
+                ULock l(mProcessedLock);
+                if (mProcessedWork.empty()) {
+                    // Bounded wait so the loop can re-check |running| periodically.
+                    mProcessedCondition.wait_for(l, 100ms);
+                    if (mProcessedWork.empty()) {
+                        continue;
+                    }
+                }
+                work = std::move(mProcessedWork.front());
+                mProcessedWork.pop_front();
+            }
+
+            CHECK_EQ(work->worklets.size(), 1u);
+            if (work->worklets.front()->output.buffers.size() == 1u) {
+                int slot;
+                sp<Fence> fence;
+                std::shared_ptr<C2Buffer> output = work->worklets.front()->output.buffers[0];
+                C2ConstGraphicBlock graphic_block = output->data().graphicBlocks().front();
+
+                // Wrap the block's handle (cloned) so it can be attached to the surface's
+                // buffer queue.
+                sp<GraphicBuffer> buffer(new GraphicBuffer(
+                        graphic_block.handle(), GraphicBuffer::CLONE_HANDLE, graphic_block.width(),
+                        graphic_block.height(), HAL_PIXEL_FORMAT_YCbCr_420_888, 1 /* layerCount */,
+                        GRALLOC_USAGE_SW_READ_OFTEN, graphic_block.width()));
+
+                CHECK_EQ(igbp->attachBuffer(&slot, buffer), OK);
+                ALOGV("attachBuffer slot=%d ts=%lld", slot,
+                      (work->worklets.front()->output.ordinal.timestamp * 1000ll).peekll());
+
+                IGraphicBufferProducer::QueueBufferInput qbi(
+                        (work->worklets.front()->output.ordinal.timestamp * 1000ll).peekll(), false,
+                        HAL_DATASPACE_UNKNOWN, Rect(graphic_block.width(), graphic_block.height()),
+                        NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW, 0, Fence::NO_FENCE, 0);
+                IGraphicBufferProducer::QueueBufferOutput qbo;
+                CHECK_EQ(igbp->queueBuffer(slot, qbi, &qbo), OK);
+
+                // If the slot is reused then we can make sure the previous graphic buffer is
+                // displayed (consumed), so we could returned the graphic buffer.
+                pendingDisplayBuffers[slot].swap(output);
+            }
+
+            bool eos = work->worklets.front()->output.flags & C2FrameData::FLAG_END_OF_STREAM;
+            // input buffer should be reset in component side.
+            CHECK_EQ(work->input.buffers.size(), 1u);
+            CHECK(work->input.buffers.front() == nullptr);
+            work->worklets.clear();
+            work->workletsProcessed = 0;
+
+            if (eos) {
+                running.store(false);  // stop the thread
+            }
+
+            // Recycle the emptied work object for the producer loop below.
+            ULock l(mQueueLock);
+            mWorkQueue.emplace_back(std::move(work));
+            mQueueCondition.notify_all();
+        }
+    });
+
+    long numFrames = 0;
+
+    // Producer loop: read one CSD buffer or access unit per iteration and queue it.
+    for (;;) {
+        size_t size = 0u;
+        void* data = nullptr;
+        int64_t timestamp = 0u;
+        MediaBufferBase* buffer = nullptr;
+        sp<ABuffer> csd;
+        if (!csds.empty()) {
+            csd = std::move(csds.front());
+            csds.pop_front();
+            size = csd->size();
+            data = csd->data();
+        } else {
+            status_t err = source->read(&buffer);
+            if (err != OK) {
+                CHECK(buffer == nullptr);
+
+                if (err == INFO_FORMAT_CHANGED) {
+                    continue;
+                }
+
+                // Any other read error ends the input loop.
+                break;
+            }
+            MetaDataBase& meta = buffer->meta_data();
+            CHECK(meta.findInt64(kKeyTime, &timestamp));
+
+            size = buffer->size();
+            data = buffer->data();
+        }
+
+        // Prepare C2Work
+
+        // Block (with periodic wakeups) until a recycled work object is available.
+        std::unique_ptr<C2Work> work;
+        while (!work) {
+            ULock l(mQueueLock);
+            if (!mWorkQueue.empty()) {
+                work = std::move(mWorkQueue.front());
+                mWorkQueue.pop_front();
+            } else {
+                mQueueCondition.wait_for(l, 100ms);
+            }
+        }
+        work->input.flags = static_cast<C2FrameData::flags_t>(0);
+        work->input.ordinal.timestamp = timestamp;
+        work->input.ordinal.frameIndex = numFrames;
+
+        // Allocate input buffer.
+        std::shared_ptr<C2LinearBlock> block;
+        mLinearBlockPool->fetchLinearBlock(
+                size, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
+        C2WriteView view = block->map().get();
+        if (view.error() != C2_OK) {
+            fprintf(stderr, "C2LinearBlock::map() failed : %d\n", view.error());
+            break;
+        }
+        memcpy(view.base(), data, size);
+
+        work->input.buffers.clear();
+        work->input.buffers.emplace_back(new C2VDALinearBuffer(std::move(block)));
+        work->worklets.clear();
+        work->worklets.emplace_back(new C2Worklet);
+
+        std::list<std::unique_ptr<C2Work>> items;
+        items.push_back(std::move(work));
+
+        // DO THE DECODING
+        component->queue_nb(&items);
+
+        if (buffer) {
+            buffer->release();
+        }
+        ++numFrames;
+    }
+    // Signal EOS; the surface thread exits once the EOS work is returned.
+    component->drain_nb(C2Component::DRAIN_COMPONENT_WITH_EOS);
+
+    surfaceThread.join();
+
+    source->stop();
+    component->stop();
+    printf("finished...\n");
+    return OK;
+}
+
+}  // namespace
+
+// Opens |filename|, creates an extractor, and returns (via |source|) the first track
+// whose MIME type matches the decoder selected by kComponentName.
+// Returns false on any failure (unopenable file, no extractor, no matching track).
+static bool getMediaSourceFromFile(const char* filename, sp<IMediaSource>* source) {
+    source->clear();
+
+    sp<DataSource> dataSource =
+            DataSourceFactory::CreateFromURI(nullptr /* httpService */, filename);
+
+    if (dataSource == nullptr) {
+        fprintf(stderr, "Unable to create data source.\n");
+        return false;
+    }
+
+    sp<IMediaExtractor> extractor = MediaExtractorFactory::Create(dataSource);
+    if (extractor == nullptr) {
+        fprintf(stderr, "could not create extractor.\n");
+        return false;
+    }
+
+    std::string expectedMime;
+    if (kComponentName == kH264DecoderName) {
+        expectedMime = "video/avc";
+    } else if (kComponentName == kVP8DecoderName) {
+        expectedMime = "video/x-vnd.on2.vp8";
+    } else if (kComponentName == kVP9DecoderName) {
+        expectedMime = "video/x-vnd.on2.vp9";
+    } else {
+        fprintf(stderr, "unrecognized component name: %s\n", kComponentName.c_str());
+        return false;
+    }
+
+    for (size_t i = 0, numTracks = extractor->countTracks(); i < numTracks; ++i) {
+        sp<MetaData> meta =
+                extractor->getTrackMetaData(i, MediaExtractor::kIncludeExtensiveMetaData);
+        if (meta == nullptr) {
+            continue;
+        }
+        // findCString may fail and leave |mime| untouched; skip such tracks instead of
+        // calling strcasecmp on an uninitialized pointer.
+        const char* mime = nullptr;
+        if (!meta->findCString(kKeyMIMEType, &mime) || mime == nullptr) {
+            continue;
+        }
+        if (!strcasecmp(mime, expectedMime.c_str())) {
+            *source = extractor->getTrack(i);
+            if (*source == nullptr) {
+                fprintf(stderr, "It's nullptr track for track %zu.\n", i);
+                return false;
+            }
+            return true;
+        }
+    }
+    fprintf(stderr, "No track found.\n");
+    return false;
+}
+
+// Prints the command-line help text to stderr (single call, identical output bytes).
+static void usage(const char* me) {
+    fprintf(stderr,
+            "usage: %s [options] [input_filename]...\n"
+            "       -h(elp)\n",
+            me);
+}
+
+// Test-app entry point: parses options, then plays each input file in sequence through
+// one SimplePlayer instance. Returns 0 on success, non-zero on any failure.
+int main(int argc, char** argv) {
+    // Binder thread pool is required for the media/graphics services used below.
+    android::ProcessState::self()->startThreadPool();
+
+    int res;
+    while ((res = getopt(argc, argv, "h")) >= 0) {
+        switch (res) {
+        case 'h':
+        default: {
+            // Any option (including -h or an unknown one) prints usage and exits.
+            usage(argv[0]);
+            exit(1);
+            break;
+        }
+        }
+    }
+
+    argc -= optind;
+    argv += optind;
+
+    if (argc < 1) {
+        fprintf(stderr, "No input file specified\n");
+        return 1;
+    }
+
+    SimplePlayer player;
+
+    // Play each input file sequentially with the same player instance.
+    for (int k = 0; k < argc; ++k) {
+        sp<IMediaSource> mediaSource;
+        if (!getMediaSourceFromFile(argv[k], &mediaSource)) {
+            fprintf(stderr, "Unable to get media source from file: %s\n", argv[k]);
+            return -1;
+        }
+        if (player.play(mediaSource) != OK) {
+            fprintf(stderr, "Player failed to play media source: %s\n", argv[k]);
+            return -1;
+        }
+    }
+
+    return 0;
+}
diff --git a/include/C2ArcVideoAcceleratorFactory.h b/include/C2ArcVideoAcceleratorFactory.h
new file mode 100644
index 0000000..9cdaf37
--- /dev/null
+++ b/include/C2ArcVideoAcceleratorFactory.h
@@ -0,0 +1,37 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_C2_ARC_VIDEO_ACCELERATOR_FACTORY_H
+#define ANDROID_C2_ARC_VIDEO_ACCELERATOR_FACTORY_H
+
+#include <media/arcvideobridge/IArcVideoBridge.h>
+#include <utils/Singleton.h>
+
+#include <components/arc/common/video.mojom.h>
+#include <components/arc/common/video_decode_accelerator.mojom.h>
+#include <components/arc/common/video_encode_accelerator.mojom.h>
+
+namespace android {
+// Helper class to create message pipe to the ArcVideoAccelerator.
+// This class should only be used in the Mojo thread.
+class C2ArcVideoAcceleratorFactory : public Singleton<C2ArcVideoAcceleratorFactory> {
+public:
+    bool createVideoDecodeAccelerator(::arc::mojom::VideoDecodeAcceleratorRequest request);
+    bool createVideoEncodeAccelerator(::arc::mojom::VideoEncodeAcceleratorRequest request);
+    bool createVideoProtectedBufferAllocator(
+            ::arc::mojom::VideoProtectedBufferAllocatorRequest request);
+    int32_t hostVersion() const;
+
+private:
+    C2ArcVideoAcceleratorFactory();
+
+    uint32_t mHostVersion;
+    sp<IArcVideoBridge> mArcVideoBridge;
+    ::arc::mojom::VideoAcceleratorFactoryPtr mRemoteFactory;
+
+    friend class Singleton<C2ArcVideoAcceleratorFactory>;
+};
+}  // namespace android
+
+#endif  // ANDROID_C2_ARC_VIDEO_ACCELERATOR_FACTORY_H
diff --git a/include/C2VDAAdaptor.h b/include/C2VDAAdaptor.h
new file mode 100644
index 0000000..8f24d10
--- /dev/null
+++ b/include/C2VDAAdaptor.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_C2_VDA_ADAPTOR_H
+#define ANDROID_C2_VDA_ADAPTOR_H
+
+#include <VideoDecodeAcceleratorAdaptor.h>
+
+#include <video_decode_accelerator.h>
+
+#include <base/macros.h>
+
+namespace android {
+
+// This class translates adaptor API to media::VideoDecodeAccelerator API to make communication
+// between Codec 2.0 VDA component and VDA.
+class C2VDAAdaptor : public VideoDecodeAcceleratorAdaptor,
+                     public media::VideoDecodeAccelerator::Client {
+public:
+    C2VDAAdaptor();
+    ~C2VDAAdaptor() override;
+
+    // Implementation of the VideoDecodeAcceleratorAdaptor interface.
+    Result initialize(media::VideoCodecProfile profile, bool secureMode,
+                      VideoDecodeAcceleratorAdaptor::Client* client) override;
+    void decode(int32_t bitstreamId, int handleFd, off_t offset, uint32_t bytesUsed) override;
+    void assignPictureBuffers(uint32_t numOutputBuffers) override;
+    void importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format, int handleFd,
+                                const std::vector<VideoFramePlane>& planes) override;
+    void reusePictureBuffer(int32_t pictureBufferId) override;
+    void flush() override;
+    void reset() override;
+    void destroy() override;
+
+    static media::VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles(
+            uint32_t inputFormatFourcc);
+
+    static HalPixelFormat ResolveBufferFormat(bool crcb, bool semiplanar);
+
+    // Implementation of the media::VideoDecodeAccelerator::Client interface.
+    void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
+                               media::VideoPixelFormat output_format,
+                               const media::Size& dimensions) override;
+    void DismissPictureBuffer(int32_t picture_buffer_id) override;
+    void PictureReady(const media::Picture& picture) override;
+    void NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) override;
+    void NotifyFlushDone() override;
+    void NotifyResetDone() override;
+    void NotifyError(media::VideoDecodeAccelerator::Error error) override;
+
+private:
+    std::unique_ptr<media::VideoDecodeAccelerator> mVDA;
+    VideoDecodeAcceleratorAdaptor::Client* mClient;
+
+    // The number of allocated output buffers. This is obtained from assignPictureBuffers call from
+    // client, and used to check validity of picture id in importBufferForPicture and
+    // reusePictureBuffer.
+    uint32_t mNumOutputBuffers;
+    // The picture size for creating picture buffers. This is obtained while VDA calls
+    // ProvidePictureBuffers.
+    media::Size mPictureSize;
+
+    DISALLOW_COPY_AND_ASSIGN(C2VDAAdaptor);
+};
+
+}  // namespace android
+
+#endif  // ANDROID_C2_VDA_ADAPTOR_H
diff --git a/include/C2VDAAdaptorProxy.h b/include/C2VDAAdaptorProxy.h
new file mode 100644
index 0000000..fbfe009
--- /dev/null
+++ b/include/C2VDAAdaptorProxy.h
@@ -0,0 +1,103 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_C2_VDA_ADAPTOR_PROXY_H
+#define ANDROID_C2_VDA_ADAPTOR_PROXY_H
+
+#include <memory>
+
+#include <VideoDecodeAcceleratorAdaptor.h>
+
+#include <video_decode_accelerator.h>
+
+#include <arc/Future.h>
+#include <mojo/public/cpp/bindings/binding.h>
+
+#include <components/arc/common/video.mojom.h>
+#include <components/arc/common/video_decode_accelerator.mojom.h>
+
+namespace arc {
+class MojoProcessSupport;
+}  // namespace arc
+
+namespace android {
+namespace arc {
+class C2VDAAdaptorProxy : public VideoDecodeAcceleratorAdaptor,
+                          public ::arc::mojom::VideoDecodeClient {
+public:
+    C2VDAAdaptorProxy();
+    explicit C2VDAAdaptorProxy(::arc::MojoProcessSupport* MojomProcessSupport);
+    ~C2VDAAdaptorProxy() override;
+
+    // Establishes ipc channel for video acceleration. Returns true if channel
+    // connected successfully.
+    // This must be called before all other methods.
+    bool establishChannel();
+
+    // Implementation of the VideoDecodeAcceleratorAdaptor interface.
+    Result initialize(media::VideoCodecProfile profile, bool secureMode,
+                      VideoDecodeAcceleratorAdaptor::Client* client) override;
+    void decode(int32_t bitstreamId, int handleFd, off_t offset, uint32_t size) override;
+    void assignPictureBuffers(uint32_t numOutputBuffers) override;
+    void importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format, int handleFd,
+                                const std::vector<VideoFramePlane>& planes) override;
+    void reusePictureBuffer(int32_t pictureBufferId) override;
+    void flush() override;
+    void reset() override;
+    void destroy() override;
+
+    // ::arc::mojom::VideoDecodeClient implementations.
+    void ProvidePictureBuffers(::arc::mojom::PictureBufferFormatPtr format) override;
+    void PictureReady(::arc::mojom::PicturePtr picture) override;
+    void NotifyEndOfBitstreamBuffer(int32_t bitstream_id) override;
+    void NotifyError(::arc::mojom::VideoDecodeAccelerator::Result error) override;
+
+    // The following functions are called as callbacks.
+    void NotifyResetDone(::arc::mojom::VideoDecodeAccelerator::Result result);
+    void NotifyFlushDone(::arc::mojom::VideoDecodeAccelerator::Result result);
+
+    static media::VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles(
+            uint32_t inputFormatFourcc);
+    static HalPixelFormat ResolveBufferFormat(bool crcb, bool semiplanar);
+
+private:
+    void onConnectionError(const std::string& pipeName);
+    void establishChannelOnMojoThread(std::shared_ptr<::arc::Future<bool>> future);
+    void onVersionReady(std::shared_ptr<::arc::Future<bool>> future, uint32_t version);
+
+    // Closes ipc channel for video acceleration.
+    // This must be called before deleting this object.
+    void closeChannelOnMojoThread();
+
+    // mojo thread corresponding part of C2VDAAdaptorProxy implementations.
+    void initializeOnMojoThread(const media::VideoCodecProfile profile, const bool mSecureMode,
+                                const ::arc::mojom::VideoDecodeAccelerator::InitializeCallback& cb);
+    void decodeOnMojoThread(int32_t bitstreamId, int ashmemFd, off_t offset, uint32_t bytesUsed);
+    void assignPictureBuffersOnMojoThread(uint32_t numOutputBuffers);
+
+    void importBufferForPictureOnMojoThread(int32_t pictureBufferId, HalPixelFormat format,
+                                            int handleFd,
+                                            const std::vector<VideoFramePlane>& planes);
+    void reusePictureBufferOnMojoThread(int32_t pictureBufferId);
+    void flushOnMojoThread();
+    void resetOnMojoThread();
+
+    VideoDecodeAcceleratorAdaptor::Client* mClient;
+
+    // Task runner for mojom functions.
+    const scoped_refptr<base::SingleThreadTaskRunner> mMojoTaskRunner;
+
+    // |mVDAPtr| and |mBinding| should only be called on |mMojoTaskRunner| after bound.
+    ::arc::mojom::VideoDecodeAcceleratorPtr mVDAPtr;
+    mojo::Binding<::arc::mojom::VideoDecodeClient> mBinding;
+
+    // Used to cancel the wait on arc::Future.
+    sp<::arc::CancellationRelay> mRelay;
+
+    DISALLOW_COPY_AND_ASSIGN(C2VDAAdaptorProxy);
+};
+}  // namespace arc
+}  // namespace android
+
+#endif  // ANDROID_C2_VDA_ADAPTOR_PROXY_H
diff --git a/include/C2VDACommon.h b/include/C2VDACommon.h
new file mode 100644
index 0000000..510a96c
--- /dev/null
+++ b/include/C2VDACommon.h
@@ -0,0 +1,19 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_C2_VDA_COMMON_H
+#define ANDROID_C2_VDA_COMMON_H
+
+#include <inttypes.h>
+
+namespace android {
+enum class HalPixelFormat : uint32_t {
+    UNKNOWN = 0x0,
+    // Pixel formats defined in Android that are used by C2VDAComponent.
+    YCbCr_420_888 = 0x23,
+    YV12 = 0x32315659,
+    NV12 = 0x3231564e,
+};
+} // namespace android
+#endif  // ANDROID_C2_VDA_COMMON_H
diff --git a/include/C2VDAComponent.h b/include/C2VDAComponent.h
new file mode 100644
index 0000000..6df3fe7
--- /dev/null
+++ b/include/C2VDAComponent.h
@@ -0,0 +1,295 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_C2_VDA_COMPONENT_H
+#define ANDROID_C2_VDA_COMPONENT_H
+
+#include <VideoDecodeAcceleratorAdaptor.h>
+
+#include <rect.h>
+#include <size.h>
+#include <video_codecs.h>
+#include <video_decode_accelerator.h>
+
+#include <C2Component.h>
+#include <C2Config.h>
+#include <C2Enum.h>
+#include <C2Param.h>
+#include <C2ParamDef.h>
+#include <SimpleC2Interface.h>
+#include <util/C2InterfaceHelper.h>
+
+#include <base/macros.h>
+#include <base/memory/ref_counted.h>
+#include <base/single_thread_task_runner.h>
+#include <base/synchronization/waitable_event.h>
+#include <base/threading/thread.h>
+
+#include <atomic>
+#include <deque>
+#include <map>
+#include <mutex>
+#include <queue>
+#include <unordered_map>
+
+namespace android {
+
+class C2VDAComponent : public C2Component,
+                       public VideoDecodeAcceleratorAdaptor::Client,
+                       public std::enable_shared_from_this<C2VDAComponent> {
+public:
+    class IntfImpl : public C2InterfaceHelper {
+    public:
+        IntfImpl(C2String name, const std::shared_ptr<C2ReflectorHelper>& helper);
+
+        // interfaces for C2VDAComponent
+        c2_status_t status() const { return mInitStatus; }
+        media::VideoCodecProfile getCodecProfile() const { return mCodecProfile; }
+        C2BlockPool::local_id_t getBlockPoolId() const { return mOutputBlockPoolIds->m.values[0]; }
+
+    private:
+        // The input format kind; should be C2FormatCompressed.
+        std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+        // The output format kind; should be C2FormatVideo.
+        std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+        // The MIME type of input port.
+        std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+        // The MIME type of output port; should be MEDIA_MIMETYPE_VIDEO_RAW.
+        std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
+        // Decoded video size for output.
+        std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
+        // The suggested usage of input buffer allocator ID.
+        std::shared_ptr<C2PortAllocatorsTuning::input> mInputAllocatorIds;
+        // The suggested usage of output buffer allocator ID.
+        std::shared_ptr<C2PortAllocatorsTuning::output> mOutputAllocatorIds;
+        // Component uses this ID to fetch corresponding output block pool from platform.
+        std::shared_ptr<C2PortBlockPoolsTuning::output> mOutputBlockPoolIds;
+
+        c2_status_t mInitStatus;
+        media::VideoCodecProfile mCodecProfile;
+    };
+
+    C2VDAComponent(C2String name, c2_node_id_t id,
+                   const std::shared_ptr<C2ReflectorHelper>& helper);
+    virtual ~C2VDAComponent() override;
+
+    // Implementation of C2Component interface
+    virtual c2_status_t setListener_vb(const std::shared_ptr<Listener>& listener,
+                                       c2_blocking_t mayBlock) override;
+    virtual c2_status_t queue_nb(std::list<std::unique_ptr<C2Work>>* const items) override;
+    virtual c2_status_t announce_nb(const std::vector<C2WorkOutline>& items) override;
+    virtual c2_status_t flush_sm(flush_mode_t mode,
+                                 std::list<std::unique_ptr<C2Work>>* const flushedWork) override;
+    virtual c2_status_t drain_nb(drain_mode_t mode) override;
+    virtual c2_status_t start() override;
+    virtual c2_status_t stop() override;
+    virtual c2_status_t reset() override;
+    virtual c2_status_t release() override;
+    virtual std::shared_ptr<C2ComponentInterface> intf() override;
+
+    // Implementation of VideoDecodeAcceleratorAdaptor::Client interface
+    virtual void providePictureBuffers(uint32_t minNumBuffers,
+                                       const media::Size& codedSize) override;
+    virtual void dismissPictureBuffer(int32_t pictureBufferId) override;
+    virtual void pictureReady(int32_t pictureBufferId, int32_t bitstreamId,
+                              const media::Rect& cropRect) override;
+    virtual void notifyEndOfBitstreamBuffer(int32_t bitstreamId) override;
+    virtual void notifyFlushDone() override;
+    virtual void notifyResetDone() override;
+    virtual void notifyError(VideoDecodeAcceleratorAdaptor::Result error) override;
+
+private:
+    // The state machine enumeration on parent thread.
+    enum class State : int32_t {
+        // The initial state of component. State will change to LOADED after the component is
+        // created.
+        UNLOADED,
+        // The component is stopped. State will change to RUNNING when start() is called by
+        // framework.
+        LOADED,
+        // The component is running. State will change to LOADED when stop() or reset() is called by
+        // framework.
+        RUNNING,
+        // The component is in error state.
+        ERROR,
+    };
+    // The state machine enumeration on component thread.
+    enum class ComponentState : int32_t {
+        // This is the initial state until VDA initialization returns successfully.
+        UNINITIALIZED,
+        // VDA initialization returns successfully. VDA is ready to make progress.
+        STARTED,
+        // onDrain() is called. VDA is draining. Component will hold on queueing works until
+        // onDrainDone().
+        DRAINING,
+        // onFlush() is called. VDA is flushing. State will change to STARTED after onFlushDone().
+        FLUSHING,
+        // onStop() is called. VDA is shutting down. State will change to UNINITIALIZED after
+        // onStopDone().
+        STOPPING,
+        // onError() is called.
+        ERROR,
+    };
+
+    enum {
+        kDpbOutputBufferExtraCount = 3,  // Use the same number as ACodec.
+    };
+
+    // This constant is used to tell apart from drain_mode_t enumerations in C2Component.h, which
+    // means no drain request.
+    // Note: this value must be different than all enumerations in drain_mode_t.
+    static constexpr uint32_t NO_DRAIN = ~0u;
+
+    // Internal struct for work queue.
+    struct WorkEntry {
+        std::unique_ptr<C2Work> mWork;
+        uint32_t mDrainMode = NO_DRAIN;
+    };
+
+    // Internal struct to keep the information of a specific graphic block.
+    struct GraphicBlockInfo {
+        enum class State {
+            OWNED_BY_COMPONENT,    // Owned by this component.
+            OWNED_BY_ACCELERATOR,  // Owned by video decode accelerator.
+            OWNED_BY_CLIENT,       // Owned by client.
+        };
+
+        int32_t mBlockId = -1;
+        State mState = State::OWNED_BY_COMPONENT;
+        // Graphic block buffer allocated from allocator. This should be reused.
+        std::shared_ptr<C2GraphicBlock> mGraphicBlock;
+        // HAL pixel format used while importing to VDA.
+        HalPixelFormat mPixelFormat;
+        // The handle dupped from graphic block for importing to VDA.
+        base::ScopedFD mHandle;
+        // VideoFramePlane information for importing to VDA.
+        std::vector<VideoFramePlane> mPlanes;
+    };
+
+    struct VideoFormat {
+        HalPixelFormat mPixelFormat = HalPixelFormat::UNKNOWN;
+        uint32_t mMinNumBuffers = 0;
+        media::Size mCodedSize;
+        media::Rect mVisibleRect;
+
+        VideoFormat() {}
+        VideoFormat(HalPixelFormat pixelFormat, uint32_t minNumBuffers, media::Size codedSize,
+                    media::Rect visibleRect);
+    };
+
+    // Used as the release callback for C2VDAGraphicBuffer to get back the output buffer.
+    void returnOutputBuffer(int32_t pictureBufferId);
+
+    // These tasks should be run on the component thread |mThread|.
+    void onDestroy();
+    void onStart(media::VideoCodecProfile profile, base::WaitableEvent* done);
+    void onQueueWork(std::unique_ptr<C2Work> work);
+    void onDequeueWork();
+    void onInputBufferDone(int32_t bitstreamId);
+    void onOutputBufferDone(int32_t pictureBufferId, int32_t bitstreamId);
+    void onDrain(uint32_t drainMode);
+    void onDrainDone();
+    void onFlush();
+    void onStop(base::WaitableEvent* done);
+    void onResetDone();
+    void onFlushDone();
+    void onStopDone();
+    void onOutputFormatChanged(std::unique_ptr<VideoFormat> format);
+    void onVisibleRectChanged(const media::Rect& cropRect);
+    void onOutputBufferReturned(int32_t pictureBufferId);
+
+    // Send input buffer to accelerator with specified bitstream id.
+    void sendInputBufferToAccelerator(const C2ConstLinearBlock& input, int32_t bitstreamId);
+    // Send output buffer to accelerator.
+    void sendOutputBufferToAccelerator(GraphicBlockInfo* info);
+    // Set crop rectangle information to output format.
+    void setOutputFormatCrop(const media::Rect& cropRect);
+    // Helper function to get the specified GraphicBlockInfo object by its id.
+    GraphicBlockInfo* getGraphicBlockById(int32_t blockId);
+    // Helper function to get the specified work in mPendingWorks by bitstream id.
+    C2Work* getPendingWorkByBitstreamId(int32_t bitstreamId);
+    // Try to apply the output format change.
+    void tryChangeOutputFormat();
+    // Allocate output buffers (graphic blocks) from block allocator.
+    c2_status_t allocateBuffersFromBlockAllocator(const media::Size& size, uint32_t pixelFormat);
+    // Append allocated buffer (graphic block) to mGraphicBlocks.
+    void appendOutputBuffer(std::shared_ptr<C2GraphicBlock> block);
+
+    // Check for finished works in mPendingWorks. If any, make onWorkDone call to listener.
+    void reportFinishedWorkIfAny();
+    // Make onWorkDone call to listener for reporting EOS work in mPendingWorks.
+    void reportEOSWork();
+    // Abandon all works in mPendingWorks.
+    void reportAbandonedWorks();
+    // Make onError call to listener for reporting errors.
+    void reportError(c2_status_t error);
+    // Helper function to determine if the work is finished.
+    bool isWorkDone(const C2Work* work) const;
+
+    // The pointer of component interface implementation.
+    std::shared_ptr<IntfImpl> mIntfImpl;
+    // The pointer of component interface.
+    const std::shared_ptr<C2ComponentInterface> mIntf;
+    // The pointer of component listener.
+    std::shared_ptr<Listener> mListener;
+
+    // The main component thread.
+    base::Thread mThread;
+    // The task runner on component thread.
+    scoped_refptr<base::SingleThreadTaskRunner> mTaskRunner;
+
+    // The following members should be utilized on component thread |mThread|.
+
+    // The initialization result retrieved from VDA.
+    VideoDecodeAcceleratorAdaptor::Result mVDAInitResult;
+    // The pointer of VideoDecodeAcceleratorAdaptor.
+    std::unique_ptr<VideoDecodeAcceleratorAdaptor> mVDAAdaptor;
+    // The done event pointer of stop procedure. It should be restored in onStop() and signaled in
+    // onStopDone().
+    base::WaitableEvent* mStopDoneEvent;
+    // The state machine on component thread.
+    ComponentState mComponentState;
+    // The indicator of drain mode (true for draining with EOS). This should be always set along
+    // with component going to DRAINING state, and only regarded under DRAINING state.
+    bool mDrainWithEOS;
+    // The vector of storing allocated output graphic block information.
+    std::vector<GraphicBlockInfo> mGraphicBlocks;
+    // The work queue. Works are queued along with drain mode from component API queue_nb and
+    // dequeued by the decode process of component.
+    std::queue<WorkEntry> mQueue;
+    // Store all pending works. The dequeued works are placed here until they are finished and then
+    // sent out by onWorkDone call to listener.
+    std::deque<std::unique_ptr<C2Work>> mPendingWorks;
+    // Store the visible rect provided from VDA. If this is changed, component should issue a
+    // visible size change event.
+    media::Rect mRequestedVisibleRect;
+    // The current output format.
+    VideoFormat mOutputFormat;
+    // The pending output format. We need to wait until all buffers are returned back to apply the
+    // format change.
+    std::unique_ptr<VideoFormat> mPendingOutputFormat;
+    // Record the timestamp of the last output buffer. This is used to determine if the work is
+    // finished.
+    int64_t mLastOutputTimestamp;
+    // The pointer of output block pool.
+    std::shared_ptr<C2BlockPool> mOutputBlockPool;
+
+    // The following members should be utilized on parent thread.
+
+    // The input codec profile which is configured in component interface.
+    media::VideoCodecProfile mCodecProfile;
+    // The state machine on parent thread which should be atomic.
+    std::atomic<State> mState;
+    // The mutex lock to synchronize start/stop/reset/release calls.
+    std::mutex mStartStopLock;
+
+    // The WeakPtrFactory for getting weak pointer of this.
+    base::WeakPtrFactory<C2VDAComponent> mWeakThisFactory;
+
+    DISALLOW_COPY_AND_ASSIGN(C2VDAComponent);
+};
+
+}  // namespace android
+
+#endif  // ANDROID_C2_VDA_COMPONENT_H
diff --git a/include/VideoDecodeAcceleratorAdaptor.h b/include/VideoDecodeAcceleratorAdaptor.h
new file mode 100644
index 0000000..c9fc50b
--- /dev/null
+++ b/include/VideoDecodeAcceleratorAdaptor.h
@@ -0,0 +1,112 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANDROID_VIDEO_DECODE_ACCELERATOR_ADAPTOR_H
+#define ANDROID_VIDEO_DECODE_ACCELERATOR_ADAPTOR_H
+
+#include <C2VDACommon.h>
+
+#include <rect.h>
+#include <size.h>
+#include <video_codecs.h>
+#include <video_pixel_format.h>
+
+#include <vector>
+
+namespace android {
+
+// The offset and stride of a video frame plane.
+struct VideoFramePlane {
+    uint32_t mOffset;
+    uint32_t mStride;
+};
+
+// The HAL pixel format information supported by Android flexible YUV format.
+struct SupportedPixelFormat {
+    bool mCrcb;
+    bool mSemiplanar;
+    HalPixelFormat mPixelFormat;
+};
+
+// Video decoder accelerator adaptor interface.
+// The adaptor plays the role of providing unified adaptor API functions and client callback to
+// codec component side.
+// The adaptor API and client callback are modeled after media::VideoDecodeAccelerator which is
+// ported from Chrome and are 1:1 mapped with its API functions.
+class VideoDecodeAcceleratorAdaptor {
+public:
+    enum Result {
+        SUCCESS = 0,
+        ILLEGAL_STATE = 1,
+        INVALID_ARGUMENT = 2,
+        UNREADABLE_INPUT = 3,
+        PLATFORM_FAILURE = 4,
+        INSUFFICIENT_RESOURCES = 5,
+    };
+
+    // The adaptor client interface. This interface should be implemented in the component side.
+    class Client {
+    public:
+        virtual ~Client() {}
+
+        // Callback to tell client how many and what size of buffers to provide.
+        virtual void providePictureBuffers(uint32_t minNumBuffers,
+                                           const media::Size& codedSize) = 0;
+
+        // Callback to dismiss picture buffer that was assigned earlier.
+        virtual void dismissPictureBuffer(int32_t pictureBufferId) = 0;
+
+        // Callback to deliver decoded pictures ready to be displayed.
+        virtual void pictureReady(int32_t pictureBufferId, int32_t bitstreamId,
+                                  const media::Rect& cropRect) = 0;
+
+        // Callback to notify that decoder has decoded the end of the bitstream buffer with
+        // specified ID.
+        virtual void notifyEndOfBitstreamBuffer(int32_t bitstreamId) = 0;
+
+        // Flush completion callback.
+        virtual void notifyFlushDone() = 0;
+
+        // Reset completion callback.
+        virtual void notifyResetDone() = 0;
+
+        // Callback to notify about errors. Note that errors in initialize() are not reported
+        // here; they are reported via its return value instead.
+        virtual void notifyError(Result error) = 0;
+    };
+
+    // Initializes the video decoder with specific profile. This call is synchronous and returns
+    // SUCCESS iff initialization is successful.
+    virtual Result initialize(media::VideoCodecProfile profile, bool secureMode,
+                              Client* client) = 0;
+
+    // Decodes given buffer handle with bitstream ID.
+    virtual void decode(int32_t bitstreamId, int handleFd, off_t offset, uint32_t bytesUsed) = 0;
+
+    // Assigns a specified number of picture buffer set to the video decoder.
+    virtual void assignPictureBuffers(uint32_t numOutputBuffers) = 0;
+
+    // Imports planes as backing memory for picture buffer with specified ID.
+    virtual void importBufferForPicture(int32_t pictureBufferId, HalPixelFormat format,
+                                        int handleFd,
+                                        const std::vector<VideoFramePlane>& planes) = 0;
+
+    // Sends picture buffer to be reused by the decoder by its picture ID.
+    virtual void reusePictureBuffer(int32_t pictureBufferId) = 0;
+
+    // Flushes the decoder.
+    virtual void flush() = 0;
+
+    // Resets the decoder.
+    virtual void reset() = 0;
+
+    // Destroys the decoder.
+    virtual void destroy() = 0;
+
+    virtual ~VideoDecodeAcceleratorAdaptor() {}
+};
+
+}  // namespace android
+
+#endif  // ANDROID_VIDEO_DECODE_ACCELERATOR_ADAPTOR_H
diff --git a/tests/Android.mk b/tests/Android.mk
new file mode 100644
index 0000000..4bafb4a
--- /dev/null
+++ b/tests/Android.mk
@@ -0,0 +1,78 @@
+# Build the unit tests.
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_MODULE := C2VDACompIntf_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+  C2VDACompIntf_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+  libchrome \
+  libcutils \
+  liblog \
+  libstagefright_codec2 \
+  libstagefright_codec2_vndk \
+  libutils \
+  libv4l2_codec2 \
+  libv4l2_codec2_vda \
+
+LOCAL_C_INCLUDES += \
+  $(TOP)/external/v4l2_codec2/include \
+  $(TOP)/external/v4l2_codec2/vda \
+  $(TOP)/hardware/google/av/codec2/include \
+  $(TOP)/hardware/google/av/codec2/vndk/include \
+  $(TOP)/hardware/google/av/media/codecs/base/include \
+
+LOCAL_CFLAGS += -Werror -Wall -std=c++14
+LOCAL_CLANG := true
+
+LOCAL_LDFLAGS := -Wl,-Bsymbolic
+
+include $(BUILD_NATIVE_TEST)
+
+
+include $(CLEAR_VARS)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_MODULE := C2VDAComponent_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+  C2VDAComponent_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+  libchrome \
+  libcutils \
+  liblog \
+  libmedia \
+  libmediaextractor \
+  libstagefright \
+  libstagefright_codec2 \
+  libstagefright_codec2_vndk \
+  libstagefright_foundation \
+  libutils \
+  libv4l2_codec2 \
+  libv4l2_codec2_vda \
+  android.hardware.media.bufferpool@1.0 \
+
+LOCAL_C_INCLUDES += \
+  $(TOP)/external/libchrome \
+  $(TOP)/external/v4l2_codec2/include \
+  $(TOP)/external/v4l2_codec2/vda \
+  $(TOP)/frameworks/av/media/libstagefright/include \
+  $(TOP)/hardware/google/av/codec2/include \
+  $(TOP)/hardware/google/av/codec2/vndk/include \
+  $(TOP)/hardware/google/av/media/codecs/base/include \
+
+# -Wno-unused-parameter is needed for libchrome/base codes
+LOCAL_CFLAGS += -Werror -Wall -Wno-unused-parameter -std=c++14
+LOCAL_CLANG := true
+
+LOCAL_LDFLAGS := -Wl,-Bsymbolic
+
+include $(BUILD_NATIVE_TEST)
diff --git a/tests/C2VDACompIntf_test.cpp b/tests/C2VDACompIntf_test.cpp
new file mode 100644
index 0000000..6ed0753
--- /dev/null
+++ b/tests/C2VDACompIntf_test.cpp
@@ -0,0 +1,426 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2VDACompIntf_test"
+
+#include <C2VDAComponent.h>
+
+#include <C2PlatformSupport.h>
+
+#include <gtest/gtest.h>
+#include <utils/Log.h>
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <limits>
+
// Helper macro to silence unused-variable/-parameter warnings.
// NOTE(review): not referenced anywhere in this file's visible code — confirm
// it is still needed before removing.
#define UNUSED(expr)  \
    do {              \
        (void)(expr); \
    } while (0)
+
namespace android {

// Component name and an arbitrary node id used to instantiate the interface
// under test.
const C2String testCompName = "c2.vda.avc.decoder";
const c2_node_id_t testCompNodeId = 12345;

// MIME types expected on the raw (output) and compressed (input) ports.
const char* MEDIA_MIMETYPE_VIDEO_RAW = "video/raw";
const char* MEDIA_MIMETYPE_VIDEO_AVC = "video/avc";

// Expected allocator ids for each port, and the default output block pool id.
const C2Allocator::id_t kInputAllocators[] = {C2PlatformAllocatorStore::ION};
const C2Allocator::id_t kOutputAllocators[] = {C2PlatformAllocatorStore::GRALLOC};
const C2BlockPool::local_id_t kDefaultOutputBlockPool = C2BlockPool::BASIC_GRAPHIC;
+
// Test fixture for C2VDAComponent's parameter interface. Each test talks to
// the component through |mIntf| (query_vb/config_vb/querySupportedValues_vb)
// and validates the reported parameter values against expectations.
class C2VDACompIntfTest : public ::testing::Test {
protected:
    C2VDACompIntfTest() {
        mReflector = std::make_shared<C2ReflectorHelper>();
        mIntf = std::shared_ptr<C2ComponentInterface>(new SimpleInterface<C2VDAComponent::IntfImpl>(
                testCompName.c_str(), testCompNodeId,
                std::make_shared<C2VDAComponent::IntfImpl>(testCompName, mReflector)));
    }
    ~C2VDACompIntfTest() override {}

    // Runs both the stack- and heap-based read-only parameter checks below.
    template <typename T>
    void testReadOnlyParam(const T* expected, T* invalid);

    // Attempts to config |param|; once b/79720928 is resolved this should
    // assert a READ_ONLY failure.
    template <typename T>
    void checkReadOnlyFailureOnConfig(T* param);

    // Verifies a read-only param queried into a stack instance keeps its value
    // across failed config attempts.
    template <typename T>
    void testReadOnlyParamOnStack(const T* expected, T* invalid);

    // Same as above, but queried into heap-allocated params.
    template <typename T>
    void testReadOnlyParamOnHeap(const T* expected, T* invalid);

    // Configures |newParam| and verifies the interface reports it back.
    template <typename T>
    void testWritableParam(T* newParam);

    // Configures an invalid value and verifies the previous value is kept.
    template <typename T>
    void testInvalidWritableParam(T* invalidParam);

    // Sweeps every supported width/height combination through
    // testWritableParam.
    template <typename T>
    void testWritableVideoSizeParam(int32_t widthMin, int32_t widthMax, int32_t widthStep,
                                    int32_t heightMin, int32_t heightMax, int32_t heightStep);

    std::shared_ptr<C2ComponentInterface> mIntf;  // interface under test
    std::shared_ptr<C2ReflectorHelper> mReflector;
};
+
// Verifies a read-only parameter reports |expected| and cannot be overwritten
// (with |invalid|), exercising both the stack and heap query paths.
template <typename T>
void C2VDACompIntfTest::testReadOnlyParam(const T* expected, T* invalid) {
    testReadOnlyParamOnStack(expected, invalid);
    testReadOnlyParamOnHeap(expected, invalid);
}
+
// Tries to config |param| on the interface. For a read-only parameter the
// config should fail; the assertions below stay disabled until the
// C2InterfaceHelper behavior referenced in the TODOs is fixed.
template <typename T>
void C2VDACompIntfTest::checkReadOnlyFailureOnConfig(T* param) {
    std::vector<C2Param*> params{param};
    std::vector<std::unique_ptr<C2SettingResult>> failures;

    // TODO: do not assert on checking return value since it is not consistent for
    //       C2InterfaceHelper now. (b/79720928)
    //   1) if config same value, it returns C2_OK
    //   2) if config different value, it returns C2_CORRUPTED. But when you config again, it
    //      returns C2_OK
    //ASSERT_EQ(C2_BAD_VALUE, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
    mIntf->config_vb(params, C2_DONT_BLOCK, &failures);

    // TODO: failure is not yet supported for C2InterfaceHelper
    //ASSERT_EQ(1u, failures.size());
    //EXPECT_EQ(C2SettingResult::READ_ONLY, failures[0]->failure);
}
+
// Queries the read-only param into a stack instance, checks it equals
// |expected|, attempts to reconfigure it (with both the current and the
// |invalid| value), then re-queries to confirm the value did not change.
template <typename T>
void C2VDACompIntfTest::testReadOnlyParamOnStack(const T* expected, T* invalid) {
    T param;
    std::vector<C2Param*> stackParams{&param};
    ASSERT_EQ(C2_OK, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));
    EXPECT_EQ(*expected, param);

    checkReadOnlyFailureOnConfig(&param);
    checkReadOnlyFailureOnConfig(invalid);

    // The param must not change after failed config.
    ASSERT_EQ(C2_OK, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));
    EXPECT_EQ(*expected, param);
}
+
// Same contract as testReadOnlyParamOnStack, but queries by param index into
// heap-allocated params (the flexible-size query path).
template <typename T>
void C2VDACompIntfTest::testReadOnlyParamOnHeap(const T* expected, T* invalid) {
    std::vector<std::unique_ptr<C2Param>> heapParams;

    uint32_t index = expected->index();

    ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
    ASSERT_EQ(1u, heapParams.size());
    EXPECT_EQ(*expected, *heapParams[0]);

    checkReadOnlyFailureOnConfig(heapParams[0].get());
    checkReadOnlyFailureOnConfig(invalid);

    // The param must not change after failed config.
    heapParams.clear();
    ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
    ASSERT_EQ(1u, heapParams.size());
    EXPECT_EQ(*expected, *heapParams[0]);
}
+
// Configures |newParam| on the interface (expecting success with no
// failures), then queries it back through both the stack and heap paths to
// verify the new value took effect.
template <typename T>
void C2VDACompIntfTest::testWritableParam(T* newParam) {
    std::vector<C2Param*> params{newParam};
    std::vector<std::unique_ptr<C2SettingResult>> failures;
    ASSERT_EQ(C2_OK, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
    EXPECT_EQ(0u, failures.size());

    // The param must change to newParam
    // Check like param on stack
    T param;
    std::vector<C2Param*> stackParams{&param};
    ASSERT_EQ(C2_OK, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));
    EXPECT_EQ(*newParam, param);

    // Check also like param on heap
    std::vector<std::unique_ptr<C2Param>> heapParams;
    ASSERT_EQ(C2_OK, mIntf->query_vb({}, {newParam->index()}, C2_DONT_BLOCK, &heapParams));
    ASSERT_EQ(1u, heapParams.size());
    EXPECT_EQ(*newParam, *heapParams[0]);
}
+
// Configures |invalidParam| (expecting C2_BAD_VALUE with one failure) and
// verifies the previously-configured value survives, through both the stack
// and heap query paths.
template <typename T>
void C2VDACompIntfTest::testInvalidWritableParam(T* invalidParam) {
    // Get the current parameter info
    T preParam;
    std::vector<C2Param*> stackParams{&preParam};
    ASSERT_EQ(C2_OK, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));

    // Config invalid value. The failure is expected
    std::vector<C2Param*> params{invalidParam};
    std::vector<std::unique_ptr<C2SettingResult>> failures;
    ASSERT_EQ(C2_BAD_VALUE, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
    EXPECT_EQ(1u, failures.size());

    // The param must not change after config failed
    T param;
    std::vector<C2Param*> stackParams2{&param};
    ASSERT_EQ(C2_OK, mIntf->query_vb(stackParams2, {}, C2_DONT_BLOCK, nullptr));
    EXPECT_EQ(preParam, param);

    // Check also like param on heap
    std::vector<std::unique_ptr<C2Param>> heapParams;
    ASSERT_EQ(C2_OK, mIntf->query_vb({}, {invalidParam->index()}, C2_DONT_BLOCK, &heapParams));
    ASSERT_EQ(1u, heapParams.size());
    EXPECT_EQ(preParam, *heapParams[0]);
}
+
// Returns true if computing (a - b) would underflow int32_t.
// (Name keeps the original's "Substract" spelling for compatibility with the
// call sites in this file.)
bool isUnderflowSubstract(int32_t a, int32_t b) {
    // For a >= 0, (a - b) can never go below INT32_MIN, since that would
    // require b > a + 2^31 > INT32_MAX.
    if (a >= 0) return false;
    // (a - b) < INT32_MIN  <=>  b > a - INT32_MIN; the subtraction is safe
    // here because a is negative.
    const int32_t headroom = a - std::numeric_limits<int32_t>::min();
    return b > headroom;
}
+
// Returns true if computing (a + b) would overflow int32_t (exceed INT32_MAX).
bool isOverflowAdd(int32_t a, int32_t b) {
    // For a <= 0, (a + b) <= b <= INT32_MAX, so overflow is impossible.
    if (a <= 0) return false;
    // (a + b) > INT32_MAX  <=>  b > INT32_MAX - a; safe since a is positive.
    const int32_t headroom = std::numeric_limits<int32_t>::max() - a;
    return b > headroom;
}
+
// Sweeps every (width, height) combination in the supported ranges through
// testWritableParam, stopping early on the first fatal failure. The invalid
// value checks are disabled below until C2InterfaceHelper supports value
// validation.
template <typename T>
void C2VDACompIntfTest::testWritableVideoSizeParam(int32_t widthMin, int32_t widthMax,
                                                   int32_t widthStep, int32_t heightMin,
                                                   int32_t heightMax, int32_t heightStep) {
    // Test supported values of video size
    T valid;
    for (int32_t h = heightMin; h <= heightMax; h += heightStep) {
        for (int32_t w = widthMin; w <= widthMax; w += widthStep) {
            valid.width = w;
            valid.height = h;
            {
                SCOPED_TRACE("testWritableParam");
                testWritableParam(&valid);
                if (HasFailure()) {
                    printf("Failed while config width = %d, height = %d\n", valid.width,
                           valid.height);
                }
                if (HasFatalFailure()) return;
            }
        }
    }

    // TODO: validate possible values in C2InterfaceHelper is not implemented yet.
    //// Test invalid values video size
    //T invalid;
    //// Width or height is smaller than min values
    //if (!isUnderflowSubstract(widthMin, widthStep)) {
    //    invalid.width = widthMin - widthStep;
    //    invalid.height = heightMin;
    //    testInvalidWritableParam(&invalid);
    //}
    //if (!isUnderflowSubstract(heightMin, heightStep)) {
    //    invalid.width = widthMin;
    //    invalid.height = heightMin - heightStep;
    //    testInvalidWritableParam(&invalid);
    //}

    //// Width or height is bigger than max values
    //if (!isOverflowAdd(widthMax, widthStep)) {
    //    invalid.width = widthMax + widthStep;
    //    invalid.height = heightMax;
    //    testInvalidWritableParam(&invalid);
    //}
    //if (!isOverflowAdd(heightMax, heightStep)) {
    //    invalid.width = widthMax;
    //    invalid.height = heightMax + heightStep;
    //    testInvalidWritableParam(&invalid);
    //}

    //// Invalid width/height within the range
    //if (widthStep != 1) {
    //    invalid.width = widthMin + 1;
    //    invalid.height = heightMin;
    //    testInvalidWritableParam(&invalid);
    //}
    //if (heightStep != 1) {
    //    invalid.width = widthMin;
    //    invalid.height = heightMin + 1;
    //    testInvalidWritableParam(&invalid);
    //}
}
+
// Runs |func| inside a SCOPED_TRACE and returns from the calling test early
// if the call raised a fatal (ASSERT_*) failure. Must be used in a function
// returning void, as required by gtest's fatal-failure propagation.
#define TRACED_FAILURE(func)                            \
    do {                                                \
        SCOPED_TRACE(#func);                            \
        func;                                           \
        if (::testing::Test::HasFatalFailure()) return; \
    } while (false)
+
// The interface must report the component name and node id it was created
// with in the fixture constructor.
TEST_F(C2VDACompIntfTest, CreateInstance) {
    auto name = mIntf->getName();
    auto id = mIntf->getId();
    printf("name = %s\n", name.c_str());
    printf("node_id = %u\n", id);
    EXPECT_STREQ(name.c_str(), testCompName.c_str());
    EXPECT_EQ(id, testCompNodeId);
}
+
+TEST_F(C2VDACompIntfTest, TestInputFormat) {
+    C2StreamBufferTypeSetting::input expected(0u, C2FormatCompressed);
+    expected.setStream(0);  // only support single stream
+    C2StreamBufferTypeSetting::input invalid(0u, C2FormatVideo);
+    invalid.setStream(0);  // only support single stream
+    TRACED_FAILURE(testReadOnlyParam(&expected, &invalid));
+}
+
+TEST_F(C2VDACompIntfTest, TestOutputFormat) {
+    C2StreamBufferTypeSetting::output expected(0u, C2FormatVideo);
+    expected.setStream(0);  // only support single stream
+    C2StreamBufferTypeSetting::output invalid(0u, C2FormatCompressed);
+    invalid.setStream(0);  // only support single stream
+    TRACED_FAILURE(testReadOnlyParam(&expected, &invalid));
+}
+
+TEST_F(C2VDACompIntfTest, TestInputPortMime) {
+    std::shared_ptr<C2PortMediaTypeSetting::input> expected(
+            AllocSharedString<C2PortMediaTypeSetting::input>(MEDIA_MIMETYPE_VIDEO_AVC));
+    std::shared_ptr<C2PortMediaTypeSetting::input> invalid(
+            AllocSharedString<C2PortMediaTypeSetting::input>(MEDIA_MIMETYPE_VIDEO_RAW));
+    TRACED_FAILURE(testReadOnlyParamOnHeap(expected.get(), invalid.get()));
+}
+
+TEST_F(C2VDACompIntfTest, TestOutputPortMime) {
+    std::shared_ptr<C2PortMediaTypeSetting::output> expected(
+            AllocSharedString<C2PortMediaTypeSetting::output>(MEDIA_MIMETYPE_VIDEO_RAW));
+    std::shared_ptr<C2PortMediaTypeSetting::output> invalid(
+            AllocSharedString<C2PortMediaTypeSetting::output>(MEDIA_MIMETYPE_VIDEO_AVC));
+    TRACED_FAILURE(testReadOnlyParamOnHeap(expected.get(), invalid.get()));
+}
+
// Queries the supported ranges (min/max/step) for the output picture width
// and height, then sweeps every combination through
// testWritableVideoSizeParam.
TEST_F(C2VDACompIntfTest, TestVideoSize) {
    C2StreamPictureSizeInfo::output videoSize;
    videoSize.setStream(0);  // only support single stream
    // Query currently-supported values for the width field.
    std::vector<C2FieldSupportedValuesQuery> widthC2FSV = {
            {C2ParamField(&videoSize, &C2StreamPictureSizeInfo::width),
             C2FieldSupportedValuesQuery::CURRENT},
    };
    ASSERT_EQ(C2_OK, mIntf->querySupportedValues_vb(widthC2FSV, C2_DONT_BLOCK));
    // Query currently-supported values for the height field.
    std::vector<C2FieldSupportedValuesQuery> heightC2FSV = {
            {C2ParamField(&videoSize, &C2StreamPictureSizeInfo::height),
             C2FieldSupportedValuesQuery::CURRENT},
    };
    ASSERT_EQ(C2_OK, mIntf->querySupportedValues_vb(heightC2FSV, C2_DONT_BLOCK));
    // Width must be reported as a RANGE; extract its bounds and step.
    ASSERT_EQ(1u, widthC2FSV.size());
    ASSERT_EQ(C2_OK, widthC2FSV[0].status);
    ASSERT_EQ(C2FieldSupportedValues::RANGE, widthC2FSV[0].values.type);
    auto& widthFSVRange = widthC2FSV[0].values.range;
    int32_t widthMin = widthFSVRange.min.i32;
    int32_t widthMax = widthFSVRange.max.i32;
    int32_t widthStep = widthFSVRange.step.i32;

    // Height must also be a RANGE.
    ASSERT_EQ(1u, heightC2FSV.size());
    ASSERT_EQ(C2_OK, heightC2FSV[0].status);
    ASSERT_EQ(C2FieldSupportedValues::RANGE, heightC2FSV[0].values.type);
    auto& heightFSVRange = heightC2FSV[0].values.range;
    int32_t heightMin = heightFSVRange.min.i32;
    int32_t heightMax = heightFSVRange.max.i32;
    int32_t heightStep = heightFSVRange.step.i32;

    // test updating valid and invalid values
    TRACED_FAILURE(testWritableVideoSizeParam<C2StreamPictureSizeInfo::output>(
            widthMin, widthMax, widthStep, heightMin, heightMax, heightStep));
}
+
+TEST_F(C2VDACompIntfTest, TestInputAllocatorIds) {
+    std::shared_ptr<C2PortAllocatorsTuning::input> expected(
+            C2PortAllocatorsTuning::input::AllocShared(kInputAllocators));
+    std::shared_ptr<C2PortAllocatorsTuning::input> invalid(
+            C2PortAllocatorsTuning::input::AllocShared(kOutputAllocators));
+    TRACED_FAILURE(testReadOnlyParamOnHeap(expected.get(), invalid.get()));
+}
+
+TEST_F(C2VDACompIntfTest, TestOutputAllocatorIds) {
+    std::shared_ptr<C2PortAllocatorsTuning::output> expected(
+            C2PortAllocatorsTuning::output::AllocShared(kOutputAllocators));
+    std::shared_ptr<C2PortAllocatorsTuning::output> invalid(
+            C2PortAllocatorsTuning::output::AllocShared(kInputAllocators));
+    TRACED_FAILURE(testReadOnlyParamOnHeap(expected.get(), invalid.get()));
+}
+
// The output block-pool id param is writable: its default must be
// kDefaultOutputBlockPool and configuring a new pool id must stick.
TEST_F(C2VDACompIntfTest, TestOutputBlockPoolIds) {
    std::vector<std::unique_ptr<C2Param>> heapParams;
    C2Param::Index index = C2PortBlockPoolsTuning::output::PARAM_TYPE;

    // Query the param and check the default value.
    ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
    ASSERT_EQ(1u, heapParams.size());
    C2BlockPool::local_id_t value = ((C2PortBlockPoolsTuning*)heapParams[0].get())->m.values[0];
    ASSERT_EQ(kDefaultOutputBlockPool, value);

    // Configure the param.
    C2BlockPool::local_id_t configBlockPools[] = {C2BlockPool::PLATFORM_START + 1};
    std::shared_ptr<C2PortBlockPoolsTuning::output> newParam(
            C2PortBlockPoolsTuning::output::AllocShared(configBlockPools));

    std::vector<C2Param*> params{newParam.get()};
    std::vector<std::unique_ptr<C2SettingResult>> failures;
    ASSERT_EQ(C2_OK, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
    EXPECT_EQ(0u, failures.size());

    // Query the param again and check the value is as configured
    heapParams.clear();
    ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
    ASSERT_EQ(1u, heapParams.size());
    value = ((C2PortBlockPoolsTuning*)heapParams[0].get())->m.values[0];
    ASSERT_EQ(configBlockPools[0], value);
}
+
// Querying a param the component does not support must return C2_BAD_INDEX
// and invalidate the stack param (its size() becomes 0).
TEST_F(C2VDACompIntfTest, TestUnsupportedParam) {
    C2ComponentTemporalInfo unsupportedParam;
    std::vector<C2Param*> stackParams{&unsupportedParam};
    ASSERT_EQ(C2_BAD_INDEX, mIntf->query_vb(stackParams, {}, C2_DONT_BLOCK, nullptr));
    EXPECT_EQ(0u, unsupportedParam.size());  // invalidated
}
+
+void dumpType(const C2FieldDescriptor::type_t type) {
+    switch (type) {
+    case C2FieldDescriptor::INT32:
+        printf("int32_t");
+        break;
+    case C2FieldDescriptor::UINT32:
+        printf("uint32_t");
+        break;
+    case C2FieldDescriptor::INT64:
+        printf("int64_t");
+        break;
+    case C2FieldDescriptor::UINT64:
+        printf("uint64_t");
+        break;
+    case C2FieldDescriptor::FLOAT:
+        printf("float");
+        break;
+    default:
+        printf("<flex>");
+        break;
+    }
+}
+
// Prints a one-line summary of a struct descriptor's fields to stdout, in
// the form "  struct: { name:type, ... }".
void dumpStruct(const C2StructDescriptor& sd) {
    printf("  struct: { ");
    for (const C2FieldDescriptor& f : sd) {
        printf("%s:", f.name().c_str());
        dumpType(f.type());
        printf(", ");
    }
    printf("}\n");
}
+
// Enumerates all supported params and dumps each one's descriptor (name,
// required flag, type) plus its reflected struct layout, if the reflector
// can describe it.
TEST_F(C2VDACompIntfTest, ParamReflector) {
    std::vector<std::shared_ptr<C2ParamDescriptor>> params;

    ASSERT_EQ(mIntf->querySupportedParams_nb(&params), C2_OK);
    for (const auto& paramDesc : params) {
        printf("name: %s\n", paramDesc->name().c_str());
        printf("  required: %s\n", paramDesc->isRequired() ? "yes" : "no");
        printf("  type: %x\n", paramDesc->index().type());
        std::unique_ptr<C2StructDescriptor> desc{mReflector->describe(paramDesc->index().type())};
        if (desc.get()) dumpStruct(*desc);
    }
}
+}  // namespace android
diff --git a/tests/C2VDAComponent_test.cpp b/tests/C2VDAComponent_test.cpp
new file mode 100644
index 0000000..5e597c8
--- /dev/null
+++ b/tests/C2VDAComponent_test.cpp
@@ -0,0 +1,783 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2VDAComponent_test"
+
+#include <C2VDAComponent.h>
+
+#include <C2Buffer.h>
+#include <C2BufferPriv.h>
+#include <C2Component.h>
+#include <C2PlatformSupport.h>
+#include <C2Work.h>
+#include <SimpleC2Interface.h>
+
+#include <base/files/file.h>
+#include <base/files/file_path.h>
+#include <base/md5.h>
+#include <base/strings/string_piece.h>
+#include <base/strings/string_split.h>
+
+#include <gtest/gtest.h>
+#include <media/DataSource.h>
+#include <media/IMediaHTTPService.h>
+#include <media/MediaExtractor.h>
+#include <media/MediaSource.h>
+#include <media/stagefright/DataSourceFactory.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaExtractorFactory.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <utils/Log.h>
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <algorithm>
+#include <chrono>
+#include <thread>
+
+using namespace std::chrono_literals;
+
+namespace {
+
+const int kMD5StringLength = 32;
+
+// Read in golden MD5s for the sanity play-through check of this video
+void readGoldenMD5s(const std::string& videoFile, std::vector<std::string>* md5Strings) {
+    base::FilePath filepath(videoFile + ".md5");
+    std::string allMD5s;
+    base::ReadFileToString(filepath, &allMD5s);
+    *md5Strings = base::SplitString(allMD5s, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+    // Check these are legitimate MD5s.
+    for (const std::string& md5String : *md5Strings) {
+        // Ignore the empty string added by SplitString. Ignore comments.
+        if (!md5String.length() || md5String.at(0) == '#') {
+            continue;
+        }
+        if (static_cast<int>(md5String.length()) != kMD5StringLength) {
+            fprintf(stderr, "MD5 length error: %s\n", md5String.c_str());
+        }
+        if (std::count_if(md5String.begin(), md5String.end(), isxdigit) != kMD5StringLength) {
+            fprintf(stderr, "MD5 includes non-hex char: %s\n", md5String.c_str());
+        }
+    }
+    if (md5Strings->empty()) {
+        fprintf(stderr, "MD5 checksum file (%s) missing or empty.\n",
+                filepath.MaybeAsASCII().c_str());
+    }
+}
+
+// Get file path name of recording raw YUV
+base::FilePath getRecordOutputPath(const std::string& videoFile, int width, int height) {
+    base::FilePath filepath(videoFile);
+    filepath = filepath.RemoveExtension();
+    std::string suffix = "_output_" + std::to_string(width) + "x" + std::to_string(height) + ".yuv";
+    return base::FilePath(filepath.value() + suffix);
+}
+}  // namespace
+
namespace android {

// Input video data parameters. This could be overwritten by user argument [-i].
// The syntax of each column is:
//  filename:componentName:width:height:numFrames:numFragments
// - |filename| is the file path to mp4 (h264) or webm (VP8/9) video.
// - |componentName| specifies the name of decoder component.
// - |width| and |height| are for video size (in pixels).
// - |numFrames| is the number of picture frames.
// - |numFragments| is the NALU (h264) or frame (VP8/9) count by MediaExtractor.
const char* gTestVideoData = "bear.mp4:c2.vda.avc.decoder:640:360:82:84";
//const char* gTestVideoData = "bear-vp8.webm:c2.vda.vp8.decoder:640:360:82:82";
//const char* gTestVideoData = "bear-vp9.webm:c2.vda.vp9.decoder:320:240:82:82";

// Record decoded output frames as raw YUV format.
// The recorded file will be named as "<video_name>_output_<width>x<height>.yuv" under the same
// folder of input video file.
bool gRecordOutputYUV = false;

// Component names recognized by parseTestVideoData(), one per codec type.
const std::string kH264DecoderName = "c2.vda.avc.decoder";
const std::string kVP8DecoderName = "c2.vda.vp8.decoder";
const std::string kVP9DecoderName = "c2.vda.vp9.decoder";

// Magic constants for indicating the timing of flush being called.
enum FlushPoint : int { END_OF_STREAM_FLUSH = -3, MID_STREAM_FLUSH = -2, NO_FLUSH = -1 };
+
// Parsed form of one |gTestVideoData| entry, plus the media source extracted
// from the file for feeding input to the component.
struct TestVideoFile {
    enum class CodecType { UNKNOWN, H264, VP8, VP9 };

    std::string mFilename;       // path to the input video file
    std::string mComponentName;  // decoder component to instantiate
    CodecType mCodec = CodecType::UNKNOWN;
    int mWidth = -1;             // video width in pixels
    int mHeight = -1;            // video height in pixels
    int mNumFrames = -1;         // expected number of picture frames
    int mNumFragments = -1;      // expected NALU/frame count from the extractor
    sp<IMediaSource> mData;      // media source for the matching track
};
+
// C2Buffer wrapper exposing the whole valid region of a linear block.
class C2VDALinearBuffer : public C2Buffer {
public:
    explicit C2VDALinearBuffer(const std::shared_ptr<C2LinearBlock>& block)
          : C2Buffer({block->share(block->offset(), block->size(), C2Fence())}) {}
};
+
// C2Buffer wrapper sharing a zero-length region of a linear block; used for
// works that carry no payload (e.g. a dummy end-of-stream work).
class C2VDADummyLinearBuffer : public C2Buffer {
public:
    explicit C2VDADummyLinearBuffer(const std::shared_ptr<C2LinearBlock>& block)
          : C2Buffer({block->share(0, 0, C2Fence())}) {}
};
+
class Listener;

// Base fixture for C2VDAComponent decode tests: owns the component listener
// callbacks, the linear block pool for input buffers, and the bookkeeping
// shared between the main thread and the listener thread.
class C2VDAComponentTest : public ::testing::Test {
public:
    // Called from the component thread; moves finished works onto
    // |mProcessedWork| and signals |mProcessedCondition|.
    void onWorkDone(std::weak_ptr<C2Component> component,
                    std::list<std::unique_ptr<C2Work>> workItems);
    // Called on a config trip; currently a no-op.
    void onTripped(std::weak_ptr<C2Component> component,
                   std::vector<std::shared_ptr<C2SettingResult>> settingResult);
    // Called on component error; fails the running test.
    void onError(std::weak_ptr<C2Component> component, uint32_t errorCode);

protected:
    C2VDAComponentTest();
    void SetUp() override;

    // Parses a |gTestVideoData|-style string into |mTestVideoFile|.
    void parseTestVideoData(const char* testVideoData);

protected:
    using ULock = std::unique_lock<std::mutex>;

    enum {
        kWorkCount = 16,
    };

    std::shared_ptr<Listener> mListener;

    // Allocators
    std::shared_ptr<C2Allocator> mLinearAlloc;
    std::shared_ptr<C2BlockPool> mLinearBlockPool;

    // The array of output video frame counters which will be counted in listenerThread. The array
    // length equals to iteration time of stream play.
    std::vector<int> mOutputFrameCounts;
    // The array of work counters returned from component which will be counted in listenerThread.
    // The array length equals to iteration time of stream play.
    std::vector<int> mFinishedWorkCounts;
    // The array of output frame MD5Sum which will be computed in listenerThread. The array length
    // equals to iteration time of stream play.
    std::vector<std::string> mMD5Strings;

    // Mutex for |mWorkQueue| among main and listenerThread.
    std::mutex mQueueLock;
    std::condition_variable mQueueCondition;
    std::list<std::unique_ptr<C2Work>> mWorkQueue;

    // Mutex for |mProcessedWork| among main and listenerThread.
    std::mutex mProcessedLock;
    std::condition_variable mProcessedCondition;
    std::list<std::unique_ptr<C2Work>> mProcessedWork;

    // Mutex for |mFlushDone| among main and listenerThread.
    std::mutex mFlushDoneLock;
    std::condition_variable mFlushDoneCondition;
    bool mFlushDone;

    std::unique_ptr<TestVideoFile> mTestVideoFile;
};
+
// Thin adapter forwarding C2Component callbacks to the owning test fixture.
// |mThis| is a non-owning pointer; the fixture outlives the listener.
class Listener : public C2Component::Listener {
public:
    explicit Listener(C2VDAComponentTest* thiz) : mThis(thiz) {}
    virtual ~Listener() = default;

    virtual void onWorkDone_nb(std::weak_ptr<C2Component> component,
                               std::list<std::unique_ptr<C2Work>> workItems) override {
        mThis->onWorkDone(component, std::move(workItems));
    }

    virtual void onTripped_nb(
            std::weak_ptr<C2Component> component,
            std::vector<std::shared_ptr<C2SettingResult>> settingResult) override {
        mThis->onTripped(component, settingResult);
    }

    virtual void onError_nb(std::weak_ptr<C2Component> component, uint32_t errorCode) override {
        mThis->onError(component, errorCode);
    }

private:
    C2VDAComponentTest* const mThis;
};
+
// Creates the listener and sets up a basic linear block pool backed by the
// platform's default linear allocator (aborts via CHECK_EQ on failure).
C2VDAComponentTest::C2VDAComponentTest() : mListener(new Listener(this)) {
    std::shared_ptr<C2AllocatorStore> store = GetCodec2PlatformAllocatorStore();
    CHECK_EQ(store->fetchAllocator(C2AllocatorStore::DEFAULT_LINEAR, &mLinearAlloc), C2_OK);

    mLinearBlockPool = std::make_shared<C2BasicLinearBlockPool>(mLinearAlloc);
}
+
+void C2VDAComponentTest::onWorkDone(std::weak_ptr<C2Component> component,
+                                    std::list<std::unique_ptr<C2Work>> workItems) {
+    (void)component;
+    ULock l(mProcessedLock);
+    for (auto& item : workItems) {
+        mProcessedWork.emplace_back(std::move(item));
+    }
+    mProcessedCondition.notify_all();
+}
+
// Config-trip callback; intentionally ignored by these tests.
void C2VDAComponentTest::onTripped(std::weak_ptr<C2Component> component,
                                   std::vector<std::shared_ptr<C2SettingResult>> settingResult) {
    (void)component;
    (void)settingResult;
    // no-ops
}
+
// Any component error fails the currently-running test immediately.
void C2VDAComponentTest::onError(std::weak_ptr<C2Component> component, uint32_t errorCode) {
    (void)component;
    // fail the test
    FAIL() << "Get error code from component: " << errorCode;
}
+
// Per-test setup: parses the input video description and pre-allocates
// kWorkCount empty works for the input queue; resets processed/flush state.
void C2VDAComponentTest::SetUp() {
    parseTestVideoData(gTestVideoData);

    mWorkQueue.clear();
    for (int i = 0; i < kWorkCount; ++i) {
        mWorkQueue.emplace_back(new C2Work);
    }
    mProcessedWork.clear();
    mFlushDone = false;
}
+
+static bool getMediaSourceFromFile(const std::string& filename,
+                                   const TestVideoFile::CodecType codec, sp<IMediaSource>* source) {
+    source->clear();
+
+    sp<DataSource> dataSource =
+            DataSourceFactory::CreateFromURI(nullptr /* httpService */, filename.c_str());
+
+    if (dataSource == nullptr) {
+        fprintf(stderr, "Unable to create data source.\n");
+        return false;
+    }
+
+    sp<IMediaExtractor> extractor = MediaExtractorFactory::Create(dataSource);
+    if (extractor == nullptr) {
+        fprintf(stderr, "could not create extractor.\n");
+        return false;
+    }
+
+    std::string expectedMime;
+    if (codec == TestVideoFile::CodecType::H264) {
+        expectedMime = "video/avc";
+    } else if (codec == TestVideoFile::CodecType::VP8) {
+        expectedMime = "video/x-vnd.on2.vp8";
+    } else if (codec == TestVideoFile::CodecType::VP9) {
+        expectedMime = "video/x-vnd.on2.vp9";
+    } else {
+        fprintf(stderr, "unsupported codec type.\n");
+        return false;
+    }
+
+    for (size_t i = 0, numTracks = extractor->countTracks(); i < numTracks; ++i) {
+        sp<MetaData> meta =
+                extractor->getTrackMetaData(i, MediaExtractor::kIncludeExtensiveMetaData);
+        if (meta == nullptr) {
+            continue;
+        }
+        const char* mime;
+        meta->findCString(kKeyMIMEType, &mime);
+        if (!strcasecmp(mime, expectedMime.c_str())) {
+            *source = extractor->getTrack(i);
+            if (*source == nullptr) {
+                fprintf(stderr, "It's NULL track for track %zu.\n", i);
+                return false;
+            }
+            return true;
+        }
+    }
+    fprintf(stderr, "No track found.\n");
+    return false;
+}
+
// Parses a colon-separated test description
// (filename:componentName:width:height:numFrames:numFragments) into
// |mTestVideoFile|; asserts on malformed input.
// NOTE(review): std::stoi throws on non-numeric tokens, which would abort the
// test binary rather than produce an ASSERT failure — confirm this is the
// intended behavior for a bad [-i] argument.
void C2VDAComponentTest::parseTestVideoData(const char* testVideoData) {
    ALOGV("videoDataStr: %s", testVideoData);
    mTestVideoFile = std::make_unique<TestVideoFile>();

    // Split |input| on |delim|; unlike base::SplitString this keeps no
    // trailing empty token after a final delimiter.
    auto splitString = [](const std::string& input, const char delim) {
        std::vector<std::string> splits;
        auto beg = input.begin();
        while (beg != input.end()) {
            auto pos = std::find(beg, input.end(), delim);
            splits.emplace_back(beg, pos);
            beg = pos != input.end() ? pos + 1 : pos;
        }
        return splits;
    };
    auto tokens = splitString(testVideoData, ':');
    ASSERT_EQ(tokens.size(), 6u);
    mTestVideoFile->mFilename = tokens[0];
    ASSERT_GT(mTestVideoFile->mFilename.length(), 0u);

    // Map the component name to a codec type; must match one of the known
    // decoder names.
    mTestVideoFile->mComponentName = tokens[1];
    if (mTestVideoFile->mComponentName == kH264DecoderName) {
        mTestVideoFile->mCodec = TestVideoFile::CodecType::H264;
    } else if (mTestVideoFile->mComponentName == kVP8DecoderName) {
        mTestVideoFile->mCodec = TestVideoFile::CodecType::VP8;
    } else if (mTestVideoFile->mComponentName == kVP9DecoderName) {
        mTestVideoFile->mCodec = TestVideoFile::CodecType::VP9;
    }
    ASSERT_NE(mTestVideoFile->mCodec, TestVideoFile::CodecType::UNKNOWN);

    mTestVideoFile->mWidth = std::stoi(tokens[2]);
    mTestVideoFile->mHeight = std::stoi(tokens[3]);
    mTestVideoFile->mNumFrames = std::stoi(tokens[4]);
    mTestVideoFile->mNumFragments = std::stoi(tokens[5]);

    ALOGV("mTestVideoFile: %s, %s, %d, %d, %d, %d", mTestVideoFile->mFilename.c_str(),
          mTestVideoFile->mComponentName.c_str(), mTestVideoFile->mWidth, mTestVideoFile->mHeight,
          mTestVideoFile->mNumFrames, mTestVideoFile->mNumFragments);
}
+
// Collects the plane data of a YUV graphic view as string pieces (for MD5
// hashing or YUV dumping): one piece for Y, then either a single interleaved
// UV piece (semi-planar, detected by colInc == 2 on the U plane) or separate
// U and V pieces (planar, quarter-size each — 4:2:0 subsampling implied by
// the size arithmetic below).
static void getFrameStringPieces(const C2GraphicView& constGraphicView,
                                 std::vector<::base::StringPiece>* framePieces) {
    const uint8_t* const* constData = constGraphicView.data();
    ASSERT_NE(constData, nullptr);
    const C2PlanarLayout& layout = constGraphicView.layout();
    ASSERT_EQ(layout.type, C2PlanarLayout::TYPE_YUV) << "Only support YUV plane format";

    framePieces->clear();
    // Y plane: width * height bytes.
    framePieces->push_back(
            ::base::StringPiece(reinterpret_cast<const char*>(constData[C2PlanarLayout::PLANE_Y]),
                                constGraphicView.width() * constGraphicView.height()));
    if (layout.planes[C2PlanarLayout::PLANE_U].colInc == 2) {  // semi-planar mode
        // Interleaved UV: start from whichever of U/V comes first in memory.
        framePieces->push_back(::base::StringPiece(
                reinterpret_cast<const char*>(std::min(constData[C2PlanarLayout::PLANE_U],
                                                       constData[C2PlanarLayout::PLANE_V])),
                constGraphicView.width() * constGraphicView.height() / 2));
    } else {
        framePieces->push_back(::base::StringPiece(
                reinterpret_cast<const char*>(constData[C2PlanarLayout::PLANE_U]),
                constGraphicView.width() * constGraphicView.height() / 4));
        framePieces->push_back(::base::StringPiece(
                reinterpret_cast<const char*>(constData[C2PlanarLayout::PLANE_V]),
                constGraphicView.width() * constGraphicView.height() / 4));
    }
}
+
+// Test parameters:
+// - Flush after work index. If this value is not negative, test will signal flush to component
+//   after queueing the work frame index equals to this value in the first iteration. Negative
+//   values may be magic constants, please refer to FlushPoint enum.
+// - Number of play through. This value specifies the iteration time for playing entire video. If
+//   |mFlushAfterWorkIndex| is not negative, the first iteration will perform flush, then repeat
+//   times as this value for playing entire video.
+// - Sanity check. If this is true, decoded content sanity check is enabled. Test will compute the
+//   MD5Sum of the output frame data for a play-through iteration (not flushed), and compare it to
+//   golden MD5Sums which should be stored in the file |video_filename|.md5
+// - Use dummy EOS work. If this is true, test will queue a dummy work with end-of-stream flag at
+//   the end of all input works. Otherwise, the test will call drain_nb() on the component.
+class C2VDAComponentParamTest
+      : public C2VDAComponentTest,
+        public ::testing::WithParamInterface<std::tuple<int, uint32_t, bool, bool>> {
+protected:
+    int mFlushAfterWorkIndex;
+    uint32_t mNumberOfPlaythrough;
+    bool mSanityCheck;
+    bool mUseDummyEOSWork;
+};
+
+TEST_P(C2VDAComponentParamTest, SimpleDecodeTest) {
+    mFlushAfterWorkIndex = std::get<0>(GetParam());
+    if (mFlushAfterWorkIndex == FlushPoint::MID_STREAM_FLUSH) {
+        mFlushAfterWorkIndex = mTestVideoFile->mNumFragments / 2;
+    } else if (mFlushAfterWorkIndex == FlushPoint::END_OF_STREAM_FLUSH) {
+        mFlushAfterWorkIndex = mTestVideoFile->mNumFragments - 1;
+    }
+    ASSERT_LT(mFlushAfterWorkIndex, mTestVideoFile->mNumFragments);
+    mNumberOfPlaythrough = std::get<1>(GetParam());
+
+    if (mFlushAfterWorkIndex >= 0) {
+        mNumberOfPlaythrough++;  // add the first iteration to perform mid-stream flushing.
+    }
+
+    mSanityCheck = std::get<2>(GetParam());
+    mUseDummyEOSWork = std::get<3>(GetParam());
+
+    // Reset counters and determine the expected answers for all iterations.
+    mOutputFrameCounts.resize(mNumberOfPlaythrough, 0);
+    mFinishedWorkCounts.resize(mNumberOfPlaythrough, 0);
+    mMD5Strings.resize(mNumberOfPlaythrough);
+    std::vector<int> expectedOutputFrameCounts(mNumberOfPlaythrough, mTestVideoFile->mNumFrames);
+    auto expectedWorkCount = mTestVideoFile->mNumFragments;
+    if (mUseDummyEOSWork) {
+        expectedWorkCount += 1;  // plus one dummy EOS work
+    }
+    std::vector<int> expectedFinishedWorkCounts(mNumberOfPlaythrough, expectedWorkCount);
+    if (mFlushAfterWorkIndex >= 0) {
+        // First iteration performs the mid-stream flushing.
+        expectedOutputFrameCounts[0] = mFlushAfterWorkIndex + 1;
+        expectedFinishedWorkCounts[0] = mFlushAfterWorkIndex + 1;
+    }
+
+    std::shared_ptr<C2Component> component(std::make_shared<C2VDAComponent>(
+            mTestVideoFile->mComponentName, 0, std::make_shared<C2ReflectorHelper>()));
+
+    ASSERT_EQ(component->setListener_vb(mListener, C2_DONT_BLOCK), C2_OK);
+    ASSERT_EQ(component->start(), C2_OK);
+
+    std::atomic_bool running(true);
+    std::thread listenerThread([this, &running]() {
+        uint32_t iteration = 0;
+        ::base::MD5Context md5Ctx;
+        ::base::MD5Init(&md5Ctx);
+        ::base::File recordFile;
+        if (gRecordOutputYUV) {
+            auto recordFilePath = getRecordOutputPath(
+                    mTestVideoFile->mFilename, mTestVideoFile->mWidth, mTestVideoFile->mHeight);
+            fprintf(stdout, "record output file: %s\n", recordFilePath.value().c_str());
+            recordFile = ::base::File(recordFilePath,
+                                      ::base::File::FLAG_OPEN_ALWAYS | ::base::File::FLAG_WRITE);
+            ASSERT_TRUE(recordFile.IsValid());
+        }
+        while (running) {
+            std::unique_ptr<C2Work> work;
+            {
+                ULock l(mProcessedLock);
+                if (mProcessedWork.empty()) {
+                    mProcessedCondition.wait_for(l, 100ms);
+                    if (mProcessedWork.empty()) {
+                        continue;
+                    }
+                }
+                work = std::move(mProcessedWork.front());
+                mProcessedWork.pop_front();
+            }
+            mFinishedWorkCounts[iteration]++;
+            ALOGV("Output: frame index: %llu result: %d flags: 0x%x buffers: %zu",
+                  work->input.ordinal.frameIndex.peekull(), work->result,
+                  work->worklets.front()->output.flags,
+                  work->worklets.front()->output.buffers.size());
+
+            ASSERT_EQ(work->worklets.size(), 1u);
+            if (work->worklets.front()->output.buffers.size() == 1u) {
+                std::shared_ptr<C2Buffer> output = work->worklets.front()->output.buffers[0];
+                C2ConstGraphicBlock graphicBlock = output->data().graphicBlocks().front();
+
+                // check graphic buffer size (coded size) is not less than given video size.
+                ASSERT_LE(mTestVideoFile->mWidth, static_cast<int>(graphicBlock.width()));
+                ASSERT_LE(mTestVideoFile->mHeight, static_cast<int>(graphicBlock.height()));
+
+                // check visible rect equals to given video size.
+                ASSERT_EQ(mTestVideoFile->mWidth, static_cast<int>(graphicBlock.crop().width));
+                ASSERT_EQ(mTestVideoFile->mHeight, static_cast<int>(graphicBlock.crop().height));
+                ASSERT_EQ(0u, graphicBlock.crop().left);
+                ASSERT_EQ(0u, graphicBlock.crop().top);
+
+                // Intended behavior for Intel libva driver (crbug.com/148546):
+                // The 5ms latency is inserted here to make sure surface content is fully
+                // processed by libva.
+                std::this_thread::sleep_for(std::chrono::milliseconds(5));
+
+                const C2GraphicView& constGraphicView = graphicBlock.map().get();
+                ASSERT_EQ(C2_OK, constGraphicView.error());
+                std::vector<::base::StringPiece> framePieces;
+                getFrameStringPieces(constGraphicView, &framePieces);
+                ASSERT_FALSE(framePieces.empty());
+                if (mSanityCheck) {
+                    for (const auto& piece : framePieces) {
+                        ::base::MD5Update(&md5Ctx, piece);
+                    }
+                }
+                if (gRecordOutputYUV) {
+                    for (const auto& piece : framePieces) {
+                        ASSERT_EQ(static_cast<int>(piece.length()),
+                                  recordFile.WriteAtCurrentPos(piece.data(), piece.length()))
+                                << "Failed to write file for yuv recording...";
+                    }
+                }
+
+                work->worklets.front()->output.buffers.clear();
+                mOutputFrameCounts[iteration]++;
+            }
+
+            bool iteration_end =
+                    work->worklets.front()->output.flags & C2FrameData::FLAG_END_OF_STREAM;
+
+            // input buffer should be reset on the component side.
+            ASSERT_EQ(work->input.buffers.size(), 1u);
+            ASSERT_TRUE(work->input.buffers.front() == nullptr);
+            work->worklets.clear();
+            work->workletsProcessed = 0;
+
+            if (iteration == 0 && work->input.ordinal.frameIndex.peeku() ==
+                                          static_cast<uint64_t>(mFlushAfterWorkIndex)) {
+                ULock l(mFlushDoneLock);
+                mFlushDone = true;
+                mFlushDoneCondition.notify_all();
+                iteration_end = true;
+            }
+
+            ULock l(mQueueLock);
+            mWorkQueue.emplace_back(std::move(work));
+            mQueueCondition.notify_all();
+
+            if (iteration_end) {
+                // record md5sum
+                ::base::MD5Digest digest;
+                ::base::MD5Final(&digest, &md5Ctx);
+                mMD5Strings[iteration] = ::base::MD5DigestToBase16(digest);
+                ::base::MD5Init(&md5Ctx);
+
+                iteration++;
+                if (iteration == mNumberOfPlaythrough) {
+                    running.store(false);  // stop the thread
+                }
+            }
+        }
+    });
+
+    for (uint32_t iteration = 0; iteration < mNumberOfPlaythrough; ++iteration) {
+        ASSERT_TRUE(getMediaSourceFromFile(mTestVideoFile->mFilename, mTestVideoFile->mCodec,
+                                           &mTestVideoFile->mData));
+
+        std::deque<sp<ABuffer>> csds;
+        if (mTestVideoFile->mCodec == TestVideoFile::CodecType::H264) {
+            // Get csd buffers for h264.
+            sp<AMessage> format;
+            (void)convertMetaDataToMessage(mTestVideoFile->mData->getFormat(), &format);
+            csds.resize(2);
+            format->findBuffer("csd-0", &csds[0]);
+            format->findBuffer("csd-1", &csds[1]);
+            ASSERT_TRUE(csds[0] != nullptr && csds[1] != nullptr);
+        }
+
+        ASSERT_EQ(mTestVideoFile->mData->start(), OK);
+
+        int numWorks = 0;
+        while (true) {
+            size_t size = 0u;
+            void* data = nullptr;
+            int64_t timestamp = 0u;
+            MediaBufferBase* buffer = nullptr;
+            sp<ABuffer> csd;
+            bool queueDummyEOSWork = false;
+            if (!csds.empty()) {
+                csd = std::move(csds.front());
+                csds.pop_front();
+                size = csd->size();
+                data = csd->data();
+            } else {
+                if (mTestVideoFile->mData->read(&buffer) != OK) {
+                    ASSERT_TRUE(buffer == nullptr);
+                    if (mUseDummyEOSWork) {
+                        ALOGV("Meet end of stream. Put a dummy EOS work.");
+                        queueDummyEOSWork = true;
+                    } else {
+                        ALOGV("Meet end of stream. Now drain the component.");
+                        ASSERT_EQ(component->drain_nb(C2Component::DRAIN_COMPONENT_WITH_EOS),
+                                  C2_OK);
+                        break;
+                    }
+                    // TODO(johnylin): add test with drain with DRAIN_COMPONENT_NO_EOS when we know
+                    //                 the actual use case of it.
+                } else {
+                    MetaDataBase& meta = buffer->meta_data();
+                    ASSERT_TRUE(meta.findInt64(kKeyTime, &timestamp));
+                    size = buffer->size();
+                    data = buffer->data();
+                }
+            }
+
+            std::unique_ptr<C2Work> work;
+            while (!work) {
+                ULock l(mQueueLock);
+                if (!mWorkQueue.empty()) {
+                    work = std::move(mWorkQueue.front());
+                    mWorkQueue.pop_front();
+                } else {
+                    mQueueCondition.wait_for(l, 100ms);
+                }
+            }
+
+            work->input.ordinal.frameIndex = static_cast<uint64_t>(numWorks);
+            work->input.buffers.clear();
+
+            std::shared_ptr<C2LinearBlock> block;
+            if (queueDummyEOSWork) {
+                work->input.flags = C2FrameData::FLAG_END_OF_STREAM;
+                work->input.ordinal.timestamp = 0;  // timestamp is invalid for dummy EOS work
+
+                // Create a dummy input buffer by allocating minimal size of buffer from block pool.
+                mLinearBlockPool->fetchLinearBlock(
+                        1, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
+                work->input.buffers.emplace_back(new C2VDADummyLinearBuffer(std::move(block)));
+                ALOGV("Input: (Dummy EOS) id: %llu", work->input.ordinal.frameIndex.peekull());
+            } else {
+                work->input.flags = static_cast<C2FrameData::flags_t>(0);
+                work->input.ordinal.timestamp = static_cast<uint64_t>(timestamp);
+
+                // Allocate an input buffer with data size.
+                mLinearBlockPool->fetchLinearBlock(
+                        size, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
+                C2WriteView view = block->map().get();
+                ASSERT_EQ(view.error(), C2_OK);
+                memcpy(view.base(), data, size);
+                work->input.buffers.emplace_back(new C2VDALinearBuffer(std::move(block)));
+                ALOGV("Input: bitstream id: %llu timestamp: %llu size: %zu",
+                      work->input.ordinal.frameIndex.peekull(),
+                      work->input.ordinal.timestamp.peekull(), size);
+            }
+
+            work->worklets.clear();
+            work->worklets.emplace_back(new C2Worklet);
+
+            std::list<std::unique_ptr<C2Work>> items;
+            items.push_back(std::move(work));
+
+            // Queue the work.
+            ASSERT_EQ(component->queue_nb(&items), C2_OK);
+            numWorks++;
+
+            if (buffer) {
+                buffer->release();
+            }
+
+            if (iteration == 0 && numWorks == mFlushAfterWorkIndex + 1) {
+                // Perform flush.
+                // Note: C2VDAComponent does not return work via |flushedWork|.
+                ASSERT_EQ(component->flush_sm(C2Component::FLUSH_COMPONENT,
+                                              nullptr /* flushedWork */),
+                          C2_OK);
+                break;
+            }
+
+            if (queueDummyEOSWork) {
+                break;
+            }
+        }
+
+        if (iteration == 0 && mFlushAfterWorkIndex >= 0) {
+            // Wait here until client get all flushed works.
+            while (true) {
+                ULock l(mFlushDoneLock);
+                if (mFlushDone) {
+                    break;
+                }
+                mFlushDoneCondition.wait_for(l, 100ms);
+            }
+            ALOGV("Got flush done signal");
+            EXPECT_EQ(numWorks, mFlushAfterWorkIndex + 1);
+        } else {
+            EXPECT_EQ(numWorks, expectedWorkCount);
+        }
+        ASSERT_EQ(mTestVideoFile->mData->stop(), OK);
+    }
+
+    listenerThread.join();
+    ASSERT_EQ(running, false);
+    ASSERT_EQ(component->stop(), C2_OK);
+
+    // Finally, check that the decoding went as expected.
+    for (uint32_t i = 0; i < mNumberOfPlaythrough; ++i) {
+        if (mFlushAfterWorkIndex >= 0 && i == 0) {
+            EXPECT_LE(mOutputFrameCounts[i], expectedOutputFrameCounts[i]) << "At iteration: " << i;
+        } else {
+            EXPECT_EQ(mOutputFrameCounts[i], expectedOutputFrameCounts[i]) << "At iteration: " << i;
+        }
+        EXPECT_EQ(mFinishedWorkCounts[i], expectedFinishedWorkCounts[i]) << "At iteration: " << i;
+    }
+
+    if (mSanityCheck) {
+        std::vector<std::string> goldenMD5s;
+        readGoldenMD5s(mTestVideoFile->mFilename, &goldenMD5s);
+        for (uint32_t i = 0; i < mNumberOfPlaythrough; ++i) {
+            if (mFlushAfterWorkIndex >= 0 && i == 0) {
+                continue;  // do not compare the iteration with flushing
+            }
+            bool matched = std::find(goldenMD5s.begin(), goldenMD5s.end(), mMD5Strings[i]) !=
+                           goldenMD5s.end();
+            EXPECT_TRUE(matched) << "Unknown MD5: " << mMD5Strings[i] << " at iter: " << i;
+        }
+    }
+}
+
+// Play input video once, end by draining.
+INSTANTIATE_TEST_CASE_P(SinglePlaythroughTest, C2VDAComponentParamTest,
+                        ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::NO_FLUSH),
+                                                          1u, false, false)));
+// Play input video once, end by dummy EOS work.
+INSTANTIATE_TEST_CASE_P(DummyEOSWorkTest, C2VDAComponentParamTest,
+                        ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::NO_FLUSH),
+                                                          1u, false, true)));
+
+// Play 5 times of input video, and check sanity by MD5Sum.
+INSTANTIATE_TEST_CASE_P(MultiplePlaythroughSanityTest, C2VDAComponentParamTest,
+                        ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::NO_FLUSH),
+                                                          5u, true, false)));
+
+// Test mid-stream flush then play once entirely.
+INSTANTIATE_TEST_CASE_P(FlushPlaythroughTest, C2VDAComponentParamTest,
+                        ::testing::Values(std::make_tuple(40, 1u, true, false)));
+
+// Test mid-stream flush then stop.
+INSTANTIATE_TEST_CASE_P(FlushStopTest, C2VDAComponentParamTest,
+                        ::testing::Values(std::make_tuple(
+                                static_cast<int>(FlushPoint::MID_STREAM_FLUSH), 0u, false, false)));
+
+// Test early flush (after a few works) then stop.
+INSTANTIATE_TEST_CASE_P(EarlyFlushStopTest, C2VDAComponentParamTest,
+                        ::testing::Values(std::make_tuple(0, 0u, false, false),
+                                          std::make_tuple(1, 0u, false, false),
+                                          std::make_tuple(2, 0u, false, false),
+                                          std::make_tuple(3, 0u, false, false)));
+
+// Test end-of-stream flush then stop.
+INSTANTIATE_TEST_CASE_P(
+        EndOfStreamFlushStopTest, C2VDAComponentParamTest,
+        ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::END_OF_STREAM_FLUSH), 0u,
+                                          false, false)));
+
+}  // namespace android
+
+static void usage(const char* me) {
+    fprintf(stderr, "usage: %s [-i test_video_data] [-r(ecord YUV)] [gtest options]\n", me);
+}
+
+int main(int argc, char** argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+
+    int res;
+    while ((res = getopt(argc, argv, "i:r")) >= 0) {
+        switch (res) {
+        case 'i': {
+            android::gTestVideoData = optarg;
+            break;
+        }
+        case 'r': {
+            android::gRecordOutputYUV = true;
+            break;
+        }
+        default: {
+            usage(argv[0]);
+            exit(1);
+            break;
+        }
+        }
+    }
+
+    return RUN_ALL_TESTS();
+}
diff --git a/tests/data/bear-vp8.webm b/tests/data/bear-vp8.webm
new file mode 100644
index 0000000..02ae36c
--- /dev/null
+++ b/tests/data/bear-vp8.webm
Binary files differ
diff --git a/tests/data/bear-vp8.webm.md5 b/tests/data/bear-vp8.webm.md5
new file mode 100644
index 0000000..25d983c
--- /dev/null
+++ b/tests/data/bear-vp8.webm.md5
@@ -0,0 +1,5 @@
+# gTestVideoData = "bear-vp8.webm:c2.vda.vp8.decoder:640:360:82:82"
+# ARM - Mali
+056a2484b34bc78637b37b36481027c6
+# Intel
+fdc9d348b06a77e65a8aa0ccc120c6f9
diff --git a/tests/data/bear-vp9.webm b/tests/data/bear-vp9.webm
new file mode 100644
index 0000000..4f497ae
--- /dev/null
+++ b/tests/data/bear-vp9.webm
Binary files differ
diff --git a/tests/data/bear-vp9.webm.md5 b/tests/data/bear-vp9.webm.md5
new file mode 100644
index 0000000..99810d5
--- /dev/null
+++ b/tests/data/bear-vp9.webm.md5
@@ -0,0 +1,5 @@
+# gTestVideoData = "bear-vp9.webm:c2.vda.vp9.decoder:320:240:82:82"
+# ARM - Mali
+7228c16473724e4dff2fc55edcf94683
+# Intel
+058213ed7a7e119838564001b7ee8004
diff --git a/tests/data/bear.mp4 b/tests/data/bear.mp4
new file mode 100644
index 0000000..f1d30fb
--- /dev/null
+++ b/tests/data/bear.mp4
Binary files differ
diff --git a/tests/data/bear.mp4.md5 b/tests/data/bear.mp4.md5
new file mode 100644
index 0000000..d8f8c2d
--- /dev/null
+++ b/tests/data/bear.mp4.md5
@@ -0,0 +1,5 @@
+# gTestVideoData = "bear.mp4:c2.vda.avc.decoder:640:360:82:84"
+# ARM - Mali
+a3ea733a472e222608d690e91e6c88cc
+# Intel
+431076e337c24fe71a50ae07c64fdf3c
diff --git a/vda/.clang-format b/vda/.clang-format
new file mode 100644
index 0000000..151d19a
--- /dev/null
+++ b/vda/.clang-format
@@ -0,0 +1,4 @@
+# The code in this directory is ported from the Chromium Project.
+# Therefore, it is obviously based on the Chromium coding style
+# and shouldn't be formatted to the Android coding style.
+BasedOnStyle: None
diff --git a/vda/Android.bp b/vda/Android.bp
new file mode 100644
index 0000000..5f84535
--- /dev/null
+++ b/vda/Android.bp
@@ -0,0 +1,53 @@
+cc_library_shared {
+    name: "libv4l2_codec2_vda",
+    srcs: [
+        "bit_reader.cc",
+        "bit_reader_core.cc",
+        "bitstream_buffer.cc",
+        "h264_bit_reader.cc",
+        "h264_decoder.cc",
+        "h264_dpb.cc",
+        "h264_parser.cc",
+        "native_pixmap_handle.cc",
+        "picture.cc",
+        "ranges.cc",
+        "shared_memory_region.cc",
+        "v4l2_device.cc",
+        "v4l2_slice_video_decode_accelerator.cc",
+        "v4l2_video_decode_accelerator.cc",
+        "video_codecs.cc",
+        "video_decode_accelerator.cc",
+        "vp8_bool_decoder.cc",
+        "vp8_decoder.cc",
+        "vp8_parser.cc",
+        "vp8_picture.cc",
+        "vp9_bool_decoder.cc",
+        "vp9_compressed_header_parser.cc",
+        "vp9_decoder.cc",
+        "vp9_parser.cc",
+        "vp9_picture.cc",
+        "vp9_raw_bits_reader.cc",
+        "vp9_uncompressed_header_parser.cc",
+    ],
+
+    shared_libs: ["libchrome"],
+    // -Wno-unused-parameter is needed for libchrome/base codes
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wno-unused-parameter",
+    ],
+    clang: true,
+    sanitize: {
+        misc_undefined: [
+            "unsigned-integer-overflow",
+            "signed-integer-overflow",
+        ],
+    },
+
+    ldflags: [
+        "-Wl",
+        "-Bsymbolic",
+    ],
+    export_include_dirs: ["."],
+}
diff --git a/vda/Android.mk b/vda/Android.mk
deleted file mode 100644
index 08a88ea..0000000
--- a/vda/Android.mk
+++ /dev/null
@@ -1,48 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_CPP_EXTENSION:= .cc
-LOCAL_SRC_FILES:= \
-        bit_reader.cc       \
-        bit_reader_core.cc  \
-        bitstream_buffer.cc \
-        h264_bit_reader.cc  \
-        h264_decoder.cc     \
-        h264_dpb.cc         \
-        h264_parser.cc      \
-        picture.cc          \
-        ranges.cc           \
-        shared_memory_region.cc \
-        v4l2_device.cc      \
-        v4l2_slice_video_decode_accelerator.cc \
-        video_codecs.cc     \
-        video_decode_accelerator.cc \
-        vp8_bool_decoder.cc \
-        vp8_decoder.cc      \
-        vp8_parser.cc       \
-        vp8_picture.cc      \
-        vp9_bool_decoder.cc \
-        vp9_compressed_header_parser.cc \
-        vp9_decoder.cc      \
-        vp9_parser.cc       \
-        vp9_picture.cc      \
-        vp9_raw_bits_reader.cc \
-        vp9_uncompressed_header_parser.cc \
-
-# gtest/include is for included file from libchrome/base/gtest_prod_util.h
-LOCAL_C_INCLUDES += \
-        $(TOP)/external/libchrome \
-        $(TOP)/external/gtest/include \
-
-LOCAL_MODULE:= libv4l2_codec2_vda
-
-LOCAL_SHARED_LIBRARIES := libchrome \
-
-# -Wno-unused-parameter is needed for libchrome/base codes
-LOCAL_CFLAGS += -Werror -Wall -Wno-unused-parameter
-LOCAL_CLANG := true
-LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
-
-LOCAL_LDFLAGS := -Wl,-Bsymbolic
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/vda/accelerated_video_decoder.h b/vda/accelerated_video_decoder.h
index fe1c711..238e34d 100644
--- a/vda/accelerated_video_decoder.h
+++ b/vda/accelerated_video_decoder.h
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 77118c9
 
 #ifndef ACCELERATED_VIDEO_DECODER_H_
 #define ACCELERATED_VIDEO_DECODER_H_
diff --git a/vda/bit_reader.cc b/vda/bit_reader.cc
index 953d144..95e7634 100644
--- a/vda/bit_reader.cc
+++ b/vda/bit_reader.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "bit_reader.h"
 
@@ -15,7 +16,7 @@
   DCHECK_GE(size, 0);
 }
 
-BitReader::~BitReader() {}
+BitReader::~BitReader() = default;
 
 bool BitReader::ReadString(int num_bits, std::string* str) {
   DCHECK_EQ(num_bits % 8, 0);
diff --git a/vda/bit_reader.h b/vda/bit_reader.h
index 2b3fad0..dfc2b0b 100644
--- a/vda/bit_reader.h
+++ b/vda/bit_reader.h
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 43ddd7a
 
 #ifndef BIT_READER_H_
 #define BIT_READER_H_
@@ -14,8 +15,7 @@
 
 namespace media {
 
-class BitReader
-    : NON_EXPORTED_BASE(private BitReaderCore::ByteStreamProvider)  {
+class BitReader : private BitReaderCore::ByteStreamProvider {
  public:
   // Initialize the reader to start reading at |data|, |size| being size
   // of |data| in bytes.
diff --git a/vda/bit_reader_core.cc b/vda/bit_reader_core.cc
index 220ea03..92b3211 100644
--- a/vda/bit_reader_core.cc
+++ b/vda/bit_reader_core.cc
@@ -1,6 +1,7 @@
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "bit_reader_core.h"
 
@@ -14,11 +15,9 @@
 
 namespace media {
 
-BitReaderCore::ByteStreamProvider::ByteStreamProvider() {
-}
+BitReaderCore::ByteStreamProvider::ByteStreamProvider() = default;
 
-BitReaderCore::ByteStreamProvider::~ByteStreamProvider() {
-}
+BitReaderCore::ByteStreamProvider::~ByteStreamProvider() = default;
 
 BitReaderCore::BitReaderCore(ByteStreamProvider* byte_stream_provider)
     : byte_stream_provider_(byte_stream_provider),
@@ -29,8 +28,7 @@
       reg_next_(0) {
 }
 
-BitReaderCore::~BitReaderCore() {
-}
+BitReaderCore::~BitReaderCore() = default;
 
 bool BitReaderCore::ReadFlag(bool* flag) {
   if (nbits_ == 0 && !Refill(1))
diff --git a/vda/bit_reader_core.h b/vda/bit_reader_core.h
index 9e73018..62a21e2 100644
--- a/vda/bit_reader_core.h
+++ b/vda/bit_reader_core.h
@@ -1,6 +1,7 @@
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 1323b9c
 
 #ifndef BIT_READER_CORE_H_
 #define BIT_READER_CORE_H_
diff --git a/vda/bitstream_buffer.cc b/vda/bitstream_buffer.cc
index 4f71755..36b8d06 100644
--- a/vda/bitstream_buffer.cc
+++ b/vda/bitstream_buffer.cc
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "bitstream_buffer.h"
 
@@ -22,6 +23,6 @@
 
 BitstreamBuffer::BitstreamBuffer(const BitstreamBuffer& other) = default;
 
-BitstreamBuffer::~BitstreamBuffer() {}
+BitstreamBuffer::~BitstreamBuffer() = default;
 
 }  // namespace media
diff --git a/vda/bitstream_buffer.h b/vda/bitstream_buffer.h
index 88555a28..3a267a0 100644
--- a/vda/bitstream_buffer.h
+++ b/vda/bitstream_buffer.h
@@ -1,6 +1,7 @@
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 39a7f93
 
 #ifndef MEDIA_BASE_BITSTREAM_BUFFER_H_
 #define MEDIA_BASE_BITSTREAM_BUFFER_H_
diff --git a/vda/h264_bit_reader.cc b/vda/h264_bit_reader.cc
index 7c536b3..6713655 100644
--- a/vda/h264_bit_reader.cc
+++ b/vda/h264_bit_reader.cc
@@ -1,6 +1,7 @@
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "base/logging.h"
 #include "h264_bit_reader.h"
@@ -15,7 +16,7 @@
       prev_two_bytes_(0),
       emulation_prevention_bytes_(0) {}
 
-H264BitReader::~H264BitReader() {}
+H264BitReader::~H264BitReader() = default;
 
 bool H264BitReader::Initialize(const uint8_t* data, off_t size) {
   DCHECK(data);
diff --git a/vda/h264_bit_reader.h b/vda/h264_bit_reader.h
index 156b524..aa162ce 100644
--- a/vda/h264_bit_reader.h
+++ b/vda/h264_bit_reader.h
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 //
 // This file contains an implementation of an H264 Annex-B video stream parser.
+// Note: ported from Chromium commit head: 77be7ae
 
 #ifndef H264_BIT_READER_H_
 #define H264_BIT_READER_H_
diff --git a/vda/h264_decoder.cc b/vda/h264_decoder.cc
index 3964059..abaaac5 100644
--- a/vda/h264_decoder.cc
+++ b/vda/h264_decoder.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: c3bd64c
 
 #include <algorithm>
 #include <limits>
@@ -16,22 +17,22 @@
 
 namespace media {
 
-H264Decoder::H264Accelerator::H264Accelerator() {}
+H264Decoder::H264Accelerator::H264Accelerator() = default;
 
-H264Decoder::H264Accelerator::~H264Accelerator() {}
+H264Decoder::H264Accelerator::~H264Accelerator() = default;
 
 H264Decoder::H264Decoder(H264Accelerator* accelerator)
-    : max_frame_num_(0),
+    : state_(kNeedStreamMetadata),
+      max_frame_num_(0),
       max_pic_num_(0),
       max_long_term_frame_idx_(0),
       max_num_reorder_frames_(0),
       accelerator_(accelerator) {
   DCHECK(accelerator_);
   Reset();
-  state_ = kNeedStreamMetadata;
 }
 
-H264Decoder::~H264Decoder() {}
+H264Decoder::~H264Decoder() = default;
 
 void H264Decoder::Reset() {
   curr_pic_ = nullptr;
@@ -177,6 +178,8 @@
            sizeof(curr_pic_->ref_pic_marking));
   }
 
+  curr_pic_->visible_rect = visible_rect_;
+
   return true;
 }
 
@@ -1107,10 +1110,24 @@
   if (max_dpb_mbs == 0)
     return false;
 
-  size_t max_dpb_size = std::min(max_dpb_mbs / (width_mb * height_mb),
-                                 static_cast<int>(H264DPB::kDPBMaxSize));
-  if (max_dpb_size == 0) {
-    DVLOG(1) << "Invalid DPB Size";
+  // MaxDpbFrames from level limits per spec.
+  size_t max_dpb_frames = std::min(max_dpb_mbs / (width_mb * height_mb),
+                                   static_cast<int>(H264DPB::kDPBMaxSize));
+  DVLOG(1) << "MaxDpbFrames: " << max_dpb_frames
+           << ", max_num_ref_frames: " << sps->max_num_ref_frames
+           << ", max_dec_frame_buffering: " << sps->max_dec_frame_buffering;
+
+  // Set DPB size to at least the level limit, or what the stream requires.
+  size_t max_dpb_size =
+      std::max(static_cast<int>(max_dpb_frames),
+               std::max(sps->max_num_ref_frames, sps->max_dec_frame_buffering));
+  // Some non-conforming streams specify more frames are needed than the current
+  // level limit. Allow this, but only up to the maximum number of reference
+  // frames allowed per spec.
+  DVLOG_IF(1, max_dpb_size > max_dpb_frames)
+      << "Invalid stream, DPB size > MaxDpbFrames";
+  if (max_dpb_size == 0 || max_dpb_size > H264DPB::kDPBMaxSize) {
+    DVLOG(1) << "Invalid DPB size: " << max_dpb_size;
     return false;
   }
 
@@ -1124,6 +1141,12 @@
     dpb_.set_max_num_pics(max_dpb_size);
   }
 
+  Rect new_visible_rect = sps->GetVisibleRect().value_or(Rect());
+  if (visible_rect_ != new_visible_rect) {
+    DVLOG(2) << "New visible rect: " << new_visible_rect.ToString();
+    visible_rect_ = new_visible_rect;
+  }
+
   if (!UpdateMaxNumReorderFrames(sps))
     return false;
   DVLOG(1) << "max_num_reorder_frames: " << max_num_reorder_frames_;
@@ -1320,7 +1343,7 @@
         if (state_ != kDecoding)
           break;
 
-      // else fallthrough
+        // else fallthrough
       case H264NALU::kIDRSlice: {
         // TODO(posciak): the IDR may require an SPS that we don't have
         // available. For now we'd fail if that happens, but ideally we'd like
diff --git a/vda/h264_decoder.h b/vda/h264_decoder.h
index 27a4c10..82ab98f 100644
--- a/vda/h264_decoder.h
+++ b/vda/h264_decoder.h
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 77be7ae
 
 #ifndef H264_DECODER_H_
 #define H264_DECODER_H_
@@ -16,6 +17,7 @@
 #include "accelerated_video_decoder.h"
 #include "h264_dpb.h"
 #include "h264_parser.h"
+#include "rect.h"
 #include "size.h"
 
 namespace media {
@@ -266,6 +268,8 @@
 
   // Output picture size.
   Size pic_size_;
+  // Output visible cropping rect.
+  Rect visible_rect_;
 
   // PicOrderCount of the previously outputted frame.
   int last_output_poc_;
diff --git a/vda/h264_dpb.cc b/vda/h264_dpb.cc
index 0e1b411..af0b5e0 100644
--- a/vda/h264_dpb.cc
+++ b/vda/h264_dpb.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include <string.h>
 
@@ -44,14 +45,14 @@
   memset(&ref_pic_marking, 0, sizeof(ref_pic_marking));
 }
 
-H264Picture::~H264Picture() {}
+H264Picture::~H264Picture() = default;
 
 V4L2H264Picture* H264Picture::AsV4L2H264Picture() {
   return nullptr;
 }
 
 H264DPB::H264DPB() : max_num_pics_(0) {}
-H264DPB::~H264DPB() {}
+H264DPB::~H264DPB() = default;
 
 void H264DPB::Clear() {
   pics_.clear();
diff --git a/vda/h264_dpb.h b/vda/h264_dpb.h
index 6be9f21..3da284e 100644
--- a/vda/h264_dpb.h
+++ b/vda/h264_dpb.h
@@ -4,6 +4,7 @@
 //
 // This file contains an implementation of an H.264 Decoded Picture Buffer
 // used in H264 decoders.
+// Note: ported from Chromium commit head: 70340ce
 
 #ifndef H264_DPB_H_
 #define H264_DPB_H_
@@ -15,6 +16,7 @@
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "h264_parser.h"
+#include "rect.h"
 
 namespace media {
 
@@ -22,7 +24,7 @@
 
 // A picture (a frame or a field) in the H.264 spec sense.
 // See spec at http://www.itu.int/rec/T-REC-H.264
-class H264Picture : public base::RefCounted<H264Picture> {
+class H264Picture : public base::RefCountedThreadSafe<H264Picture> {
  public:
   using Vector = std::vector<scoped_refptr<H264Picture>>;
 
@@ -82,8 +84,12 @@
   // Position in DPB (i.e. index in DPB).
   int dpb_position;
 
+  // The visible size of picture. This could be either parsed from SPS, or set
+  // to Rect(0, 0) for indicating invalid values or not available.
+  Rect visible_rect;
+
  protected:
-  friend class base::RefCounted<H264Picture>;
+  friend class base::RefCountedThreadSafe<H264Picture>;
   virtual ~H264Picture();
 
  private:
diff --git a/vda/h264_parser.cc b/vda/h264_parser.cc
index 0f37924..94b1e10 100644
--- a/vda/h264_parser.cc
+++ b/vda/h264_parser.cc
@@ -1,8 +1,10 @@
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "h264_parser.h"
+#include "subsample_entry.h"
 
 #include <limits>
 #include <memory>
@@ -64,6 +66,60 @@
               map_unit * (pic_height_in_map_units_minus1 + 1));
 }
 
+// Also based on section 7.4.2.1.1.
+base::Optional<Rect> H264SPS::GetVisibleRect() const {
+  base::Optional<Size> coded_size = GetCodedSize();
+  if (!coded_size)
+    return base::nullopt;
+
+  if (!frame_cropping_flag)
+    return Rect(coded_size.value());
+
+  int crop_unit_x;
+  int crop_unit_y;
+  if (chroma_array_type == 0) {
+    crop_unit_x = 1;
+    crop_unit_y = frame_mbs_only_flag ? 1 : 2;
+  } else {
+    // Section 6.2.
+    // |chroma_format_idc| may be:
+    //   1 => 4:2:0
+    //   2 => 4:2:2
+    //   3 => 4:4:4
+    // Everything else has |chroma_array_type| == 0.
+    int sub_width_c = chroma_format_idc > 2 ? 1 : 2;
+    int sub_height_c = chroma_format_idc > 1 ? 1 : 2;
+    crop_unit_x = sub_width_c;
+    crop_unit_y = sub_height_c * (frame_mbs_only_flag ? 1 : 2);
+  }
+
+  // Verify that the values are not too large before multiplying.
+  if (coded_size->width() / crop_unit_x < frame_crop_left_offset ||
+      coded_size->width() / crop_unit_x < frame_crop_right_offset ||
+      coded_size->height() / crop_unit_y < frame_crop_top_offset ||
+      coded_size->height() / crop_unit_y < frame_crop_bottom_offset) {
+    DVLOG(1) << "Frame cropping exceeds coded size.";
+    return base::nullopt;
+  }
+  int crop_left = crop_unit_x * frame_crop_left_offset;
+  int crop_right = crop_unit_x * frame_crop_right_offset;
+  int crop_top = crop_unit_y * frame_crop_top_offset;
+  int crop_bottom = crop_unit_y * frame_crop_bottom_offset;
+
+  // Verify that the values are sane. Note that some decoders also require that
+  // crops are smaller than a macroblock and/or that crops must be adjacent to
+  // at least one corner of the coded frame.
+  if (coded_size->width() - crop_left <= crop_right ||
+      coded_size->height() - crop_top <= crop_bottom) {
+    DVLOG(1) << "Frame cropping excludes entire frame.";
+    return base::nullopt;
+  }
+
+  return Rect(crop_left, crop_top,
+              coded_size->width() - crop_left - crop_right,
+              coded_size->height() - crop_top - crop_bottom);
+}
+
 H264PPS::H264PPS() {
   memset(this, 0, sizeof(*this));
 }
@@ -134,12 +190,10 @@
 
 // ISO 14496 part 10
 // VUI parameters: Table E-1 "Meaning of sample aspect ratio indicator"
-static const int kTableSarWidth[] = {
-  0, 1, 12, 10, 16, 40, 24, 20, 32, 80, 18, 15, 64, 160, 4, 3, 2
-};
-static const int kTableSarHeight[] = {
-  0, 1, 11, 11, 11, 33, 11, 11, 11, 33, 11, 11, 33, 99, 3, 2, 1
-};
+static const int kTableSarWidth[] = {0,  1,  12, 10, 16,  40, 24, 20, 32,
+                                     80, 18, 15, 64, 160, 4,  3,  2};
+static const int kTableSarHeight[] = {0,  1,  11, 11, 11, 33, 11, 11, 11,
+                                      33, 11, 11, 33, 99, 3,  2,  1};
 static_assert(arraysize(kTableSarWidth) == arraysize(kTableSarHeight),
               "sar tables must have the same size");
 
@@ -147,8 +201,7 @@
   Reset();
 }
 
-H264Parser::~H264Parser() {
-}
+H264Parser::~H264Parser() = default;
 
 void H264Parser::Reset() {
   stream_ = NULL;
@@ -217,6 +270,19 @@
   off_t bytes_left = data_size;
 
   while (bytes_left >= 3) {
+    // The start code is "\0\0\1", ones are more unusual than zeroes, so let's
+    // search for it first.
+    const uint8_t* tmp =
+        reinterpret_cast<const uint8_t*>(memchr(data + 2, 1, bytes_left - 2));
+    if (!tmp) {
+      data += bytes_left - 2;
+      bytes_left = 2;
+      break;
+    }
+    tmp -= 2;
+    bytes_left -= tmp - data;
+    data = tmp;
+
     if (IsStartCode(data)) {
       // Found three-byte start code, set pointer at its beginning.
       *offset = data_size - bytes_left;
@@ -251,8 +317,7 @@
   off_t nalu_start_off = 0;
   off_t annexb_start_code_size = 0;
 
-  if (!FindStartCodeInClearRanges(stream_, bytes_left_,
-                                  encrypted_ranges_,
+  if (!FindStartCodeInClearRanges(stream_, bytes_left_, encrypted_ranges_,
                                   &nalu_start_off, &annexb_start_code_size)) {
     DVLOG(4) << "Could not find start code, end of stream?";
     return false;
@@ -277,10 +342,9 @@
   // belong to the current NALU.
   off_t next_start_code_size = 0;
   off_t nalu_size_without_start_code = 0;
-  if (!FindStartCodeInClearRanges(nalu_data, max_nalu_data_size,
-                                  encrypted_ranges_,
-                                  &nalu_size_without_start_code,
-                                  &next_start_code_size)) {
+  if (!FindStartCodeInClearRanges(
+          nalu_data, max_nalu_data_size, encrypted_ranges_,
+          &nalu_size_without_start_code, &next_start_code_size)) {
     nalu_size_without_start_code = max_nalu_data_size;
   }
   *nalu_size = nalu_size_without_start_code + annexb_start_code_size;
@@ -288,6 +352,7 @@
   return true;
 }
 
+// static
 bool H264Parser::FindStartCodeInClearRanges(
     const uint8_t* data,
     off_t data_size,
@@ -325,6 +390,30 @@
   return true;
 }
 
+// static
+bool H264Parser::ParseNALUs(const uint8_t* stream,
+                            size_t stream_size,
+                            std::vector<H264NALU>* nalus) {
+  DCHECK(nalus);
+  H264Parser parser;
+  parser.SetStream(stream, stream_size);
+
+  while (true) {
+    H264NALU nalu;
+    const H264Parser::Result result = parser.AdvanceToNextNALU(&nalu);
+    if (result == H264Parser::kOk) {
+      nalus->push_back(nalu);
+    } else if (result == media::H264Parser::kEOStream) {
+      return true;
+    } else {
+      DLOG(ERROR) << "Unexpected H264 parser result";
+      return false;
+    }
+  }
+  NOTREACHED();
+  return false;
+}
+
 H264Parser::Result H264Parser::ReadUE(int* val) {
   int num_bits = -1;
   int bit;
@@ -381,6 +470,8 @@
   if (!LocateNALU(&nalu_size_with_start_code, &start_code_size)) {
     DVLOG(4) << "Could not find next NALU, bytes left in stream: "
              << bytes_left_;
+    stream_ = nullptr;
+    bytes_left_ = 0;
     return kEOStream;
   }
 
@@ -389,8 +480,11 @@
   DVLOG(4) << "NALU found: size=" << nalu_size_with_start_code;
 
   // Initialize bit reader at the start of found NALU.
-  if (!br_.Initialize(nalu->data, nalu->size))
+  if (!br_.Initialize(nalu->data, nalu->size)) {
+    stream_ = nullptr;
+    bytes_left_ = 0;
     return kEOStream;
+  }
 
   // Move parser state to after this NALU, so next time AdvanceToNextNALU
   // is called, we will effectively be skipping it;
@@ -417,22 +511,26 @@
 
 // Default scaling lists (per spec).
 static const int kDefault4x4Intra[kH264ScalingList4x4Length] = {
-    6, 13, 13, 20, 20, 20, 28, 28, 28, 28, 32, 32, 32, 37, 37, 42, };
+    6, 13, 13, 20, 20, 20, 28, 28, 28, 28, 32, 32, 32, 37, 37, 42,
+};
 
 static const int kDefault4x4Inter[kH264ScalingList4x4Length] = {
-    10, 14, 14, 20, 20, 20, 24, 24, 24, 24, 27, 27, 27, 30, 30, 34, };
+    10, 14, 14, 20, 20, 20, 24, 24, 24, 24, 27, 27, 27, 30, 30, 34,
+};
 
 static const int kDefault8x8Intra[kH264ScalingList8x8Length] = {
     6,  10, 10, 13, 11, 13, 16, 16, 16, 16, 18, 18, 18, 18, 18, 23,
     23, 23, 23, 23, 23, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27,
     27, 27, 27, 27, 29, 29, 29, 29, 29, 29, 29, 31, 31, 31, 31, 31,
-    31, 33, 33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42, };
+    31, 33, 33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42,
+};
 
 static const int kDefault8x8Inter[kH264ScalingList8x8Length] = {
     9,  13, 13, 15, 13, 15, 17, 17, 17, 17, 19, 19, 19, 19, 19, 21,
     21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 24, 24, 24, 24,
     24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27, 27,
-    27, 28, 28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35, };
+    27, 28, 28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35,
+};
 
 static inline void DefaultScalingList4x4(
     int i,
@@ -579,8 +677,7 @@
 
     if (seq_scaling_list_present_flag) {
       res = ParseScalingList(arraysize(sps->scaling_list4x4[i]),
-                             sps->scaling_list4x4[i],
-                             &use_default);
+                             sps->scaling_list4x4[i], &use_default);
       if (res != kOk)
         return res;
 
@@ -588,8 +685,8 @@
         DefaultScalingList4x4(i, sps->scaling_list4x4);
 
     } else {
-      FallbackScalingList4x4(
-          i, kDefault4x4Intra, kDefault4x4Inter, sps->scaling_list4x4);
+      FallbackScalingList4x4(i, kDefault4x4Intra, kDefault4x4Inter,
+                             sps->scaling_list4x4);
     }
   }
 
@@ -599,8 +696,7 @@
 
     if (seq_scaling_list_present_flag) {
       res = ParseScalingList(arraysize(sps->scaling_list8x8[i]),
-                             sps->scaling_list8x8[i],
-                             &use_default);
+                             sps->scaling_list8x8[i], &use_default);
       if (res != kOk)
         return res;
 
@@ -608,8 +704,8 @@
         DefaultScalingList8x8(i, sps->scaling_list8x8);
 
     } else {
-      FallbackScalingList8x8(
-          i, kDefault8x8Intra, kDefault8x8Inter, sps->scaling_list8x8);
+      FallbackScalingList8x8(i, kDefault8x8Intra, kDefault8x8Inter,
+                             sps->scaling_list8x8);
     }
   }
 
@@ -628,8 +724,7 @@
 
     if (pic_scaling_list_present_flag) {
       res = ParseScalingList(arraysize(pps->scaling_list4x4[i]),
-                             pps->scaling_list4x4[i],
-                             &use_default);
+                             pps->scaling_list4x4[i], &use_default);
       if (res != kOk)
         return res;
 
@@ -639,14 +734,12 @@
     } else {
       if (!sps.seq_scaling_matrix_present_flag) {
         // Table 7-2 fallback rule A in spec.
-        FallbackScalingList4x4(
-            i, kDefault4x4Intra, kDefault4x4Inter, pps->scaling_list4x4);
+        FallbackScalingList4x4(i, kDefault4x4Intra, kDefault4x4Inter,
+                               pps->scaling_list4x4);
       } else {
         // Table 7-2 fallback rule B in spec.
-        FallbackScalingList4x4(i,
-                               sps.scaling_list4x4[0],
-                               sps.scaling_list4x4[3],
-                               pps->scaling_list4x4);
+        FallbackScalingList4x4(i, sps.scaling_list4x4[0],
+                               sps.scaling_list4x4[3], pps->scaling_list4x4);
       }
     }
   }
@@ -657,8 +750,7 @@
 
       if (pic_scaling_list_present_flag) {
         res = ParseScalingList(arraysize(pps->scaling_list8x8[i]),
-                               pps->scaling_list8x8[i],
-                               &use_default);
+                               pps->scaling_list8x8[i], &use_default);
         if (res != kOk)
           return res;
 
@@ -668,14 +760,12 @@
       } else {
         if (!sps.seq_scaling_matrix_present_flag) {
           // Table 7-2 fallback rule A in spec.
-          FallbackScalingList8x8(
-              i, kDefault8x8Intra, kDefault8x8Inter, pps->scaling_list8x8);
+          FallbackScalingList8x8(i, kDefault8x8Intra, kDefault8x8Inter,
+                                 pps->scaling_list8x8);
         } else {
           // Table 7-2 fallback rule B in spec.
-          FallbackScalingList8x8(i,
-                                 sps.scaling_list8x8[0],
-                                 sps.scaling_list8x8[1],
-                                 pps->scaling_list8x8);
+          FallbackScalingList8x8(i, sps.scaling_list8x8[0],
+                                 sps.scaling_list8x8[1], pps->scaling_list8x8);
         }
       }
     }
@@ -697,8 +787,8 @@
   IN_RANGE_OR_RETURN(cpb_cnt_minus1, 0, 31);
   READ_BITS_OR_RETURN(8, &data);  // bit_rate_scale, cpb_size_scale
   for (int i = 0; i <= cpb_cnt_minus1; ++i) {
-    READ_UE_OR_RETURN(&data);  // bit_rate_value_minus1[i]
-    READ_UE_OR_RETURN(&data);  // cpb_size_value_minus1[i]
+    READ_UE_OR_RETURN(&data);    // bit_rate_value_minus1[i]
+    READ_UE_OR_RETURN(&data);    // cpb_size_value_minus1[i]
     READ_BOOL_OR_RETURN(&data);  // cbr_flag
   }
   READ_BITS_OR_RETURN(20, &data);  // cpb/dpb delays, etc.
@@ -755,7 +845,7 @@
     READ_BITS_OR_RETURN(16, &data);  // num_units_in_tick
     READ_BITS_OR_RETURN(16, &data);  // time_scale
     READ_BITS_OR_RETURN(16, &data);  // time_scale
-    READ_BOOL_OR_RETURN(&data);  // fixed_frame_rate_flag
+    READ_BOOL_OR_RETURN(&data);      // fixed_frame_rate_flag
   }
 
   // Read and ignore NAL HRD parameters, if present.
@@ -769,22 +859,22 @@
   if (res != kOk)
     return res;
 
-  if (hrd_parameters_present)  // One of NAL or VCL params present is enough.
+  if (hrd_parameters_present)    // One of NAL or VCL params present is enough.
     READ_BOOL_OR_RETURN(&data);  // low_delay_hrd_flag
 
   READ_BOOL_OR_RETURN(&data);  // pic_struct_present_flag
   READ_BOOL_OR_RETURN(&sps->bitstream_restriction_flag);
   if (sps->bitstream_restriction_flag) {
     READ_BOOL_OR_RETURN(&data);  // motion_vectors_over_pic_boundaries_flag
-    READ_UE_OR_RETURN(&data);  // max_bytes_per_pic_denom
-    READ_UE_OR_RETURN(&data);  // max_bits_per_mb_denom
-    READ_UE_OR_RETURN(&data);  // log2_max_mv_length_horizontal
-    READ_UE_OR_RETURN(&data);  // log2_max_mv_length_vertical
+    READ_UE_OR_RETURN(&data);    // max_bytes_per_pic_denom
+    READ_UE_OR_RETURN(&data);    // max_bits_per_mb_denom
+    READ_UE_OR_RETURN(&data);    // log2_max_mv_length_horizontal
+    READ_UE_OR_RETURN(&data);    // log2_max_mv_length_vertical
     READ_UE_OR_RETURN(&sps->max_num_reorder_frames);
     READ_UE_OR_RETURN(&sps->max_dec_frame_buffering);
     TRUE_OR_RETURN(sps->max_dec_frame_buffering >= sps->max_num_ref_frames);
-    IN_RANGE_OR_RETURN(
-        sps->max_num_reorder_frames, 0, sps->max_dec_frame_buffering);
+    IN_RANGE_OR_RETURN(sps->max_num_reorder_frames, 0,
+                       sps->max_dec_frame_buffering);
   }
 
   return kOk;
@@ -1072,7 +1162,6 @@
     int luma_log2_weight_denom,
     int chroma_log2_weight_denom,
     H264WeightingFactors* w_facts) {
-
   int def_luma_weight = 1 << luma_log2_weight_denom;
   int def_chroma_weight = 1 << chroma_log2_weight_denom;
 
@@ -1120,20 +1209,18 @@
     READ_UE_OR_RETURN(&shdr->chroma_log2_weight_denom);
   TRUE_OR_RETURN(shdr->chroma_log2_weight_denom < 8);
 
-  Result res = ParseWeightingFactors(shdr->num_ref_idx_l0_active_minus1,
-                                     sps.chroma_array_type,
-                                     shdr->luma_log2_weight_denom,
-                                     shdr->chroma_log2_weight_denom,
-                                     &shdr->pred_weight_table_l0);
+  Result res = ParseWeightingFactors(
+      shdr->num_ref_idx_l0_active_minus1, sps.chroma_array_type,
+      shdr->luma_log2_weight_denom, shdr->chroma_log2_weight_denom,
+      &shdr->pred_weight_table_l0);
   if (res != kOk)
     return res;
 
   if (shdr->IsBSlice()) {
-    res = ParseWeightingFactors(shdr->num_ref_idx_l1_active_minus1,
-                                sps.chroma_array_type,
-                                shdr->luma_log2_weight_denom,
-                                shdr->chroma_log2_weight_denom,
-                                &shdr->pred_weight_table_l1);
+    res = ParseWeightingFactors(
+        shdr->num_ref_idx_l1_active_minus1, sps.chroma_array_type,
+        shdr->luma_log2_weight_denom, shdr->chroma_log2_weight_denom,
+        &shdr->pred_weight_table_l1);
     if (res != kOk)
       return res;
   }
diff --git a/vda/h264_parser.h b/vda/h264_parser.h
index fdd3f77..a29685a 100644
--- a/vda/h264_parser.h
+++ b/vda/h264_parser.h
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 //
 // This file contains an implementation of an H264 Annex-B video stream parser.
+// Note: ported from Chromium commit head: 0a918e9
 
 #ifndef H264_PARSER_H_
 #define H264_PARSER_H_
@@ -19,6 +20,7 @@
 #include "base/optional.h"
 #include "h264_bit_reader.h"
 #include "ranges.h"
+#include "rect.h"
 #include "size.h"
 #include "subsample_entry.h"
 
@@ -142,8 +144,8 @@
   int frame_crop_bottom_offset;
 
   bool vui_parameters_present_flag;
-  int sar_width;    // Set to 0 when not specified.
-  int sar_height;   // Set to 0 when not specified.
+  int sar_width;   // Set to 0 when not specified.
+  int sar_height;  // Set to 0 when not specified.
   bool bitstream_restriction_flag;
   int max_num_reorder_frames;
   int max_dec_frame_buffering;
@@ -181,6 +183,7 @@
   // base::nullopt if they encounter integer overflow. They do not verify that
   // the results are in-spec for the given profile or level.
   base::Optional<Size> GetCodedSize() const;
+  base::Optional<Rect> GetVisibleRect() const;
 };
 
 struct H264PPS {
@@ -239,10 +242,7 @@
 struct H264SliceHeader {
   H264SliceHeader();
 
-  enum {
-    kRefListSize = 32,
-    kRefListModSize = kRefListSize
-  };
+  enum { kRefListSize = 32, kRefListModSize = kRefListSize };
 
   enum Type {
     kPSlice = 0,
@@ -258,11 +258,11 @@
   bool IsSPSlice() const;
   bool IsSISlice() const;
 
-  bool idr_pic_flag;       // from NAL header
-  int nal_ref_idc;         // from NAL header
+  bool idr_pic_flag;         // from NAL header
+  int nal_ref_idc;           // from NAL header
   const uint8_t* nalu_data;  // from NAL header
-  off_t nalu_size;         // from NAL header
-  off_t header_bit_size;   // calculated
+  off_t nalu_size;           // from NAL header
+  off_t header_bit_size;     // calculated
 
   int first_mb_in_slice;
   int slice_type;
@@ -377,6 +377,12 @@
                                          off_t* offset,
                                          off_t* start_code_size);
 
+  // Parses the input stream and returns all the NALUs through |nalus|. Returns
+  // false if the stream is invalid.
+  static bool ParseNALUs(const uint8_t* stream,
+                         size_t stream_size,
+                         std::vector<H264NALU>* nalus);
+
   H264Parser();
   ~H264Parser();
 
diff --git a/vda/native_pixmap_handle.cc b/vda/native_pixmap_handle.cc
new file mode 100644
index 0000000..050a683
--- /dev/null
+++ b/vda/native_pixmap_handle.cc
@@ -0,0 +1,29 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: a9d98e6
+
+#include "native_pixmap_handle.h"
+
+namespace media {
+
+NativePixmapPlane::NativePixmapPlane()
+    : stride(0), offset(0), size(0), modifier(0) {}
+
+NativePixmapPlane::NativePixmapPlane(int stride,
+                                     int offset,
+                                     uint64_t size,
+                                     uint64_t modifier)
+    : stride(stride), offset(offset), size(size), modifier(modifier) {}
+
+NativePixmapPlane::NativePixmapPlane(const NativePixmapPlane& other) = default;
+
+NativePixmapPlane::~NativePixmapPlane() {}
+
+NativePixmapHandle::NativePixmapHandle() {}
+NativePixmapHandle::NativePixmapHandle(const NativePixmapHandle& other) =
+    default;
+
+NativePixmapHandle::~NativePixmapHandle() {}
+
+}  // namespace media
diff --git a/vda/native_pixmap_handle.h b/vda/native_pixmap_handle.h
new file mode 100644
index 0000000..62e2294
--- /dev/null
+++ b/vda/native_pixmap_handle.h
@@ -0,0 +1,57 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: a9d98e6
+
+#ifndef NATIVE_PIXMAP_HANDLE_H_
+#define NATIVE_PIXMAP_HANDLE_H_
+
+#include <vector>
+
+#include "base/file_descriptor_posix.h"
+
+namespace media {
+
+// NativePixmapPlane is used to carry the plane related information for GBM
+// buffer. More fields can be added if they are plane specific.
+struct NativePixmapPlane {
+  // This is the same value as DRM_FORMAT_MOD_INVALID, which is not a valid
+  // modifier. We use this to indicate that layout information
+  // (tiling/compression) if any will be communicated out of band.
+  static constexpr uint64_t kNoModifier = 0x00ffffffffffffffULL;
+
+  NativePixmapPlane();
+  NativePixmapPlane(int stride,
+                    int offset,
+                    uint64_t size,
+                    uint64_t modifier = kNoModifier);
+  NativePixmapPlane(const NativePixmapPlane& other);
+  ~NativePixmapPlane();
+
+  // The strides and offsets in bytes to be used when accessing the buffers via
+  // a memory mapping. One per plane per entry.
+  int stride;
+  int offset;
+  // Size in bytes of the plane.
+  // This is necessary to map the buffers.
+  uint64_t size;
+  // The modifier is retrieved from GBM library and passed to EGL driver.
+  // Generally it's platform specific, and we don't need to modify it in
+  // Chromium code. Also one per plane per entry.
+  uint64_t modifier;
+};
+
+struct NativePixmapHandle {
+  NativePixmapHandle();
+  NativePixmapHandle(const NativePixmapHandle& other);
+
+  ~NativePixmapHandle();
+
+  // File descriptors for the underlying memory objects (usually dmabufs).
+  std::vector<base::FileDescriptor> fds;
+  std::vector<NativePixmapPlane> planes;
+};
+
+}  // namespace media
+
+#endif  // NATIVE_PIXMAP_HANDLE_H_
diff --git a/vda/picture.cc b/vda/picture.cc
index a086725..8933bc5 100644
--- a/vda/picture.cc
+++ b/vda/picture.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "picture.h"
 
@@ -18,7 +19,7 @@
 
 PictureBuffer::PictureBuffer(const PictureBuffer& other) = default;
 
-PictureBuffer::~PictureBuffer() {}
+PictureBuffer::~PictureBuffer() = default;
 
 Picture::Picture(int32_t picture_buffer_id,
                  int32_t bitstream_buffer_id,
diff --git a/vda/picture.h b/vda/picture.h
index 3dbf0e9..e07b677 100644
--- a/vda/picture.h
+++ b/vda/picture.h
@@ -1,6 +1,7 @@
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: d264e47
 
 #ifndef PICTURE_H_
 #define PICTURE_H_
diff --git a/vda/ranges.cc b/vda/ranges.cc
index 00400b5..4394011 100644
--- a/vda/ranges.cc
+++ b/vda/ranges.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: a4f94d3
 
 #include "ranges.h"
 
diff --git a/vda/ranges.h b/vda/ranges.h
index 98b32ce..6a76ae4 100644
--- a/vda/ranges.h
+++ b/vda/ranges.h
@@ -1,6 +1,7 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 1323b9c
 
 #ifndef RANGES_H_
 #define RANGES_H_
diff --git a/vda/rect.h b/vda/rect.h
index d9640b2..b23e19d 100644
--- a/vda/rect.h
+++ b/vda/rect.h
@@ -1,6 +1,8 @@
 // Copyright 2017 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 0e161fe
+// Note: only necessary functions are ported from gfx::Rect
 
 // Defines a simple integer rectangle class.  The containment semantics
 // are array-like; that is, the coordinate (x, y) is considered to be
diff --git a/vda/shared_memory_region.cc b/vda/shared_memory_region.cc
index ed56559..775a5f2 100644
--- a/vda/shared_memory_region.cc
+++ b/vda/shared_memory_region.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 60f9667
 
 #include "base/sys_info.h"
 #include "shared_memory_region.h"
diff --git a/vda/shared_memory_region.h b/vda/shared_memory_region.h
index ce9a322..3c5d4b3 100644
--- a/vda/shared_memory_region.h
+++ b/vda/shared_memory_region.h
@@ -1,6 +1,7 @@
 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 60f9667
 
 #ifndef SHARED_MEMORY_REGION_H_
 #define SHARED_MEMORY_REGION_H_
diff --git a/vda/size.h b/vda/size.h
index 4806ddc..c3e8c82 100644
--- a/vda/size.h
+++ b/vda/size.h
@@ -1,6 +1,8 @@
 // Copyright 2017 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: a8e9f71
+// Note: only necessary functions are ported from gfx::Size
 
 #ifndef SIZE_H_
 #define SIZE_H_
diff --git a/vda/subsample_entry.h b/vda/subsample_entry.h
index e7529fb..1e0bfad 100644
--- a/vda/subsample_entry.h
+++ b/vda/subsample_entry.h
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 7014d6d
 
 #ifndef SUBSAMPLE_ENTRY_H_
 #define SUBSAMPLE_ENTRY_H_
diff --git a/vda/v4l2_device.cc b/vda/v4l2_device.cc
index 16446d3..78ef474 100644
--- a/vda/v4l2_device.cc
+++ b/vda/v4l2_device.cc
@@ -1,6 +1,8 @@
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 09ea0d2
+// Note: it's also merged with generic_v4l2_device.cc (head: a9d98e6)
 
 #include <errno.h>
 #include <fcntl.h>
@@ -15,6 +17,10 @@
 #include "base/strings/stringprintf.h"
 #include "v4l2_device.h"
 
+#define DVLOGF(level) DVLOG(level) << __func__ << "(): "
+#define VLOGF(level) VLOG(level) << __func__ << "(): "
+#define VPLOGF(level) VPLOG(level) << __func__ << "(): "
+
 namespace media {
 
 V4L2Device::V4L2Device() {}
@@ -47,7 +53,7 @@
       return PIXEL_FORMAT_ARGB;
 
     default:
-      DVLOG(1) << "Add more cases as needed";
+      DVLOGF(1) << "Add more cases as needed";
       return PIXEL_FORMAT_UNKNOWN;
   }
 }
@@ -131,7 +137,7 @@
       break;
 
     default:
-      DVLOG(1) << "Unhandled pixelformat " << std::hex << "0x" << pix_fmt;
+      VLOGF(1) << "Unhandled pixelformat " << std::hex << "0x" << pix_fmt;
       return profiles;
   }
 
@@ -156,7 +162,7 @@
   nfds = 1;
 
   if (poll_device) {
-    DVLOG(3) << "Poll(): adding device fd to poll() set";
+    DVLOGF(5) << "Poll(): adding device fd to poll() set";
     pollfds[nfds].fd = device_fd_.get();
     pollfds[nfds].events = POLLIN | POLLOUT | POLLERR | POLLPRI;
     pollfd = nfds;
@@ -164,7 +170,7 @@
   }
 
   if (HANDLE_EINTR(poll(pollfds, nfds, -1)) == -1) {
-    DPLOG(ERROR) << "poll() failed";
+    VPLOGF(1) << "poll() failed";
     return false;
   }
   *event_pending = (pollfd != -1 && pollfds[pollfd].revents & POLLPRI);
@@ -185,19 +191,19 @@
 }
 
 bool V4L2Device::SetDevicePollInterrupt() {
-  DVLOG(3) << "SetDevicePollInterrupt()";
+  DVLOGF(4);
 
   const uint64_t buf = 1;
   if (HANDLE_EINTR(write(device_poll_interrupt_fd_.get(), &buf, sizeof(buf))) ==
       -1) {
-    DPLOG(ERROR) << "SetDevicePollInterrupt(): write() failed";
+    VPLOGF(1) << "write() failed";
     return false;
   }
   return true;
 }
 
 bool V4L2Device::ClearDevicePollInterrupt() {
-  DVLOG(3) << "ClearDevicePollInterrupt()";
+  DVLOGF(5);
 
   uint64_t buf;
   if (HANDLE_EINTR(read(device_poll_interrupt_fd_.get(), &buf, sizeof(buf))) ==
@@ -206,7 +212,7 @@
       // No interrupt flag set, and we're reading nonblocking.  Not an error.
       return true;
     } else {
-      DPLOG(ERROR) << "ClearDevicePollInterrupt(): read() failed";
+      VPLOGF(1) << "read() failed";
       return false;
     }
   }
@@ -214,22 +220,23 @@
 }
 
 bool V4L2Device::Open(Type type, uint32_t v4l2_pixfmt) {
+  VLOGF(2);
   std::string path = GetDevicePathFor(type, v4l2_pixfmt);
 
   if (path.empty()) {
-    DVLOG(1) << "No devices supporting " << std::hex << "0x" << v4l2_pixfmt
+    VLOGF(1) << "No devices supporting " << std::hex << "0x" << v4l2_pixfmt
              << " for type: " << static_cast<int>(type);
     return false;
   }
 
   if (!OpenDevicePath(path, type)) {
-    LOG(ERROR) << "Failed opening " << path;
+    VLOGF(1) << "Failed opening " << path;
     return false;
   }
 
   device_poll_interrupt_fd_.reset(eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC));
   if (!device_poll_interrupt_fd_.is_valid()) {
-    LOG(ERROR) << "Failed creating a poll interrupt fd";
+    VLOGF(1) << "Failed creating a poll interrupt fd";
     return false;
   }
 
@@ -240,6 +247,7 @@
     int index,
     size_t num_planes,
     enum v4l2_buf_type buf_type) {
+  VLOGF(2);
   DCHECK(V4L2_TYPE_IS_MULTIPLANAR(buf_type));
 
   std::vector<base::ScopedFD> dmabuf_fds;
@@ -270,7 +278,7 @@
   const auto& devices = GetDevicesForType(type);
   for (const auto& device : devices) {
     if (!OpenDevicePath(device.first, type)) {
-      LOG(ERROR) << "Failed opening " << device.first;
+      VLOGF(1) << "Failed opening " << device.first;
       continue;
     }
 
@@ -320,15 +328,15 @@
   }
   if (max_resolution->IsEmpty()) {
     max_resolution->SetSize(1920, 1088);
-    LOG(ERROR) << "GetSupportedResolution failed to get maximum resolution for "
-               << "fourcc " << std::hex << pixelformat
-               << ", fall back to " << max_resolution->ToString();
+    VLOGF(1) << "GetSupportedResolution failed to get maximum resolution for "
+             << "fourcc " << std::hex << pixelformat
+             << ", fall back to " << max_resolution->ToString();
   }
   if (min_resolution->IsEmpty()) {
     min_resolution->SetSize(16, 16);
-    LOG(ERROR) << "GetSupportedResolution failed to get minimum resolution for "
-               << "fourcc " << std::hex << pixelformat
-               << ", fall back to " << min_resolution->ToString();
+    VLOGF(1) << "GetSupportedResolution failed to get minimum resolution for "
+             << "fourcc " << std::hex << pixelformat
+             << ", fall back to " << min_resolution->ToString();
   }
 }
 
@@ -341,8 +349,8 @@
   fmtdesc.type = buf_type;
 
   for (; Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0; ++fmtdesc.index) {
-    DVLOG(1) << "Found " << fmtdesc.description << std::hex << " (0x"
-             << fmtdesc.pixelformat << ")";
+    DVLOGF(3) << "Found " << fmtdesc.description << std::hex << " (0x"
+              << fmtdesc.pixelformat << ")";
     pixelformats.push_back(fmtdesc.pixelformat);
   }
 
@@ -373,9 +381,9 @@
       profile.profile = video_codec_profile;
       profiles.push_back(profile);
 
-      DVLOG(1) << "Found decoder profile " << GetProfileName(profile.profile)
-               << ", resolutions: " << profile.min_resolution.ToString() << " "
-               << profile.max_resolution.ToString();
+      DVLOGF(3) << "Found decoder profile " << GetProfileName(profile.profile)
+                << ", resolutions: " << profile.min_resolution.ToString() << " "
+                << profile.max_resolution.ToString();
     }
   }
 
@@ -394,6 +402,7 @@
 }
 
 void V4L2Device::CloseDevice() {
+  VLOGF(2);
   device_fd_.reset();
 }
 
@@ -433,7 +442,7 @@
     const auto& supported_pixelformats =
         EnumerateSupportedPixelformats(buf_type);
     if (!supported_pixelformats.empty()) {
-      DVLOG(1) << "Found device: " << path;
+      DVLOGF(3) << "Found device: " << path;
       devices.push_back(std::make_pair(path, supported_pixelformats));
     }
 
diff --git a/vda/v4l2_device.h b/vda/v4l2_device.h
index 41dd616..f64cbf7 100644
--- a/vda/v4l2_device.h
+++ b/vda/v4l2_device.h
@@ -5,6 +5,8 @@
 // This file defines the V4L2Device interface which is used by the
 // V4L2DecodeAccelerator class to delegate/pass the device specific
 // handling of any of the functionalities.
+// Note: ported from Chromium commit head: fb70f64
+// Note: it's also merged with generic_v4l2_device.h (head: fb70f64)
 
 #ifndef V4L2_DEVICE_H_
 #define V4L2_DEVICE_H_
diff --git a/vda/v4l2_slice_video_decode_accelerator.cc b/vda/v4l2_slice_video_decode_accelerator.cc
index 98e5915..5da373a 100644
--- a/vda/v4l2_slice_video_decode_accelerator.cc
+++ b/vda/v4l2_slice_video_decode_accelerator.cc
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 85fdf90
 
 #include "v4l2_slice_video_decode_accelerator.h"
 
@@ -27,21 +28,20 @@
 #include "base/threading/thread_task_runner_handle.h"
 #include "shared_memory_region.h"
 
-#define LOGF(level) LOG(level) << __func__ << "(): "
-#define DLOGF(level) DLOG(level) << __func__ << "(): "
 #define DVLOGF(level) DVLOG(level) << __func__ << "(): "
-#define PLOGF(level) PLOG(level) << __func__ << "(): "
+#define VLOGF(level) VLOG(level) << __func__ << "(): "
+#define VPLOGF(level) VPLOG(level) << __func__ << "(): "
 
-#define NOTIFY_ERROR(x)                         \
-  do {                                          \
-    LOGF(ERROR) << "Setting error state:" << x; \
-    SetErrorState(x);                           \
+#define NOTIFY_ERROR(x)                       \
+  do {                                        \
+    VLOGF(1) << "Setting error state: " << x; \
+    SetErrorState(x);                         \
   } while (0)
 
 #define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str) \
   do {                                                          \
     if (device_->Ioctl(type, arg) != 0) {                       \
-      PLOGF(ERROR) << "ioctl() failed: " << type_str;           \
+      VPLOGF(1) << "ioctl() failed: " << type_str;              \
       return value;                                             \
     }                                                           \
   } while (0)
@@ -52,10 +52,10 @@
 #define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
   IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)
 
-#define IOCTL_OR_LOG_ERROR(type, arg)              \
-  do {                                             \
-    if (device_->Ioctl(type, arg) != 0)            \
-      PLOGF(ERROR) << "ioctl() failed: " << #type; \
+#define IOCTL_OR_LOG_ERROR(type, arg)           \
+  do {                                          \
+    if (device_->Ioctl(type, arg) != 0)         \
+      VPLOGF(1) << "ioctl() failed: " << #type; \
   } while (0)
 
 namespace media {
@@ -84,6 +84,11 @@
   int input_record() const { return input_record_; }
   int output_record() const { return output_record_; }
   uint32_t config_store() const { return config_store_; }
+  Rect visible_rect() const { return visible_rect_; }
+
+  void set_visible_rect(const Rect& visible_rect) {
+    visible_rect_ = visible_rect;
+  }
 
   // Take references to each reference surface and keep them until the
   // target surface is decoded.
@@ -108,6 +113,7 @@
   int input_record_;
   int output_record_;
   uint32_t config_store_;
+  Rect visible_rect_;
 
   bool decoded_;
   ReleaseCB release_cb_;
@@ -474,7 +480,7 @@
 
 bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
                                                  Client* client) {
-  DVLOGF(3) << "profile: " << config.profile;
+  VLOGF(3) << "profile: " << config.profile;
   DCHECK(child_task_runner_->BelongsToCurrentThread());
   DCHECK_EQ(state_, kUninitialized);
 
@@ -506,8 +512,8 @@
       V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, true);
 
   if (!device_->Open(V4L2Device::Type::kDecoder, input_format_fourcc_)) {
-    DVLOGF(1) << "Failed to open device for profile: " << config.profile
-              << " fourcc: " << std::hex << "0x" << input_format_fourcc_;
+    VLOGF(1) << "Failed to open device for profile: " << config.profile
+             << " fourcc: " << std::hex << "0x" << input_format_fourcc_;
     return false;
   }
 
@@ -532,8 +538,8 @@
   const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
   if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
-    LOGF(ERROR) << "ioctl() failed: VIDIOC_QUERYCAP"
-                << ", caps check failed: 0x" << std::hex << caps.capabilities;
+    VLOGF(1) << "ioctl() failed: VIDIOC_QUERYCAP"
+             << ", caps check failed: 0x" << std::hex << caps.capabilities;
     return false;
   }
 
@@ -541,7 +547,7 @@
     return false;
 
   if (!decoder_thread_.Start()) {
-    DLOGF(ERROR) << "device thread failed to start";
+    VLOGF(1) << "device thread failed to start";
     return false;
   }
   decoder_thread_task_runner_ = decoder_thread_.task_runner();
@@ -554,12 +560,12 @@
       FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::InitializeTask,
                             base::Unretained(this)));
 
-  DVLOGF(1) << "V4L2SliceVideoDecodeAccelerator initialized";
+  VLOGF(2) << "V4L2SliceVideoDecodeAccelerator initialized";
   return true;
 }
 
 void V4L2SliceVideoDecodeAccelerator::InitializeTask() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK_EQ(state_, kInitialized);
 
@@ -572,7 +578,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::Destroy() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
   if (decoder_thread_.IsRunning()) {
@@ -585,11 +591,11 @@
   }
 
   delete this;
-  DVLOGF(3) << "Destroyed";
+  VLOGF(2) << "Destroyed";
 }
 
 void V4L2SliceVideoDecodeAccelerator::DestroyTask() {
-  DVLOGF(3);
+  DVLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   state_ = kError;
@@ -675,7 +681,7 @@
   }
 
   if (output_format_fourcc_ == 0) {
-    LOGF(ERROR) << "Could not find a usable output format";
+    VLOGF(1) << "Could not find a usable output format";
     return false;
   }
 
@@ -691,7 +697,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::CreateInputBuffers() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK(!input_streamon_);
   DCHECK(input_buffer_map_.empty());
@@ -703,7 +709,7 @@
   reqbufs.memory = V4L2_MEMORY_MMAP;
   IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
   if (reqbufs.count < kNumInputBuffers) {
-    PLOGF(ERROR) << "Could not allocate enough output buffers";
+    VLOGF(1) << "Could not allocate enough output buffers";
     return false;
   }
   input_buffer_map_.resize(reqbufs.count);
@@ -727,7 +733,7 @@
                                   MAP_SHARED,
                                   buffer.m.planes[0].m.mem_offset);
     if (address == MAP_FAILED) {
-      PLOGF(ERROR) << "mmap() failed";
+      VLOGF(1) << "mmap() failed";
       return false;
     }
     input_buffer_map_[i].address = address;
@@ -738,29 +744,29 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::CreateOutputBuffers() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK(!output_streamon_);
   DCHECK(output_buffer_map_.empty());
   DCHECK(surfaces_at_display_.empty());
   DCHECK(surfaces_at_device_.empty());
 
-  visible_size_ = decoder_->GetPicSize();
+  Size pic_size = decoder_->GetPicSize();
   size_t num_pictures = decoder_->GetRequiredNumOfPictures();
 
   DCHECK_GT(num_pictures, 0u);
-  DCHECK(!visible_size_.IsEmpty());
+  DCHECK(!pic_size.IsEmpty());
 
   struct v4l2_format format;
   memset(&format, 0, sizeof(format));
   format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
   format.fmt.pix_mp.pixelformat = output_format_fourcc_;
-  format.fmt.pix_mp.width = visible_size_.width();
-  format.fmt.pix_mp.height = visible_size_.height();
+  format.fmt.pix_mp.width = pic_size.width();
+  format.fmt.pix_mp.height = pic_size.height();
   format.fmt.pix_mp.num_planes = input_planes_count_;
 
   if (device_->Ioctl(VIDIOC_S_FMT, &format) != 0) {
-    PLOGF(ERROR) << "Failed setting format to: " << output_format_fourcc_;
+    VPLOGF(1) << "Failed setting format to: " << output_format_fourcc_;
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return false;
   }
@@ -770,22 +776,17 @@
   DCHECK_EQ(coded_size_.width() % 16, 0);
   DCHECK_EQ(coded_size_.height() % 16, 0);
 
-  if (!Rect(coded_size_).Contains(Rect(visible_size_))) {
-    LOGF(ERROR) << "Got invalid adjusted coded size: "
-                << coded_size_.ToString();
+  if (!Rect(coded_size_).Contains(Rect(pic_size))) {
+    VLOGF(1) << "Got invalid adjusted coded size: " << coded_size_.ToString();
     return false;
   }
 
   DVLOGF(3) << "buffer_count=" << num_pictures
-            << ", visible size=" << visible_size_.ToString()
+            << ", pic size=" << pic_size.ToString()
             << ", coded size=" << coded_size_.ToString();
 
-  // With ALLOCATE mode the client can sample it as RGB and doesn't need to
-  // know the precise format.
   VideoPixelFormat pixel_format =
-      (output_mode_ == Config::OutputMode::IMPORT)
-          ? V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_)
-          : PIXEL_FORMAT_UNKNOWN;
+      V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_);
 
   child_task_runner_->PostTask(
       FROM_HERE,
@@ -805,7 +806,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::DestroyInputBuffers() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() ||
          !decoder_thread_.IsRunning());
   DCHECK(!input_streamon_);
@@ -836,7 +837,7 @@
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
   for (auto picture_buffer_id : picture_buffer_ids) {
-    DVLOGF(1) << "dismissing PictureBuffer id=" << picture_buffer_id;
+    DVLOGF(4) << "dismissing PictureBuffer id=" << picture_buffer_id;
     client_->DismissPictureBuffer(picture_buffer_id);
   }
 
@@ -844,7 +845,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::DevicePollTask(bool poll_device) {
-  DVLOGF(4);
+  DVLOGF(3);
   DCHECK(device_poll_thread_.task_runner()->BelongsToCurrentThread());
 
   bool event_pending;
@@ -874,7 +875,7 @@
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   if (!device_poll_thread_.IsRunning()) {
-    DVLOGF(2) << "Device poll thread stopped, will not schedule poll";
+    DVLOGF(4) << "Device poll thread stopped, will not schedule poll";
     return;
   }
 
@@ -891,7 +892,7 @@
       FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask,
                             base::Unretained(this), true));
 
-  DVLOGF(2) << "buffer counts: "
+  DVLOGF(3) << "buffer counts: "
             << "INPUT[" << decoder_input_queue_.size() << "]"
             << " => DEVICE["
             << free_input_buffers_.size() << "+"
@@ -913,13 +914,13 @@
 
   if (!EnqueueInputRecord(dec_surface->input_record(),
                           dec_surface->config_store())) {
-    DVLOGF(1) << "Failed queueing an input buffer";
+    VLOGF(1) << "Failed queueing an input buffer";
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return;
   }
 
   if (!EnqueueOutputRecord(dec_surface->output_record())) {
-    DVLOGF(1) << "Failed queueing an output buffer";
+    VLOGF(1) << "Failed queueing an output buffer";
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return;
   }
@@ -935,7 +936,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::Dequeue() {
-  DVLOGF(3);
+  DVLOGF(4);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   struct v4l2_buffer dqbuf;
@@ -953,7 +954,7 @@
         // EAGAIN if we're just out of buffers to dequeue.
         break;
       }
-      PLOGF(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
+      VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF";
       NOTIFY_ERROR(PLATFORM_FAILURE);
       return;
     }
@@ -981,7 +982,7 @@
         // EAGAIN if we're just out of buffers to dequeue.
         break;
       }
-      PLOGF(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
+      VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF";
       NOTIFY_ERROR(PLATFORM_FAILURE);
       return;
     }
@@ -989,13 +990,13 @@
     DCHECK(output_record.at_device);
     output_record.at_device = false;
     output_buffer_queued_count_--;
-    DVLOGF(3) << "Dequeued output=" << dqbuf.index
-              << " count " << output_buffer_queued_count_;
+    DVLOGF(4) << "Dequeued output=" << dqbuf.index << " count "
+              << output_buffer_queued_count_;
 
     V4L2DecodeSurfaceByOutputId::iterator it =
         surfaces_at_device_.find(dqbuf.index);
     if (it == surfaces_at_device_.end()) {
-      DLOGF(ERROR) << "Got invalid surface from device.";
+      VLOGF(1) << "Got invalid surface from device.";
       NOTIFY_ERROR(PLATFORM_FAILURE);
     }
 
@@ -1090,7 +1091,7 @@
 bool V4L2SliceVideoDecodeAccelerator::EnqueueInputRecord(
     int index,
     uint32_t config_store) {
-  DVLOGF(3);
+  DVLOGF(4);
   DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
   DCHECK_GT(config_store, 0u);
 
@@ -1118,7 +1119,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
-  DVLOGF(3);
+  DVLOGF(4);
   DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));
 
   // Enqueue an output (VIDEO_CAPTURE) buffer.
@@ -1161,7 +1162,7 @@
 
   // Start up the device poll thread and schedule its first DevicePollTask().
   if (!device_poll_thread_.Start()) {
-    DLOGF(ERROR) << "Device thread failed to start";
+    VLOGF(1) << "Device thread failed to start";
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return false;
   }
@@ -1191,7 +1192,7 @@
 
   // Signal the DevicePollTask() to stop, and stop the device poll thread.
   if (!device_->SetDevicePollInterrupt()) {
-    PLOGF(ERROR) << "SetDevicePollInterrupt(): failed";
+    VPLOGF(1) << "SetDevicePollInterrupt(): failed";
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return false;
   }
@@ -1253,12 +1254,12 @@
 
 void V4L2SliceVideoDecodeAccelerator::Decode(
     const BitstreamBuffer& bitstream_buffer) {
-  DVLOGF(3) << "input_id=" << bitstream_buffer.id()
+  DVLOGF(4) << "input_id=" << bitstream_buffer.id()
             << ", size=" << bitstream_buffer.size();
   DCHECK(decode_task_runner_->BelongsToCurrentThread());
 
   if (bitstream_buffer.id() < 0) {
-    LOGF(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+    VLOGF(1) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
     if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
       base::SharedMemory::CloseHandle(bitstream_buffer.handle());
     NOTIFY_ERROR(INVALID_ARGUMENT);
@@ -1272,7 +1273,7 @@
 
 void V4L2SliceVideoDecodeAccelerator::DecodeTask(
     const BitstreamBuffer& bitstream_buffer) {
-  DVLOGF(3) << "input_id=" << bitstream_buffer.id()
+  DVLOGF(4) << "input_id=" << bitstream_buffer.id()
             << " size=" << bitstream_buffer.size();
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
@@ -1285,11 +1286,11 @@
     return;
 
   if (!bitstream_record->shm->Map()) {
-    LOGF(ERROR) << "Could not map bitstream_buffer";
+    VLOGF(1) << "Could not map bitstream_buffer";
     NOTIFY_ERROR(UNREADABLE_INPUT);
     return;
   }
-  DVLOGF(3) << "mapped at=" << bitstream_record->shm->memory();
+  DVLOGF(4) << "mapped at=" << bitstream_record->shm->memory();
 
   decoder_input_queue_.push(
       linked_ptr<BitstreamBufferRef>(bitstream_record.release()));
@@ -1333,7 +1334,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::DecodeBufferTask() {
-  DVLOGF(3);
+  DVLOGF(4);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   if (state_ != kDecoding) {
@@ -1346,7 +1347,7 @@
     res = decoder_->Decode();
     switch (res) {
       case AcceleratedVideoDecoder::kAllocateNewSurfaces:
-        DVLOGF(2) << "Decoder requesting a new set of surfaces";
+        VLOGF(2) << "Decoder requesting a new set of surfaces";
         InitiateSurfaceSetChange();
         return;
 
@@ -1367,7 +1368,7 @@
         return;
 
       case AcceleratedVideoDecoder::kDecodeError:
-        DVLOGF(1) << "Error decoding stream";
+        VLOGF(1) << "Error decoding stream";
         NOTIFY_ERROR(PLATFORM_FAILURE);
         return;
     }
@@ -1375,7 +1376,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::InitiateSurfaceSetChange() {
-  DVLOGF(2);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK_EQ(state_, kDecoding);
 
@@ -1385,7 +1386,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChange() {
-  DVLOGF(2);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   if (!surface_set_change_pending_)
@@ -1408,6 +1409,12 @@
     return false;
   }
 
+  // Dequeued decoded surfaces may be pending in pending_picture_ready_ if they
+  // are waiting for some pictures to be cleared. We should post them right away
+  // because they are about to be dismissed and destroyed for the surface set
+  // change.
+  SendPictureReady();
+
   // This will return only once all buffers are dismissed and destroyed.
   // This does not wait until they are displayed however, as display retains
   // references to the buffers bound to textures and will release them
@@ -1423,12 +1430,12 @@
   }
 
   surface_set_change_pending_ = false;
-  DVLOGF(3) << "Surface set change finished";
+  VLOGF(2) << "Surface set change finished";
   return true;
 }
 
 bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   std::vector<int32_t> picture_buffers_to_dismiss;
 
@@ -1441,7 +1448,7 @@
   }
 
   if (dismiss) {
-    DVLOGF(2) << "Scheduling picture dismissal";
+    VLOGF(2) << "Scheduling picture dismissal";
     base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                              base::WaitableEvent::InitialState::NOT_SIGNALED);
     child_task_runner_->PostTask(
@@ -1456,7 +1463,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::DestroyOutputBuffers() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() ||
          !decoder_thread_.IsRunning());
   DCHECK(!output_streamon_);
@@ -1498,7 +1505,7 @@
 
 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
     const std::vector<PictureBuffer>& buffers) {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
   decoder_thread_task_runner_->PostTask(
@@ -1509,16 +1516,16 @@
 
 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask(
     const std::vector<PictureBuffer>& buffers) {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK_EQ(state_, kAwaitingPictureBuffers);
 
   const uint32_t req_buffer_count = decoder_->GetRequiredNumOfPictures();
 
   if (buffers.size() < req_buffer_count) {
-    DLOG(ERROR) << "Failed to provide requested picture buffers. "
-                << "(Got " << buffers.size()
-                << ", requested " << req_buffer_count << ")";
+    VLOGF(1) << "Failed to provide requested picture buffers. "
+             << "(Got " << buffers.size() << ", requested " << req_buffer_count
+             << ")";
     NOTIFY_ERROR(INVALID_ARGUMENT);
     return;
   }
@@ -1534,7 +1541,7 @@
   IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
 
   if (reqbufs.count != buffers.size()) {
-    DLOGF(ERROR) << "Could not allocate enough output buffers";
+    VLOGF(1) << "Could not allocate enough output buffers";
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return;
   }
@@ -1587,18 +1594,26 @@
 
 void V4L2SliceVideoDecodeAccelerator::ImportBufferForPicture(
     int32_t picture_buffer_id,
-    const std::vector<base::FileDescriptor>& dmabuf_fds) {
+    VideoPixelFormat pixel_format,
+    const NativePixmapHandle& native_pixmap_handle) {
   DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
   auto passed_dmabuf_fds(base::WrapUnique(new std::vector<base::ScopedFD>()));
-  for (const auto& fd : dmabuf_fds) {
+  for (const auto& fd : native_pixmap_handle.fds) {
     DCHECK_NE(fd.fd, -1);
     passed_dmabuf_fds->push_back(base::ScopedFD(fd.fd));
   }
 
   if (output_mode_ != Config::OutputMode::IMPORT) {
-    LOGF(ERROR) << "Cannot import in non-import mode";
+    VLOGF(1) << "Cannot import in non-import mode";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  if (pixel_format !=
+      V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_)) {
+    VLOGF(1) << "Unsupported import format: " << pixel_format;
     NOTIFY_ERROR(INVALID_ARGUMENT);
     return;
   }
@@ -1632,7 +1647,7 @@
   }
 
   if (!iter->at_client) {
-    LOGF(ERROR) << "Cannot import buffer that not owned by client";
+    VLOGF(1) << "Cannot import buffer that not owned by client";
     NOTIFY_ERROR(INVALID_ARGUMENT);
     return;
   }
@@ -1664,7 +1679,7 @@
 
 void V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask(
     int32_t picture_buffer_id) {
-  DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
+  DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   V4L2DecodeSurfaceByPictureBufferId::iterator it =
@@ -1682,7 +1697,7 @@
 
   OutputRecord& output_record = output_buffer_map_[it->second->output_record()];
   if (output_record.at_device || !output_record.at_client) {
-    DVLOGF(1) << "picture_buffer_id not reusable";
+    VLOGF(1) << "picture_buffer_id not reusable";
     NOTIFY_ERROR(INVALID_ARGUMENT);
     return;
   }
@@ -1694,7 +1709,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::Flush() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
   decoder_thread_task_runner_->PostTask(
@@ -1703,7 +1718,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::FlushTask() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   // Queue an empty buffer which - when reached - will trigger flush sequence.
@@ -1715,7 +1730,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::InitiateFlush() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   // This will trigger output for all remaining surfaces in the decoder.
@@ -1736,7 +1751,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::FinishFlush() {
-  DVLOGF(3);
+  VLOGF(4);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   if (!decoder_flushing_)
@@ -1762,7 +1777,7 @@
   SendPictureReady();
 
   decoder_flushing_ = false;
-  DVLOGF(3) << "Flush finished";
+  VLOGF(2) << "Flush finished";
 
   child_task_runner_->PostTask(FROM_HERE,
                                base::Bind(&Client::NotifyFlushDone, client_));
@@ -1771,7 +1786,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::Reset() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
   decoder_thread_task_runner_->PostTask(
@@ -1780,7 +1795,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::ResetTask() {
-  DVLOGF(3);
+  VLOGF(2);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   if (decoder_resetting_) {
@@ -1803,7 +1818,7 @@
 }
 
 bool V4L2SliceVideoDecodeAccelerator::FinishReset() {
-  DVLOGF(3);
+  VLOGF(4);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   if (!decoder_resetting_)
@@ -1834,7 +1849,7 @@
   }
 
   decoder_resetting_ = false;
-  DVLOGF(3) << "Reset finished";
+  VLOGF(2) << "Reset finished";
 
   child_task_runner_->PostTask(FROM_HERE,
                                base::Bind(&Client::NotifyResetDone, client_));
@@ -1898,7 +1913,7 @@
   size_t i = 0;
   for (const auto& pic : dpb) {
     if (i >= arraysize(v4l2_decode_param_.dpb)) {
-      DVLOGF(1) << "Invalid DPB size";
+      VLOGF(1) << "Invalid DPB size";
       break;
     }
 
@@ -2108,7 +2123,7 @@
     const uint8_t* data,
     size_t size) {
   if (num_slices_ == kMaxSlices) {
-    LOGF(ERROR) << "Over limit of supported slices per frame";
+    VLOGF(1) << "Over limit of supported slices per frame";
     return false;
   }
 
@@ -2228,7 +2243,7 @@
   InputRecord& input_record = input_buffer_map_[index];
 
   if (input_record.bytes_used + size > input_record.length) {
-    DVLOGF(1) << "Input buffer too small";
+    VLOGF(1) << "Input buffer too small";
     return false;
   }
 
@@ -2306,6 +2321,7 @@
     const scoped_refptr<H264Picture>& pic) {
   scoped_refptr<V4L2DecodeSurface> dec_surface =
       H264PictureToV4L2DecodeSurface(pic);
+  dec_surface->set_visible_rect(pic->visible_rect);
   v4l2_dec_->SurfaceReady(dec_surface);
   return true;
 }
@@ -2537,7 +2553,7 @@
     const scoped_refptr<VP8Picture>& pic) {
   scoped_refptr<V4L2DecodeSurface> dec_surface =
       VP8PictureToV4L2DecodeSurface(pic);
-
+  dec_surface->set_visible_rect(pic->visible_rect);
   v4l2_dec_->SurfaceReady(dec_surface);
   return true;
 }
@@ -2840,7 +2856,7 @@
     const scoped_refptr<VP9Picture>& pic) {
   scoped_refptr<V4L2DecodeSurface> dec_surface =
       VP9PictureToV4L2DecodeSurface(pic);
-
+  dec_surface->set_visible_rect(pic->visible_rect);
   v4l2_dec_->SurfaceReady(dec_surface);
   return true;
 }
@@ -2920,13 +2936,13 @@
     const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
-  DVLOGF(3) << "Submitting decode for surface: " << dec_surface->ToString();
+  DVLOGF(4) << "Submitting decode for surface: " << dec_surface->ToString();
   Enqueue(dec_surface);
 }
 
 void V4L2SliceVideoDecodeAccelerator::SurfaceReady(
     const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
-  DVLOGF(3);
+  DVLOGF(4);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   decoder_display_queue_.push(dec_surface);
@@ -2964,14 +2980,12 @@
   DCHECK_NE(output_record.picture_id, -1);
   output_record.at_client = true;
 
-  // TODO(posciak): Use visible size from decoder here instead
-  // (crbug.com/402760). Passing (0, 0) results in the client using the
-  // visible size extracted from the container instead.
   Picture picture(output_record.picture_id, dec_surface->bitstream_id(),
-                  Rect(0, 0), false);
-  DVLOGF(3) << dec_surface->ToString()
+                  dec_surface->visible_rect(), true /* allow_overlay */);
+  DVLOGF(4) << dec_surface->ToString()
             << ", bitstream_id: " << picture.bitstream_buffer_id()
-            << ", picture_id: " << picture.picture_buffer_id();
+            << ", picture_id: " << picture.picture_buffer_id()
+            << ", visible_rect: " << picture.visible_rect().ToString();
   pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
   SendPictureReady();
   output_record.cleared = true;
@@ -3006,9 +3020,10 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::SendPictureReady() {
-  DVLOGF(3);
+  DVLOGF(4);
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
-  bool resetting_or_flushing = (decoder_resetting_ || decoder_flushing_);
+  bool send_now =
+      (decoder_resetting_ || decoder_flushing_ || surface_set_change_pending_);
   while (!pending_picture_ready_.empty()) {
     bool cleared = pending_picture_ready_.front().cleared;
     const Picture& picture = pending_picture_ready_.front().picture;
@@ -3022,17 +3037,20 @@
           FROM_HERE,
           base::Bind(&Client::PictureReady, decode_client_, picture));
       pending_picture_ready_.pop();
-    } else if (!cleared || resetting_or_flushing) {
-      DVLOGF(3) << "cleared=" << pending_picture_ready_.front().cleared
+    } else if (!cleared || send_now) {
+      DVLOGF(4) << "cleared=" << pending_picture_ready_.front().cleared
                 << ", decoder_resetting_=" << decoder_resetting_
                 << ", decoder_flushing_=" << decoder_flushing_
+                << ", surface_set_change_pending_="
+                << surface_set_change_pending_
                 << ", picture_clearing_count_=" << picture_clearing_count_;
       DVLOGF(4) << "Posting picture ready to GPU for: "
                 << picture.picture_buffer_id();
       // If the picture is not cleared, post it to the child thread because it
       // has to be cleared in the child thread. A picture only needs to be
-      // cleared once. If the decoder is resetting or flushing, send all
-      // pictures to ensure PictureReady arrive before reset or flush done.
+      // cleared once. If the decoder is resetting or flushing or changing
+      // resolution, send all pictures to ensure PictureReady arrive before
+      // reset done, flush done, or picture dismissed.
       child_task_runner_->PostTaskAndReply(
           FROM_HERE, base::Bind(&Client::PictureReady, client_, picture),
           // Unretained is safe. If Client::PictureReady gets to run, |this| is
@@ -3051,7 +3069,7 @@
 }
 
 void V4L2SliceVideoDecodeAccelerator::PictureCleared() {
-  DVLOGF(3) << "clearing count=" << picture_clearing_count_;
+  DVLOGF(4) << "clearing count=" << picture_clearing_count_;
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
   DCHECK_GT(picture_clearing_count_, 0);
   picture_clearing_count_--;
diff --git a/vda/v4l2_slice_video_decode_accelerator.h b/vda/v4l2_slice_video_decode_accelerator.h
index 929066f..8849625 100644
--- a/vda/v4l2_slice_video_decode_accelerator.h
+++ b/vda/v4l2_slice_video_decode_accelerator.h
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 85fdf90
 
 #ifndef V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
 #define V4L2_SLICE_VIDEO_DECODE_ACCELERATOR_H_
@@ -47,7 +48,8 @@
   void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
   void ImportBufferForPicture(
       int32_t picture_buffer_id,
-      const std::vector<base::FileDescriptor>& dmabuf_fds) override;
+      VideoPixelFormat pixel_format,
+      const NativePixmapHandle& native_pixmap_handle) override;
   void ReusePictureBuffer(int32_t picture_buffer_id) override;
   void Flush() override;
   void Reset() override;
@@ -77,6 +79,7 @@
   // Record for output buffers.
   struct OutputRecord {
     OutputRecord();
+    OutputRecord(OutputRecord&&) = default;
     bool at_device;
     bool at_client;
     int32_t picture_id;
@@ -234,9 +237,9 @@
   // file descriptors.
   void ImportBufferForPictureTask(
       int32_t picture_buffer_id,
-      // TODO(posciak): (crbug.com/561749) we should normally be able to pass
-      // the vector by itself via std::move, but it's not possible to do this
-      // if this method is used as a callback.
+      // TODO(posciak): (https://crbug.com/561749) we should normally be able to
+      // pass the vector by itself via std::move, but it's not possible to do
+      // this if this method is used as a callback.
       std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds);
 
   // Performed on decoder_thread_ as a consequence of poll() on decoder_thread_
@@ -366,7 +369,6 @@
   VideoCodecProfile video_profile_;
   uint32_t input_format_fourcc_;
   uint32_t output_format_fourcc_;
-  Size visible_size_;
   Size coded_size_;
 
   struct BitstreamBufferRef;
@@ -417,6 +419,7 @@
     bool cleared;  // Whether the texture is cleared and safe to render from.
     Picture picture;  // The decoded picture.
   };
+
   // Pictures that are ready but not sent to PictureReady yet.
   std::queue<PictureRecord> pending_picture_ready_;
 
diff --git a/vda/v4l2_video_decode_accelerator.cc b/vda/v4l2_video_decode_accelerator.cc
new file mode 100644
index 0000000..96f840d
--- /dev/null
+++ b/vda/v4l2_video_decode_accelerator.cc
@@ -0,0 +1,2088 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Note: ported from Chromium commit head: 91175b1
+// Note: image processor is not ported.
+
+#include "v4l2_video_decode_accelerator.h"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/message_loop/message_loop.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/single_thread_task_runner.h"
+#include "base/memory/ptr_util.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "h264_parser.h"
+#include "rect.h"
+#include "shared_memory_region.h"
+#include "videodev2.h"
+
+// Logging helpers that prefix V?LOG output with the current function name.
+#define DVLOGF(level) DVLOG(level) << __func__ << "(): "
+#define VLOGF(level) VLOG(level) << __func__ << "(): "
+#define VPLOGF(level) VPLOG(level) << __func__ << "(): "
+
+// Records |x| as the decoder error state via SetErrorState() and logs it.
+#define NOTIFY_ERROR(x)                      \
+  do {                                       \
+    VLOGF(1) << "Setting error state:" << x; \
+    SetErrorState(x);                        \
+  } while (0)
+
+// Issues an ioctl on |device_|; on failure logs |type_str|, reports
+// PLATFORM_FAILURE, and returns |value| from the enclosing function.
+#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str) \
+  do {                                                          \
+    if (device_->Ioctl(type, arg) != 0) {                       \
+      VPLOGF(1) << "ioctl() failed: " << type_str;              \
+      NOTIFY_ERROR(PLATFORM_FAILURE);                           \
+      return value;                                             \
+    }                                                           \
+  } while (0)
+
+// Variant for void-returning functions.
+#define IOCTL_OR_ERROR_RETURN(type, arg) \
+  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, ((void)0), #type)
+
+// Variant for bool-returning functions.
+#define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
+  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)
+
+// Issues an ioctl and only logs on failure; does not change decoder state.
+#define IOCTL_OR_LOG_ERROR(type, arg)           \
+  do {                                          \
+    if (device_->Ioctl(type, arg) != 0)         \
+      VPLOGF(1) << "ioctl() failed: " << #type; \
+  } while (0)
+
+namespace media {
+
+// static
+// Input (bitstream) pixel formats this decoder accepts: H.264, VP8 and VP9.
+const uint32_t V4L2VideoDecodeAccelerator::supported_input_fourccs_[] = {
+    V4L2_PIX_FMT_H264, V4L2_PIX_FMT_VP8, V4L2_PIX_FMT_VP9,
+};
+
+// Tracks one client-provided bitstream buffer while the decoder consumes it.
+// When the last reference is destroyed, NotifyEndOfBitstreamBuffer() is
+// posted back to the client (see the destructor definition below).
+struct V4L2VideoDecodeAccelerator::BitstreamBufferRef {
+  BitstreamBufferRef(
+      base::WeakPtr<Client>& client,
+      scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
+      std::unique_ptr<SharedMemoryRegion> shm,
+      int32_t input_id);
+  ~BitstreamBufferRef();
+  const base::WeakPtr<Client> client;
+  const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
+  const std::unique_ptr<SharedMemoryRegion> shm;  // Mapped input data.
+  size_t bytes_used;  // Bytes of |shm| already consumed by the decoder.
+  const int32_t input_id;  // Client-assigned bitstream buffer id.
+};
+
+V4L2VideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
+    base::WeakPtr<Client>& client,
+    scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
+    std::unique_ptr<SharedMemoryRegion> shm,
+    int32_t input_id)
+    : client(client),
+      client_task_runner(client_task_runner),
+      shm(std::move(shm)),
+      bytes_used(0),
+      input_id(input_id) {}
+
+// Notifies the client that its buffer has been consumed. Internal buffers
+// (negative ids, presumably kFlushBufferId -- confirm) are not reported.
+V4L2VideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() {
+  if (input_id >= 0) {
+    client_task_runner->PostTask(
+        FROM_HERE,
+        base::Bind(&Client::NotifyEndOfBitstreamBuffer, client, input_id));
+  }
+}
+
+// Default state: not queued at the device, no mapping, no client buffer id.
+V4L2VideoDecodeAccelerator::InputRecord::InputRecord()
+    : at_device(false), address(NULL), length(0), bytes_used(0), input_id(-1) {}
+
+V4L2VideoDecodeAccelerator::InputRecord::~InputRecord() {}
+
+// Output buffers start free (unassigned) with no picture id attached.
+V4L2VideoDecodeAccelerator::OutputRecord::OutputRecord()
+    : state(kFree),
+      picture_id(-1),
+      cleared(false) {}
+
+V4L2VideoDecodeAccelerator::OutputRecord::~OutputRecord() {}
+
+// Pairs a decoded Picture with whether its texture has been cleared yet.
+V4L2VideoDecodeAccelerator::PictureRecord::PictureRecord(bool cleared,
+                                                         const Picture& picture)
+    : cleared(cleared), picture(picture) {}
+
+V4L2VideoDecodeAccelerator::PictureRecord::~PictureRecord() {}
+
+// Constructed on the child (client) thread; captures that thread's task
+// runner and creates |weak_this_| for tasks that must detect destruction.
+V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator(
+    const scoped_refptr<V4L2Device>& device)
+    : child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
+      decoder_thread_("V4L2DecoderThread"),
+      decoder_state_(kUninitialized),
+      output_mode_(Config::OutputMode::ALLOCATE),
+      device_(device),
+      decoder_delay_bitstream_buffer_id_(-1),
+      decoder_current_input_buffer_(-1),
+      decoder_decode_buffer_tasks_scheduled_(0),
+      decoder_frames_at_client_(0),
+      decoder_flushing_(false),
+      decoder_cmd_supported_(false),
+      flush_awaiting_last_output_buffer_(false),
+      reset_pending_(false),
+      decoder_partial_frame_pending_(false),
+      input_streamon_(false),
+      input_buffer_queued_count_(0),
+      output_streamon_(false),
+      output_buffer_queued_count_(0),
+      output_dpb_size_(0),
+      output_planes_count_(0),
+      picture_clearing_count_(0),
+      device_poll_thread_("V4L2DevicePollThread"),
+      video_profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
+      input_format_fourcc_(0),
+      output_format_fourcc_(0),
+      weak_this_factory_(this) {
+  weak_this_ = weak_this_factory_.GetWeakPtr();
+}
+
+// Both worker threads must already be stopped and all buffer maps released
+// (see Destroy()/DestroyTask()) before the destructor runs.
+V4L2VideoDecodeAccelerator::~V4L2VideoDecodeAccelerator() {
+  DCHECK(!decoder_thread_.IsRunning());
+  DCHECK(!device_poll_thread_.IsRunning());
+  DVLOGF(2);
+
+  // These maps have members that should be manually destroyed, e.g. file
+  // descriptors, mmap() segments, etc.
+  DCHECK(input_buffer_map_.empty());
+  DCHECK(output_buffer_map_.empty());
+}
+
+// VDA entry point, called on the child thread. Only IMPORT output mode is
+// supported. Opens the V4L2 device for |config.profile|, verifies M2M+
+// streaming capabilities, negotiates formats, starts the decoder thread and
+// posts InitializeTask() to it. Returns false on synchronous failure;
+// asynchronous failures are reported via NOTIFY_ERROR from InitializeTask().
+bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
+                                            Client* client) {
+  VLOGF(2) << "profile: " << config.profile
+           << ", output_mode=" << static_cast<int>(config.output_mode);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kUninitialized);
+
+  if (config.output_mode != Config::OutputMode::IMPORT) {
+    NOTREACHED() << "Only IMPORT OutputModes are supported";
+    return false;
+  }
+
+  client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
+  client_ = client_ptr_factory_->GetWeakPtr();
+  // If we haven't been set up to decode on separate thread via
+  // TryToSetupDecodeOnSeparateThread(), use the main thread/client for
+  // decode tasks.
+  if (!decode_task_runner_) {
+    decode_task_runner_ = child_task_runner_;
+    DCHECK(!decode_client_);
+    decode_client_ = client_;
+  }
+
+  video_profile_ = config.profile;
+
+  input_format_fourcc_ =
+      V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, false);
+
+  if (!device_->Open(V4L2Device::Type::kDecoder, input_format_fourcc_)) {
+    VLOGF(1) << "Failed to open device for profile: " << config.profile
+             << " fourcc: " << std::hex << "0x" << input_format_fourcc_;
+    return false;
+  }
+
+  // Capabilities check.
+  struct v4l2_capability caps;
+  const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
+  if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
+    VLOGF(1) << "ioctl() failed: VIDIOC_QUERYCAP"
+             << ", caps check failed: 0x" << std::hex << caps.capabilities;
+    return false;
+  }
+
+  if (!SetupFormats())
+    return false;
+
+  // H.264 input must be split into frames by us (see AdvanceFrameFragment),
+  // so a parser is only needed for H.264 profiles.
+  if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) {
+    decoder_h264_parser_.reset(new H264Parser());
+  }
+
+  if (!decoder_thread_.Start()) {
+    VLOGF(1) << "decoder thread failed to start";
+    return false;
+  }
+
+  decoder_state_ = kInitialized;
+  output_mode_ = config.output_mode;
+
+  // InitializeTask will NOTIFY_ERROR on failure.
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::InitializeTask,
+                            base::Unretained(this)));
+
+  return true;
+}
+
+// Decoder-thread half of initialization: subscribes to the V4L2 resolution
+// (source-change) event, allocates input buffers, probes VIDIOC_DECODER_CMD
+// support and starts the device poll thread. Failures are reported through
+// NOTIFY_ERROR (directly or inside the callees).
+void V4L2VideoDecodeAccelerator::InitializeTask() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kInitialized);
+
+  // Subscribe to the resolution change event.
+  struct v4l2_event_subscription sub;
+  memset(&sub, 0, sizeof(sub));
+  sub.type = V4L2_EVENT_SOURCE_CHANGE;
+  IOCTL_OR_ERROR_RETURN(VIDIOC_SUBSCRIBE_EVENT, &sub);
+
+  if (!CreateInputBuffers()) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  decoder_cmd_supported_ = IsDecoderCmdSupported();
+
+  if (!StartDevicePoll())
+    return;
+}
+
+// Client entry point (runs on |decode_task_runner_|): validates the buffer
+// id and hands the buffer to the decoder thread. Negative ids are rejected
+// with INVALID_ARGUMENT; the shared-memory handle is closed first so the
+// rejected buffer does not leak.
+void V4L2VideoDecodeAccelerator::Decode(
+    const BitstreamBuffer& bitstream_buffer) {
+  DVLOGF(4) << "input_id=" << bitstream_buffer.id()
+            << ", size=" << bitstream_buffer.size();
+  DCHECK(decode_task_runner_->BelongsToCurrentThread());
+
+  if (bitstream_buffer.id() < 0) {
+    VLOGF(1) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
+    if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
+      base::SharedMemory::CloseHandle(bitstream_buffer.handle());
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  // DecodeTask() will take care of running a DecodeBufferTask().
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DecodeTask,
+                            base::Unretained(this), bitstream_buffer));
+}
+
+// Client entry point: trampolines the picture-buffer list to the decoder
+// thread, where AssignPictureBuffersTask() does the actual work.
+void V4L2VideoDecodeAccelerator::AssignPictureBuffers(
+    const std::vector<PictureBuffer>& buffers) {
+  VLOGF(2) << "buffer_count=" << buffers.size();
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE,
+      base::Bind(&V4L2VideoDecodeAccelerator::AssignPictureBuffersTask,
+                 base::Unretained(this), buffers));
+}
+
+// Decoder-thread half of AssignPictureBuffers(): verifies the client gave us
+// at least DPB size + extra buffers, allocates matching CAPTURE-queue buffers
+// from the driver (VIDIOC_REQBUFS), and builds output_buffer_map_. Each
+// record starts in kAtClient state until the fds arrive via
+// ImportBufferForPicture().
+void V4L2VideoDecodeAccelerator::AssignPictureBuffersTask(
+    const std::vector<PictureBuffer>& buffers) {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kAwaitingPictureBuffers);
+
+  uint32_t req_buffer_count = output_dpb_size_ + kDpbOutputBufferExtraCount;
+
+  if (buffers.size() < req_buffer_count) {
+    VLOGF(1) << "Failed to provide requested picture buffers. (Got "
+             << buffers.size() << ", requested " << req_buffer_count << ")";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  // Allocate the output buffers.
+  struct v4l2_requestbuffers reqbufs;
+  memset(&reqbufs, 0, sizeof(reqbufs));
+  reqbufs.count = buffers.size();
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  reqbufs.memory = V4L2_MEMORY_MMAP;
+  IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
+
+  // The driver may grant fewer buffers than requested; that is fatal here.
+  if (reqbufs.count != buffers.size()) {
+    VLOGF(1) << "Could not allocate enough output buffers";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  DCHECK(free_output_buffers_.empty());
+  DCHECK(output_buffer_map_.empty());
+  output_buffer_map_.resize(buffers.size());
+
+  // Always use IMPORT output mode for Android solution.
+  DCHECK_EQ(output_mode_, Config::OutputMode::IMPORT);
+
+  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
+    OutputRecord& output_record = output_buffer_map_[i];
+    DCHECK_EQ(output_record.state, kFree);
+    DCHECK_EQ(output_record.picture_id, -1);
+    DCHECK_EQ(output_record.cleared, false);
+
+    output_record.picture_id = buffers[i].id();
+
+    // This will remain kAtClient until ImportBufferForPicture is called, either
+    // by the client, or by ourselves, if we are allocating.
+    output_record.state = kAtClient;
+
+    DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
+  }
+}
+
+// Client entry point: validates the import request (mode and pixel format),
+// wraps the dmabuf fds in ScopedFDs, and forwards everything to the decoder
+// thread via ImportBufferForPictureTask().
+void V4L2VideoDecodeAccelerator::ImportBufferForPicture(
+    int32_t picture_buffer_id,
+    VideoPixelFormat pixel_format,
+    const NativePixmapHandle& native_pixmap_handle) {
+  DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  if (output_mode_ != Config::OutputMode::IMPORT) {
+    VLOGF(1) << "Cannot import in non-import mode";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  // The imported format must match what was negotiated on the CAPTURE queue.
+  if (pixel_format !=
+      V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_)) {
+    VLOGF(1) << "Unsupported import format: " << pixel_format;
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  std::vector<base::ScopedFD> dmabuf_fds;
+  for (const auto& fd : native_pixmap_handle.fds) {
+    DCHECK_NE(fd.fd, -1);
+    // ScopedFD adopts fd.fd without dup(); assumes the handle transfers fd
+    // ownership to us -- NOTE(review): confirm against callers.
+    dmabuf_fds.push_back(base::ScopedFD(fd.fd));
+  }
+
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE,
+      base::Bind(&V4L2VideoDecodeAccelerator::ImportBufferForPictureTask,
+                 base::Unretained(this), picture_buffer_id,
+                 base::Passed(&dmabuf_fds)));
+}
+
+// Decoder-thread half of ImportBufferForPicture(): attaches |dmabuf_fds| to
+// the output record matching |picture_buffer_id| and returns that buffer to
+// the free list. A request for an already-dismissed picture is silently
+// ignored; importing a buffer not owned by the client is an error.
+void V4L2VideoDecodeAccelerator::ImportBufferForPictureTask(
+    int32_t picture_buffer_id,
+    std::vector<base::ScopedFD> dmabuf_fds) {
+  DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id
+            << ", dmabuf_fds.size()=" << dmabuf_fds.size();
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  const auto iter =
+      std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(),
+                   [picture_buffer_id](const OutputRecord& output_record) {
+                     return output_record.picture_id == picture_buffer_id;
+                   });
+  if (iter == output_buffer_map_.end()) {
+    // It's possible that we've already posted a DismissPictureBuffer for this
+    // picture, but it has not yet executed when this ImportBufferForPicture was
+    // posted to us by the client. In that case just ignore this (we've already
+    // dismissed it and accounted for that).
+    DVLOGF(3) << "got picture id=" << picture_buffer_id
+              << " not in use (anymore?).";
+    return;
+  }
+
+  if (iter->state != kAtClient) {
+    VLOGF(1) << "Cannot import buffer not owned by client";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  size_t index = iter - output_buffer_map_.begin();
+  // A buffer owned by the client must not already be on the free list.
+  DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
+                       index),
+            0);
+
+  iter->state = kFree;
+
+  DCHECK_EQ(output_planes_count_, dmabuf_fds.size());
+
+  iter->processor_output_fds.swap(dmabuf_fds);
+  free_output_buffers_.push_back(index);
+  // While a resolution change is in progress the queues are being recreated;
+  // defer enqueuing until the change completes.
+  if (decoder_state_ != kChangingResolution) {
+    Enqueue();
+    ScheduleDecodeBufferTaskIfNeeded();
+  }
+}
+
+// Client entry point: the client is done displaying |picture_buffer_id|;
+// forward to the decoder thread so the buffer can be re-queued.
+void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
+  DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
+  // Must be run on child thread, as we'll insert a sync in the EGL context.
+  // NOTE(review): no EGL sync is visible in this port; the comment above may
+  // be stale (inherited from the Chromium original).
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ReusePictureBufferTask,
+                            base::Unretained(this), picture_buffer_id));
+}
+
+// Client entry point: trampolines the flush request to the decoder thread.
+void V4L2VideoDecodeAccelerator::Flush() {
+  VLOGF(2);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::FlushTask,
+                            base::Unretained(this)));
+}
+
+// Client entry point: trampolines the reset request to the decoder thread.
+void V4L2VideoDecodeAccelerator::Reset() {
+  VLOGF(2);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ResetTask,
+                            base::Unretained(this)));
+}
+
+// Client entry point: tears the decoder down and deletes |this|. Callbacks
+// are cancelled first so no client notification can arrive after this call.
+void V4L2VideoDecodeAccelerator::Destroy() {
+  VLOGF(2);
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+
+  // We're destroying; cancel all callbacks.
+  client_ptr_factory_.reset();
+  weak_this_factory_.InvalidateWeakPtrs();
+
+  // If the decoder thread is running, destroy using posted task.
+  if (decoder_thread_.IsRunning()) {
+    decoder_thread_.task_runner()->PostTask(
+        FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DestroyTask,
+                              base::Unretained(this)));
+    // DestroyTask() will cause the decoder_thread_ to flush all tasks.
+    decoder_thread_.Stop();
+  } else {
+    // Otherwise, call the destroy task directly.
+    DestroyTask();
+  }
+
+  delete this;
+  // Safe after |delete this|: VLOGF only expands to logging, it does not
+  // touch any member.
+  VLOGF(2) << "Destroyed.";
+}
+
+// Redirects Decode()/NotifyEndOfBitstreamBuffer() traffic to a dedicated
+// client thread. Always succeeds; must be called before Initialize() for the
+// runner to take effect there.
+bool V4L2VideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
+    const base::WeakPtr<Client>& decode_client,
+    const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+  VLOGF(2);
+  decode_client_ = decode_client;
+  decode_task_runner_ = decode_task_runner;
+  return true;
+}
+
+// static
+// Enumerates the decode profiles the V4L2 device supports for the codecs in
+// |supported_input_fourccs_| (H.264/VP8/VP9). Returns an empty list when no
+// device can be created.
+VideoDecodeAccelerator::SupportedProfiles
+V4L2VideoDecodeAccelerator::GetSupportedProfiles() {
+  scoped_refptr<V4L2Device> device(new V4L2Device());
+  if (!device)
+    return SupportedProfiles();
+
+  return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_),
+                                            supported_input_fourccs_);
+}
+
+// Decoder-thread half of Decode(): wraps the buffer in a BitstreamBufferRef,
+// maps it, applies reset/flush delay bookkeeping, and queues it for
+// DecodeBufferTask(). The ref is created before any early-out so that its
+// destructor still posts NotifyEndOfBitstreamBuffer to the client for
+// skipped/dropped buffers.
+void V4L2VideoDecodeAccelerator::DecodeTask(
+    const BitstreamBuffer& bitstream_buffer) {
+  DVLOGF(4) << "input_id=" << bitstream_buffer.id();
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+
+  std::unique_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
+      decode_client_, decode_task_runner_,
+      std::unique_ptr<SharedMemoryRegion>(
+          new SharedMemoryRegion(bitstream_buffer, true)),
+      bitstream_buffer.id()));
+
+  // Skip empty buffer.
+  if (bitstream_buffer.size() == 0)
+    return;
+
+  if (!bitstream_record->shm->Map()) {
+    VLOGF(1) << "could not map bitstream_buffer";
+    NOTIFY_ERROR(UNREADABLE_INPUT);
+    return;
+  }
+  DVLOGF(4) << "mapped at=" << bitstream_record->shm->memory();
+
+  if (decoder_state_ == kResetting || decoder_flushing_) {
+    // In the case that we're resetting or flushing, we need to delay decoding
+    // the BitstreamBuffers that come after the Reset() or Flush() call.  When
+    // we're here, we know that this DecodeTask() was scheduled by a Decode()
+    // call that came after (in the client thread) the Reset() or Flush() call;
+    // thus set up the delay if necessary.
+    if (decoder_delay_bitstream_buffer_id_ == -1)
+      decoder_delay_bitstream_buffer_id_ = bitstream_record->input_id;
+  } else if (decoder_state_ == kError) {
+    VLOGF(2) << "early out: kError state";
+    return;
+  }
+
+  decoder_input_queue_.push(
+      linked_ptr<BitstreamBufferRef>(bitstream_record.release()));
+  decoder_decode_buffer_tasks_scheduled_++;
+  DecodeBufferTask();
+}
+
+// Drains decoder_input_queue_: picks up the next bitstream buffer when none
+// is current, then either enqueues a flush (a ref with no shm, i.e. the
+// kFlushBufferId dummy), skips an empty buffer, or decodes one frame
+// fragment according to decoder_state_. Reschedules itself via
+// ScheduleDecodeBufferTaskIfNeeded() when it made progress.
+void V4L2VideoDecodeAccelerator::DecodeBufferTask() {
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+
+  decoder_decode_buffer_tasks_scheduled_--;
+
+  if (decoder_state_ != kInitialized && decoder_state_ != kDecoding) {
+    DVLOGF(3) << "early out: state=" << decoder_state_;
+    return;
+  }
+
+  if (decoder_current_bitstream_buffer_ == NULL) {
+    if (decoder_input_queue_.empty()) {
+      // We're waiting for a new buffer -- exit without scheduling a new task.
+      return;
+    }
+    linked_ptr<BitstreamBufferRef>& buffer_ref = decoder_input_queue_.front();
+    if (decoder_delay_bitstream_buffer_id_ == buffer_ref->input_id) {
+      // We're asked to delay decoding on this and subsequent buffers.
+      return;
+    }
+
+    // Setup to use the next buffer.
+    decoder_current_bitstream_buffer_.reset(buffer_ref.release());
+    decoder_input_queue_.pop();
+    const auto& shm = decoder_current_bitstream_buffer_->shm;
+    if (shm) {
+      DVLOGF(4) << "reading input_id="
+                << decoder_current_bitstream_buffer_->input_id
+                << ", addr=" << shm->memory() << ", size=" << shm->size();
+    } else {
+      DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId);
+      DVLOGF(4) << "reading input_id=kFlushBufferId";
+    }
+  }
+  bool schedule_task = false;
+  size_t decoded_size = 0;
+  const auto& shm = decoder_current_bitstream_buffer_->shm;
+  if (!shm) {
+    // This is a dummy buffer, queued to flush the pipe.  Flush.
+    DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId);
+    // Enqueue a buffer guaranteed to be empty.  To do that, we flush the
+    // current input, enqueue no data to the next frame, then flush that down.
+    schedule_task = true;
+    if (decoder_current_input_buffer_ != -1 &&
+        input_buffer_map_[decoder_current_input_buffer_].input_id !=
+            kFlushBufferId)
+      schedule_task = FlushInputFrame();
+
+    if (schedule_task && AppendToInputFrame(NULL, 0) && FlushInputFrame()) {
+      VLOGF(2) << "enqueued flush buffer";
+      decoder_partial_frame_pending_ = false;
+      schedule_task = true;
+    } else {
+      // If we failed to enqueue the empty buffer (due to pipeline
+      // backpressure), don't advance the bitstream buffer queue, and don't
+      // schedule the next task.  This bitstream buffer queue entry will get
+      // reprocessed when the pipeline frees up.
+      schedule_task = false;
+    }
+  } else if (shm->size() == 0) {
+    // This is a buffer queued from the client that has zero size.  Skip.
+    schedule_task = true;
+  } else {
+    // This is a buffer queued from the client, with actual contents.  Decode.
+    const uint8_t* const data =
+        reinterpret_cast<const uint8_t*>(shm->memory()) +
+        decoder_current_bitstream_buffer_->bytes_used;
+    const size_t data_size =
+        shm->size() - decoder_current_bitstream_buffer_->bytes_used;
+    if (!AdvanceFrameFragment(data, data_size, &decoded_size)) {
+      NOTIFY_ERROR(UNREADABLE_INPUT);
+      return;
+    }
+    // AdvanceFrameFragment should not return a size larger than the buffer
+    // size, even on invalid data.
+    CHECK_LE(decoded_size, data_size);
+
+    switch (decoder_state_) {
+      case kInitialized:
+        schedule_task = DecodeBufferInitial(data, decoded_size, &decoded_size);
+        break;
+      case kDecoding:
+        schedule_task = DecodeBufferContinue(data, decoded_size);
+        break;
+      default:
+        NOTIFY_ERROR(ILLEGAL_STATE);
+        return;
+    }
+  }
+  if (decoder_state_ == kError) {
+    // Failed during decode.
+    return;
+  }
+
+  if (schedule_task) {
+    decoder_current_bitstream_buffer_->bytes_used += decoded_size;
+    // A flush dummy (no shm) counts as size 0, so it completes immediately.
+    if ((shm ? shm->size() : 0) ==
+        decoder_current_bitstream_buffer_->bytes_used) {
+      // Our current bitstream buffer is done; return it.
+      int32_t input_id = decoder_current_bitstream_buffer_->input_id;
+      DVLOGF(4) << "finished input_id=" << input_id;
+      // BitstreamBufferRef destructor calls NotifyEndOfBitstreamBuffer().
+      decoder_current_bitstream_buffer_.reset();
+    }
+    ScheduleDecodeBufferTaskIfNeeded();
+  }
+}
+
+// Finds how much of |data| (|size| bytes) belongs to the current frame and
+// stores that byte count in |*endpos|. For H.264 this walks NALUs with
+// H264Parser looking for a frame boundary; for VP8/VP9 the whole buffer is
+// one frame. Sets |decoder_partial_frame_pending_| when the buffer ends
+// mid-frame. Returns false only on unparsable H.264 input.
+bool V4L2VideoDecodeAccelerator::AdvanceFrameFragment(const uint8_t* data,
+                                                      size_t size,
+                                                      size_t* endpos) {
+  if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) {
+    // For H264, we need to feed HW one frame at a time.  This is going to take
+    // some parsing of our input stream.
+    decoder_h264_parser_->SetStream(data, size);
+    H264NALU nalu;
+    H264Parser::Result result;
+    *endpos = 0;
+
+    // Keep on peeking the next NALs while they don't indicate a frame
+    // boundary.
+    for (;;) {
+      bool end_of_frame = false;
+      result = decoder_h264_parser_->AdvanceToNextNALU(&nalu);
+      if (result == H264Parser::kInvalidStream ||
+          result == H264Parser::kUnsupportedStream)
+        return false;
+      if (result == H264Parser::kEOStream) {
+        // We've reached the end of the buffer before finding a frame boundary.
+        decoder_partial_frame_pending_ = true;
+        *endpos = size;
+        return true;
+      }
+      switch (nalu.nal_unit_type) {
+        case H264NALU::kNonIDRSlice:
+        case H264NALU::kIDRSlice:
+          if (nalu.size < 1)
+            return false;
+          // For these two, if the "first_mb_in_slice" field is zero, start a
+          // new frame and return.  This field is Exp-Golomb coded starting on
+          // the eighth data bit of the NAL; a zero value is encoded with a
+          // leading '1' bit in the byte, which we can detect as the byte being
+          // (unsigned) greater than or equal to 0x80.
+          if (nalu.data[1] >= 0x80) {
+            end_of_frame = true;
+            break;
+          }
+          break;
+        case H264NALU::kSEIMessage:
+        case H264NALU::kSPS:
+        case H264NALU::kPPS:
+        case H264NALU::kAUD:
+        case H264NALU::kEOSeq:
+        case H264NALU::kEOStream:
+        case H264NALU::kReserved14:
+        case H264NALU::kReserved15:
+        case H264NALU::kReserved16:
+        case H264NALU::kReserved17:
+        case H264NALU::kReserved18:
+          // These unconditionally signal a frame boundary.
+          end_of_frame = true;
+          break;
+        default:
+          // For all others, keep going.
+          break;
+      }
+      if (end_of_frame) {
+        if (!decoder_partial_frame_pending_ && *endpos == 0) {
+          // The frame was previously restarted, and we haven't filled the
+          // current frame with any contents yet.  Start the new frame here and
+          // continue parsing NALs.
+        } else {
+          // The frame wasn't previously restarted and/or we have contents for
+          // the current frame; signal the start of a new frame here: we don't
+          // have a partial frame anymore.
+          decoder_partial_frame_pending_ = false;
+          return true;
+        }
+      }
+      *endpos = (nalu.data + nalu.size) - data;
+    }
+    NOTREACHED();
+    return false;
+  } else {
+    DCHECK_GE(video_profile_, VP8PROFILE_MIN);
+    DCHECK_LE(video_profile_, VP9PROFILE_MAX);
+    // For VP8/9, we can just dump the entire buffer.  No fragmentation needed,
+    // and we never return a partial frame.
+    *endpos = size;
+    decoder_partial_frame_pending_ = false;
+    return true;
+  }
+}
+
+// Keeps one DecodeBufferTask scheduled per outstanding input buffer (those
+// queued plus the one currently being consumed), so decoding makes progress
+// without flooding the task runner.
+void V4L2VideoDecodeAccelerator::ScheduleDecodeBufferTaskIfNeeded() {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  // If we're behind on tasks, schedule another one.
+  int buffers_to_decode = decoder_input_queue_.size();
+  if (decoder_current_bitstream_buffer_ != NULL)
+    buffers_to_decode++;
+  if (decoder_decode_buffer_tasks_scheduled_ < buffers_to_decode) {
+    decoder_decode_buffer_tasks_scheduled_++;
+    decoder_thread_.task_runner()->PostTask(
+        FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DecodeBufferTask,
+                              base::Unretained(this)));
+  }
+}
+
+// Feeds initial stream to the HW until the output format is known. Returns
+// false on error, true to continue; only transitions to kDecoding once the
+// coded size is known and output buffers have been allocated.
+bool V4L2VideoDecodeAccelerator::DecodeBufferInitial(const void* data,
+                                                     size_t size,
+                                                     size_t* endpos) {
+  DVLOGF(3) << "data=" << data << ", size=" << size;
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kInitialized);
+  // Initial decode.  We haven't been able to get output stream format info yet.
+  // Get it, and start decoding.
+
+  // Copy in and send to HW.
+  if (!AppendToInputFrame(data, size))
+    return false;
+
+  // If we only have a partial frame, don't flush and process yet.
+  if (decoder_partial_frame_pending_)
+    return true;
+
+  if (!FlushInputFrame())
+    return false;
+
+  // Recycle buffers.
+  Dequeue();
+
+  *endpos = size;
+
+  // If an initial resolution change event is not done yet, a driver probably
+  // needs more stream to decode format.
+  // Return true and schedule next buffer without changing status to kDecoding.
+  // If the initial resolution change is done and coded size is known, we may
+  // still have to wait for AssignPictureBuffers() and output buffers to be
+  // allocated.
+  if (coded_size_.IsEmpty() || output_buffer_map_.empty()) {
+    // Need more stream to decode format, return true and schedule next buffer.
+    return true;
+  }
+
+  decoder_state_ = kDecoding;
+  ScheduleDecodeBufferTaskIfNeeded();
+  return true;
+}
+
+// Steady-state decode step: append |data| to the current input frame and
+// flush it to the device once the frame is complete. Both callees set kError
+// state on failure.
+bool V4L2VideoDecodeAccelerator::DecodeBufferContinue(const void* data,
+                                                      size_t size) {
+  DVLOGF(4) << "data=" << data << ", size=" << size;
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kDecoding);
+
+  // Both of these calls will set kError state if they fail.
+  // Only flush the frame if it's complete.
+  return (AppendToInputFrame(data, size) &&
+          (decoder_partial_frame_pending_ || FlushInputFrame()));
+}
+
+// Appends |size| bytes at |data| to the input buffer currently being filled,
+// first flushing that buffer if the new data would overflow it, and acquiring
+// a fresh input buffer when none is in progress. Returns false when stalled
+// waiting for a free input buffer, or on error. data == NULL with size == 0
+// is valid and used to queue an empty (flush) buffer.
+bool V4L2VideoDecodeAccelerator::AppendToInputFrame(const void* data,
+                                                    size_t size) {
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+  DCHECK_NE(decoder_state_, kResetting);
+  DCHECK_NE(decoder_state_, kError);
+  // This routine can handle data == NULL and size == 0, which occurs when
+  // we queue an empty buffer for the purposes of flushing the pipe.
+
+  // Flush if we're too big
+  if (decoder_current_input_buffer_ != -1) {
+    InputRecord& input_record =
+        input_buffer_map_[decoder_current_input_buffer_];
+    if (input_record.bytes_used + size > input_record.length) {
+      if (!FlushInputFrame())
+        return false;
+      decoder_current_input_buffer_ = -1;
+    }
+  }
+
+  // Try to get an available input buffer
+  if (decoder_current_input_buffer_ == -1) {
+    if (free_input_buffers_.empty()) {
+      // See if we can get more free buffers from HW
+      Dequeue();
+      if (free_input_buffers_.empty()) {
+        // Nope!
+        DVLOGF(4) << "stalled for input buffers";
+        return false;
+      }
+    }
+    decoder_current_input_buffer_ = free_input_buffers_.back();
+    free_input_buffers_.pop_back();
+    InputRecord& input_record =
+        input_buffer_map_[decoder_current_input_buffer_];
+    DCHECK_EQ(input_record.bytes_used, 0);
+    DCHECK_EQ(input_record.input_id, -1);
+    DCHECK(decoder_current_bitstream_buffer_ != NULL);
+    input_record.input_id = decoder_current_bitstream_buffer_->input_id;
+  }
+
+  DCHECK(data != NULL || size == 0);
+  if (size == 0) {
+    // If we asked for an empty buffer, return now.  We return only after
+    // getting the next input buffer, since we might actually want an empty
+    // input buffer for flushing purposes.
+    return true;
+  }
+
+  // Copy in to the buffer.
+  InputRecord& input_record = input_buffer_map_[decoder_current_input_buffer_];
+  if (size > input_record.length - input_record.bytes_used) {
+    VLOGF(1) << "over-size frame, erroring";
+    NOTIFY_ERROR(UNREADABLE_INPUT);
+    return false;
+  }
+  memcpy(reinterpret_cast<uint8_t*>(input_record.address) +
+             input_record.bytes_used,
+         data, size);
+  input_record.bytes_used += size;
+
+  return true;
+}
+
+// Submits the input buffer currently being filled (if any) to the device.
+// Empty buffers are queued only when they represent the flush marker
+// (kFlushBufferId); empty client buffers are recycled instead. Returns false
+// on error (kError state).
+bool V4L2VideoDecodeAccelerator::FlushInputFrame() {
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+  DCHECK_NE(decoder_state_, kResetting);
+  DCHECK_NE(decoder_state_, kError);
+
+  if (decoder_current_input_buffer_ == -1)
+    return true;
+
+  InputRecord& input_record = input_buffer_map_[decoder_current_input_buffer_];
+  DCHECK_NE(input_record.input_id, -1);
+  DCHECK(input_record.input_id != kFlushBufferId ||
+         input_record.bytes_used == 0);
+  // * if input_id >= 0, this input buffer was prompted by a bitstream buffer we
+  //   got from the client.  We can skip it if it is empty.
+  // * if input_id < 0 (should be kFlushBufferId in this case), this input
+  //   buffer was prompted by a flush buffer, and should be queued even when
+  //   empty.
+  if (input_record.input_id >= 0 && input_record.bytes_used == 0) {
+    input_record.input_id = -1;
+    free_input_buffers_.push_back(decoder_current_input_buffer_);
+    decoder_current_input_buffer_ = -1;
+    return true;
+  }
+
+  // Queue it.
+  input_ready_queue_.push(decoder_current_input_buffer_);
+  decoder_current_input_buffer_ = -1;
+  DVLOGF(4) << "submitting input_id=" << input_record.input_id;
+  // Enqueue once since there's new available input for it.
+  Enqueue();
+
+  return (decoder_state_ != kError);
+}
+
+// Runs on the decoder thread whenever DevicePollTask() reports the device
+// became ready: dequeues/enqueues buffers, handles resolution-change events,
+// and re-arms the device poll.
+void V4L2VideoDecodeAccelerator::ServiceDeviceTask(bool event_pending) {
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+
+  if (decoder_state_ == kResetting) {
+    DVLOGF(3) << "early out: kResetting state";
+    return;
+  } else if (decoder_state_ == kError) {
+    DVLOGF(3) << "early out: kError state";
+    return;
+  } else if (decoder_state_ == kChangingResolution) {
+    DVLOGF(3) << "early out: kChangingResolution state";
+    return;
+  }
+
+  bool resolution_change_pending = false;
+  if (event_pending)
+    resolution_change_pending = DequeueResolutionChangeEvent();
+
+  if (!resolution_change_pending && coded_size_.IsEmpty()) {
+    // Some platforms do not send an initial resolution change event.
+    // To work around this, we need to keep checking if the initial resolution
+    // is known already by explicitly querying the format after each decode,
+    // regardless of whether we received an event.
+    // This needs to be done on initial resolution change,
+    // i.e. when coded_size_.IsEmpty().
+
+    // Try GetFormatInfo to check if an initial resolution change can be done.
+    struct v4l2_format format;
+    Size visible_size;
+    bool again;
+    if (GetFormatInfo(&format, &visible_size, &again) && !again) {
+      resolution_change_pending = true;
+      DequeueResolutionChangeEvent();
+    }
+  }
+
+  Dequeue();
+  Enqueue();
+
+  // Clear the interrupt fd.
+  if (!device_->ClearDevicePollInterrupt()) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  bool poll_device = false;
+  // Add fd, if we should poll on it.
+  // Can be polled as soon as either input or output buffers are queued.
+  if (input_buffer_queued_count_ + output_buffer_queued_count_ > 0)
+    poll_device = true;
+
+  // ServiceDeviceTask() should only ever be scheduled from DevicePollTask(),
+  // so either:
+  // * device_poll_thread_ is running normally
+  // * device_poll_thread_ scheduled us, but then a ResetTask() or DestroyTask()
+  //   shut it down, in which case we're either in kResetting or kError states
+  //   respectively, and we should have early-outed already.
+  DCHECK(device_poll_thread_.message_loop());
+  // Queue the DevicePollTask() now.
+  device_poll_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DevicePollTask,
+                            base::Unretained(this), poll_device));
+
+  DVLOGF(3) << "ServiceDeviceTask(): buffer counts: DEC["
+            << decoder_input_queue_.size() << "->"
+            << input_ready_queue_.size() << "] => DEVICE["
+            << free_input_buffers_.size() << "+"
+            << input_buffer_queued_count_ << "/"
+            << input_buffer_map_.size() << "->"
+            << free_output_buffers_.size() << "+"
+            << output_buffer_queued_count_ << "/"
+            << output_buffer_map_.size() << "] => CLIENT["
+            << decoder_frames_at_client_ << "]";
+
+  ScheduleDecodeBufferTaskIfNeeded();
+  if (resolution_change_pending)
+    StartResolutionChange();
+}
+
+// Queues as many ready input buffers and free output buffers to the device as
+// possible, starting streaming and waking the poll thread whenever a queue
+// transitions from empty to non-empty.
+void V4L2VideoDecodeAccelerator::Enqueue() {
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+
+  // Enqueue all ready input (VIDEO_OUTPUT) buffers.
+  const int old_inputs_queued = input_buffer_queued_count_;
+  while (!input_ready_queue_.empty()) {
+    const int buffer = input_ready_queue_.front();
+    InputRecord& input_record = input_buffer_map_[buffer];
+    if (input_record.input_id == kFlushBufferId && decoder_cmd_supported_) {
+      // Send the flush command after all input buffers are dequeued. This makes
+      // sure all previous resolution changes have been handled because the
+      // driver must hold the input buffer that triggers resolution change. The
+      // driver cannot decode data in it without new output buffers. If we send
+      // the flush now and a queued input buffer triggers resolution change
+      // later, the driver will send an output buffer that has
+      // V4L2_BUF_FLAG_LAST. But some queued input buffer have not been decoded
+      // yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution
+      // change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START
+      // to the decoder.
+      if (input_buffer_queued_count_ == 0) {
+        if (!SendDecoderCmdStop())
+          return;
+        input_ready_queue_.pop();
+        free_input_buffers_.push_back(buffer);
+        input_record.input_id = -1;
+      } else {
+        break;
+      }
+    } else if (!EnqueueInputRecord())
+      return;
+  }
+  if (old_inputs_queued == 0 && input_buffer_queued_count_ != 0) {
+    // We just started up a previously empty queue.
+    // Queue state changed; signal interrupt.
+    if (!device_->SetDevicePollInterrupt()) {
+      VPLOGF(1) << "SetDevicePollInterrupt failed";
+      NOTIFY_ERROR(PLATFORM_FAILURE);
+      return;
+    }
+    // Start VIDIOC_STREAMON if we haven't yet.
+    if (!input_streamon_) {
+      __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+      IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
+      input_streamon_ = true;
+    }
+  }
+
+  // Enqueue all the outputs we can.
+  const int old_outputs_queued = output_buffer_queued_count_;
+  while (!free_output_buffers_.empty()) {
+    if (!EnqueueOutputRecord())
+      return;
+  }
+  if (old_outputs_queued == 0 && output_buffer_queued_count_ != 0) {
+    // We just started up a previously empty queue.
+    // Queue state changed; signal interrupt.
+    if (!device_->SetDevicePollInterrupt()) {
+      VPLOGF(1) << "SetDevicePollInterrupt(): failed";
+      NOTIFY_ERROR(PLATFORM_FAILURE);
+      return;
+    }
+    // Start VIDIOC_STREAMON if we haven't yet.
+    if (!output_streamon_) {
+      __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+      IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
+      output_streamon_ = true;
+    }
+  }
+}
+
+// Drains the device's event queue and reports whether a resolution change
+// event was among the pending events.
+bool V4L2VideoDecodeAccelerator::DequeueResolutionChangeEvent() {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+  DVLOGF(3);
+
+  struct v4l2_event event;
+  memset(&event, 0, sizeof(event));
+
+  while (device_->Ioctl(VIDIOC_DQEVENT, &event) == 0) {
+    if (event.type != V4L2_EVENT_SOURCE_CHANGE) {
+      VLOGF(1) << "got an event (" << event.type
+               << ") we haven't subscribed to.";
+      continue;
+    }
+    if (event.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) {
+      VLOGF(2) << "got resolution change event.";
+      return true;
+    }
+  }
+  return false;
+}
+
+// Reclaims completed buffers from the driver on both queues, stopping as soon
+// as the driver has nothing more to give back, then re-checks flush progress.
+void V4L2VideoDecodeAccelerator::Dequeue() {
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+
+  while (input_buffer_queued_count_ > 0 && DequeueInputBuffer())
+    continue;
+  while (output_buffer_queued_count_ > 0 && DequeueOutputBuffer())
+    continue;
+
+  NotifyFlushDoneIfNeeded();
+}
+
+// Dequeues one completed input (VIDEO_OUTPUT) buffer and returns it to the
+// free list. Returns false when nothing is left to dequeue (EAGAIN) or on a
+// hard error (which also raises PLATFORM_FAILURE).
+bool V4L2VideoDecodeAccelerator::DequeueInputBuffer() {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_GT(input_buffer_queued_count_, 0);
+  DCHECK(input_streamon_);
+
+  // Dequeue a completed input (VIDEO_OUTPUT) buffer, and recycle to the free
+  // list.
+  struct v4l2_buffer dqbuf;
+  struct v4l2_plane planes[1];
+  memset(&dqbuf, 0, sizeof(dqbuf));
+  memset(planes, 0, sizeof(planes));
+  dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  dqbuf.memory = V4L2_MEMORY_MMAP;
+  dqbuf.m.planes = planes;
+  dqbuf.length = 1;
+  if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
+    if (errno == EAGAIN) {
+      // EAGAIN if we're just out of buffers to dequeue.
+      return false;
+    }
+    VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+  InputRecord& input_record = input_buffer_map_[dqbuf.index];
+  DCHECK(input_record.at_device);
+  free_input_buffers_.push_back(dqbuf.index);
+  input_record.at_device = false;
+  input_record.bytes_used = 0;
+  input_record.input_id = -1;
+  input_buffer_queued_count_--;
+
+  return true;
+}
+
+// Dequeues one completed output (VIDEO_CAPTURE) buffer. Non-empty buffers are
+// forwarded to the client as pictures; empty ones (produced during a flush)
+// are recycled. A V4L2_BUF_FLAG_LAST buffer completes a pending flush, after
+// which the decoder is restarted with V4L2_DEC_CMD_START. Returns false when
+// there is nothing to dequeue (EAGAIN/EPIPE) or on error.
+bool V4L2VideoDecodeAccelerator::DequeueOutputBuffer() {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_GT(output_buffer_queued_count_, 0);
+  DCHECK(output_streamon_);
+
+  // Dequeue a completed output (VIDEO_CAPTURE) buffer, and queue to the
+  // completed queue.
+  struct v4l2_buffer dqbuf;
+  std::unique_ptr<struct v4l2_plane[]> planes(
+      new v4l2_plane[output_planes_count_]);
+  memset(&dqbuf, 0, sizeof(dqbuf));
+  memset(planes.get(), 0, sizeof(struct v4l2_plane) * output_planes_count_);
+  dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  dqbuf.memory = V4L2_MEMORY_MMAP;
+  dqbuf.m.planes = planes.get();
+  dqbuf.length = output_planes_count_;
+  if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
+    if (errno == EAGAIN) {
+      // EAGAIN if we're just out of buffers to dequeue.
+      return false;
+    } else if (errno == EPIPE) {
+      DVLOGF(3) << "Got EPIPE. Last output buffer was already dequeued.";
+      return false;
+    }
+    VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+  OutputRecord& output_record = output_buffer_map_[dqbuf.index];
+  DCHECK_EQ(output_record.state, kAtDevice);
+  DCHECK_NE(output_record.picture_id, -1);
+  output_buffer_queued_count_--;
+  if (dqbuf.m.planes[0].bytesused == 0) {
+    // This is an empty output buffer returned as part of a flush.
+    output_record.state = kFree;
+    free_output_buffers_.push_back(dqbuf.index);
+  } else {
+    // EnqueueInputRecord() stashed the bitstream id in the buffer timestamp.
+    int32_t bitstream_buffer_id = dqbuf.timestamp.tv_sec;
+    DCHECK_GE(bitstream_buffer_id, 0);
+    DVLOGF(4) << "Dequeue output buffer: dqbuf index=" << dqbuf.index
+              << " bitstream input_id=" << bitstream_buffer_id;
+    output_record.state = kAtClient;
+    decoder_frames_at_client_++;
+
+    const Picture picture(output_record.picture_id, bitstream_buffer_id,
+                          Rect(visible_size_), false);
+    pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
+    SendPictureReady();
+    output_record.cleared = true;
+  }
+  if (dqbuf.flags & V4L2_BUF_FLAG_LAST) {
+    DVLOGF(3) << "Got last output buffer. Waiting last buffer="
+              << flush_awaiting_last_output_buffer_;
+    if (flush_awaiting_last_output_buffer_) {
+      flush_awaiting_last_output_buffer_ = false;
+      struct v4l2_decoder_cmd cmd;
+      memset(&cmd, 0, sizeof(cmd));
+      cmd.cmd = V4L2_DEC_CMD_START;
+      IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_DECODER_CMD, &cmd);
+    }
+  }
+  return true;
+}
+
+// Queues the front of input_ready_queue_ to the device. The bitstream id is
+// stashed in the buffer timestamp so it can be matched to decoded output in
+// DequeueOutputBuffer().
+bool V4L2VideoDecodeAccelerator::EnqueueInputRecord() {
+  DVLOGF(4);
+  DCHECK(!input_ready_queue_.empty());
+
+  // Enqueue an input (VIDEO_OUTPUT) buffer.
+  const int buffer = input_ready_queue_.front();
+  InputRecord& input_record = input_buffer_map_[buffer];
+  DCHECK(!input_record.at_device);
+  struct v4l2_buffer qbuf;
+  struct v4l2_plane qbuf_plane;
+  memset(&qbuf, 0, sizeof(qbuf));
+  memset(&qbuf_plane, 0, sizeof(qbuf_plane));
+  qbuf.index = buffer;
+  qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  qbuf.timestamp.tv_sec = input_record.input_id;
+  qbuf.memory = V4L2_MEMORY_MMAP;
+  qbuf.m.planes = &qbuf_plane;
+  qbuf.m.planes[0].bytesused = input_record.bytes_used;
+  qbuf.length = 1;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
+  input_ready_queue_.pop();
+  input_record.at_device = true;
+  input_buffer_queued_count_++;
+  DVLOGF(4) << "enqueued input_id=" << input_record.input_id
+            << " size=" << input_record.bytes_used;
+  return true;
+}
+
+// Queues the front of free_output_buffers_ to the device for it to fill with
+// a decoded picture.
+bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() {
+  DCHECK(!free_output_buffers_.empty());
+
+  // Enqueue an output (VIDEO_CAPTURE) buffer.
+  const int buffer = free_output_buffers_.front();
+  DVLOGF(4) << "buffer " << buffer;
+  OutputRecord& output_record = output_buffer_map_[buffer];
+  DCHECK_EQ(output_record.state, kFree);
+  DCHECK_NE(output_record.picture_id, -1);
+  struct v4l2_buffer qbuf;
+  std::unique_ptr<struct v4l2_plane[]> qbuf_planes(
+      new v4l2_plane[output_planes_count_]);
+  memset(&qbuf, 0, sizeof(qbuf));
+  memset(qbuf_planes.get(), 0,
+         sizeof(struct v4l2_plane) * output_planes_count_);
+  qbuf.index = buffer;
+  qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  qbuf.memory = V4L2_MEMORY_MMAP;
+  qbuf.m.planes = qbuf_planes.get();
+  qbuf.length = output_planes_count_;
+  DVLOGF(4) << "qbuf.index=" << qbuf.index;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
+  free_output_buffers_.pop_front();
+  output_record.state = kAtDevice;
+  output_buffer_queued_count_++;
+  return true;
+}
+
+// Called on the decoder thread when the client returns a picture buffer:
+// marks the matching output record free again (unless the buffer set was
+// dismissed in the meantime) and re-enqueues it to the device.
+void V4L2VideoDecodeAccelerator::ReusePictureBufferTask(int32_t picture_buffer_id) {
+  DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  // We run ReusePictureBufferTask even if we're in kResetting.
+  if (decoder_state_ == kError) {
+    DVLOGF(4) << "early out: kError state";
+    return;
+  }
+
+  if (decoder_state_ == kChangingResolution) {
+    DVLOGF(4) << "early out: kChangingResolution";
+    return;
+  }
+
+  // Find the output record with this picture id.
+  size_t index;
+  for (index = 0; index < output_buffer_map_.size(); ++index)
+    if (output_buffer_map_[index].picture_id == picture_buffer_id)
+      break;
+
+  if (index >= output_buffer_map_.size()) {
+    // It's possible that we've already posted a DismissPictureBuffer for this
+    // picture, but it has not yet executed when this ReusePictureBuffer was
+    // posted to us by the client. In that case just ignore this (we've already
+    // dismissed it and accounted for that) and let the sync object get
+    // destroyed.
+    DVLOGF(3) << "got picture id= " << picture_buffer_id
+              << " not in use (anymore?).";
+    return;
+  }
+
+  OutputRecord& output_record = output_buffer_map_[index];
+  if (output_record.state != kAtClient) {
+    VLOGF(1) << "picture_buffer_id not reusable";
+    NOTIFY_ERROR(INVALID_ARGUMENT);
+    return;
+  }
+
+  output_record.state = kFree;
+  free_output_buffers_.push_back(index);
+  decoder_frames_at_client_--;
+  // We got a buffer back, so enqueue it back.
+  Enqueue();
+}
+
+// Initiates a client Flush(): pushes a special empty bitstream buffer
+// (kFlushBufferId) through the decode queue; completion is detected later by
+// NotifyFlushDoneIfNeeded().
+void V4L2VideoDecodeAccelerator::FlushTask() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  if (decoder_state_ == kError) {
+    VLOGF(2) << "early out: kError state";
+    return;
+  }
+
+  // We don't support stacked flushing.
+  DCHECK(!decoder_flushing_);
+
+  // Queue up an empty buffer -- this triggers the flush.
+  decoder_input_queue_.push(
+      linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
+          decode_client_, decode_task_runner_, nullptr, kFlushBufferId)));
+  decoder_flushing_ = true;
+  SendPictureReady();  // Send all pending PictureReady.
+
+  ScheduleDecodeBufferTaskIfNeeded();
+}
+
+// Checks whether a pending flush has fully drained the pipeline; if so, runs
+// a streamoff/streamon cycle (see the Exynos note below) and then notifies
+// the client that the flush is done.
+void V4L2VideoDecodeAccelerator::NotifyFlushDoneIfNeeded() {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  if (!decoder_flushing_)
+    return;
+
+  // Pipeline is empty when:
+  // * Decoder input queue is empty of non-delayed buffers.
+  // * There is no currently filling input buffer.
+  // * Input holding queue is empty.
+  // * All input (VIDEO_OUTPUT) buffers are returned.
+  // * All image processor buffers are returned.
+  if (!decoder_input_queue_.empty()) {
+    if (decoder_input_queue_.front()->input_id !=
+        decoder_delay_bitstream_buffer_id_) {
+      DVLOGF(3) << "Some input bitstream buffers are not queued.";
+      return;
+    }
+  }
+  if (decoder_current_input_buffer_ != -1) {
+    DVLOGF(3) << "Current input buffer != -1";
+    return;
+  }
+  if ((input_ready_queue_.size() + input_buffer_queued_count_) != 0) {
+    DVLOGF(3) << "Some input buffers are not dequeued.";
+    return;
+  }
+  if (flush_awaiting_last_output_buffer_) {
+    DVLOGF(3) << "Waiting for last output buffer.";
+    return;
+  }
+
+  // TODO(posciak): https://crbug.com/270039. Exynos requires a
+  // streamoff-streamon sequence after flush to continue, even if we are not
+  // resetting. This would make sense, because we don't really want to resume
+  // from a non-resume point (e.g. not from an IDR) if we are flushed.
+  // MSE player however triggers a Flush() on chunk end, but never Reset(). One
+  // could argue either way, or even say that Flush() is not needed/harmful when
+  // transitioning to next chunk.
+  // For now, do the streamoff-streamon cycle to satisfy Exynos and not freeze
+  // when doing MSE. This should be harmless otherwise.
+  if (!(StopDevicePoll() && StopOutputStream() && StopInputStream()))
+    return;
+
+  if (!StartDevicePoll())
+    return;
+
+  decoder_delay_bitstream_buffer_id_ = -1;
+  decoder_flushing_ = false;
+  VLOGF(2) << "returning flush";
+  child_task_runner_->PostTask(FROM_HERE,
+                               base::Bind(&Client::NotifyFlushDone, client_));
+
+  // While we were flushing, we early-outed DecodeBufferTask()s.
+  ScheduleDecodeBufferTaskIfNeeded();
+}
+
+// Probes whether the driver supports flushing via V4L2_DEC_CMD_STOP.
+bool V4L2VideoDecodeAccelerator::IsDecoderCmdSupported() {
+  // CMD_STOP should always succeed. If the decoder is started, the command can
+  // flush it. If the decoder is stopped, the command does nothing. We use this
+  // to know if a driver supports V4L2_DEC_CMD_STOP to flush.
+  struct v4l2_decoder_cmd cmd = {};
+  cmd.cmd = V4L2_DEC_CMD_STOP;
+  const bool supported = (device_->Ioctl(VIDIOC_TRY_DECODER_CMD, &cmd) == 0);
+  if (!supported)
+    VLOGF(2) << "V4L2_DEC_CMD_STOP is not supported.";
+  return supported;
+}
+
+// Issues V4L2_DEC_CMD_STOP to start a driver-side flush; DequeueOutputBuffer()
+// later observes the V4L2_BUF_FLAG_LAST buffer we now begin waiting for.
+bool V4L2VideoDecodeAccelerator::SendDecoderCmdStop() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK(!flush_awaiting_last_output_buffer_);
+
+  struct v4l2_decoder_cmd cmd = {};
+  cmd.cmd = V4L2_DEC_CMD_STOP;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_DECODER_CMD, &cmd);
+  flush_awaiting_last_output_buffer_ = true;
+  return true;
+}
+
+// Handles a client Reset(): drops all queued input and either finishes the
+// reset immediately or defers it until an in-progress resolution change (or
+// picture buffer assignment) completes.
+void V4L2VideoDecodeAccelerator::ResetTask() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  if (decoder_state_ == kError) {
+    VLOGF(2) << "early out: kError state";
+    return;
+  }
+  // Drop all pending inputs; nothing queued so far should be decoded.
+  decoder_current_bitstream_buffer_.reset();
+  while (!decoder_input_queue_.empty())
+    decoder_input_queue_.pop();
+
+  decoder_current_input_buffer_ = -1;
+
+  // If we are in the middle of switching resolutions or awaiting picture
+  // buffers, postpone reset until it's done. We don't have to worry about
+  // timing of this wrt to decoding, because output pipe is already
+  // stopped if we are changing resolution. We will come back here after
+  // we are done.
+  DCHECK(!reset_pending_);
+  if (decoder_state_ == kChangingResolution ||
+      decoder_state_ == kAwaitingPictureBuffers) {
+    reset_pending_ = true;
+    return;
+  }
+  FinishReset();
+}
+
+// Second half of Reset(): stops streaming, services any pending resolution
+// change first, then transitions to kResetting and schedules ResetDoneTask().
+void V4L2VideoDecodeAccelerator::FinishReset() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  reset_pending_ = false;
+  // After the output stream is stopped, the codec should not post any
+  // resolution change events. So we dequeue the resolution change event
+  // afterwards. The event could be posted before or while stopping the output
+  // stream. The codec will expect the buffer of new size after the seek, so
+  // we need to handle the resolution change event first.
+  if (!(StopDevicePoll() && StopOutputStream()))
+    return;
+
+  if (DequeueResolutionChangeEvent()) {
+    // Re-arm the pending reset; StartResolutionChange() will bring us back.
+    reset_pending_ = true;
+    StartResolutionChange();
+    return;
+  }
+
+  if (!StopInputStream())
+    return;
+
+  // If we were flushing, we'll never return any more BitstreamBuffers or
+  // PictureBuffers; they have all been dropped and returned by now.
+  NotifyFlushDoneIfNeeded();
+
+  // Mark that we're resetting, then enqueue a ResetDoneTask().  All intervening
+  // jobs will early-out in the kResetting state.
+  decoder_state_ = kResetting;
+  SendPictureReady();  // Send all pending PictureReady.
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ResetDoneTask,
+                            base::Unretained(this)));
+}
+
+// Completes a reset on the decoder thread: restarts device polling if needed,
+// reinitializes format-specific parser state, returns to kInitialized and
+// reports NotifyResetDone() to the client.
+void V4L2VideoDecodeAccelerator::ResetDoneTask() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  if (decoder_state_ == kError) {
+    VLOGF(2) << "early out: kError state";
+    return;
+  }
+
+  // Start poll thread if NotifyFlushDoneIfNeeded has not already.
+  if (!device_poll_thread_.IsRunning()) {
+    if (!StartDevicePoll())
+      return;
+  }
+
+  // Reset format-specific bits.
+  if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) {
+    decoder_h264_parser_.reset(new H264Parser());
+  }
+
+  // Jobs drained, we're finished resetting.
+  DCHECK_EQ(decoder_state_, kResetting);
+  decoder_state_ = kInitialized;
+
+  decoder_partial_frame_pending_ = false;
+  decoder_delay_bitstream_buffer_id_ = -1;
+  child_task_runner_->PostTask(FROM_HERE,
+                               base::Bind(&Client::NotifyResetDone, client_));
+
+  // While we were resetting, we early-outed DecodeBufferTask()s.
+  ScheduleDecodeBufferTaskIfNeeded();
+}
+
+// Tears everything down on the decoder thread: stops polling and streaming,
+// drops all pending work, and leaves the decoder in kError so no further
+// work is attempted.
+void V4L2VideoDecodeAccelerator::DestroyTask() {
+  VLOGF(2);
+
+  // DestroyTask() should run regardless of decoder_state_.
+
+  StopDevicePoll();
+  StopOutputStream();
+  StopInputStream();
+
+  decoder_current_bitstream_buffer_.reset();
+  decoder_current_input_buffer_ = -1;
+  decoder_decode_buffer_tasks_scheduled_ = 0;
+  decoder_frames_at_client_ = 0;
+  while (!decoder_input_queue_.empty())
+    decoder_input_queue_.pop();
+  decoder_flushing_ = false;
+
+  // Set our state to kError.  Just in case.
+  decoder_state_ = kError;
+
+  DestroyInputBuffers();
+  DestroyOutputBuffers();
+}
+
+// Starts the device poll thread and schedules its first DevicePollTask().
+bool V4L2VideoDecodeAccelerator::StartDevicePoll() {
+  DVLOGF(3);
+  DCHECK(!device_poll_thread_.IsRunning());
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  // Spin up the poll thread; signal a platform error if that fails.
+  if (!device_poll_thread_.Start()) {
+    VLOGF(1) << "Device thread failed to start";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+  // Kick off the first iteration with poll_device == 0: nothing is queued
+  // yet, so only the interrupt fd needs watching.
+  device_poll_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::DevicePollTask,
+                            base::Unretained(this), 0));
+  return true;
+}
+
+// Stops the device poll thread, first waking it via the poll interrupt so the
+// blocking Poll() returns. The thread DCHECK is skipped when the decoder
+// thread has already been stopped (destruction path).
+bool V4L2VideoDecodeAccelerator::StopDevicePoll() {
+  DVLOGF(3);
+
+  if (!device_poll_thread_.IsRunning())
+    return true;
+
+  if (decoder_thread_.IsRunning())
+    DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  // Signal the DevicePollTask() to stop, and stop the device poll thread.
+  if (!device_->SetDevicePollInterrupt()) {
+    VPLOGF(1) << "SetDevicePollInterrupt(): failed";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+  device_poll_thread_.Stop();
+  // Clear the interrupt now, to be sure.
+  if (!device_->ClearDevicePollInterrupt()) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+  DVLOGF(3) << "device poll stopped";
+  return true;
+}
+
+// Stops the output (CAPTURE) stream with VIDIOC_STREAMOFF and reclaims the
+// buffers the device owned.
+bool V4L2VideoDecodeAccelerator::StopOutputStream() {
+  VLOGF(2);
+  if (!output_streamon_)
+    return true;
+
+  __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
+  output_streamon_ = false;
+
+  // Output stream is stopped. No need to wait for the buffer anymore.
+  flush_awaiting_last_output_buffer_ = false;
+
+  // STREAMOFF makes the driver drop ownership of every queued buffer without
+  // an explicit dequeue. Buffers the client still holds stay with the client;
+  // everything that was at the device goes back on the free list.
+  for (size_t index = 0; index < output_buffer_map_.size(); ++index) {
+    OutputRecord& record = output_buffer_map_[index];
+    if (record.state != kAtDevice)
+      continue;
+    record.state = kFree;
+    free_output_buffers_.push_back(index);
+  }
+  output_buffer_queued_count_ = 0;
+  return true;
+}
+
+// Stops the input (OUTPUT) stream with VIDIOC_STREAMOFF and resets all input
+// buffer bookkeeping so every buffer is free again.
+bool V4L2VideoDecodeAccelerator::StopInputStream() {
+  VLOGF(2);
+  if (!input_streamon_)
+    return true;
+
+  __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
+  input_streamon_ = false;
+
+  // STREAMOFF returned all input buffers to us: rebuild the free list and
+  // clear per-buffer state so nothing is considered queued or in use.
+  while (!input_ready_queue_.empty())
+    input_ready_queue_.pop();
+  free_input_buffers_.clear();
+  for (size_t index = 0; index < input_buffer_map_.size(); ++index) {
+    InputRecord& record = input_buffer_map_[index];
+    free_input_buffers_.push_back(index);
+    record.at_device = false;
+    record.bytes_used = 0;
+    record.input_id = -1;
+  }
+  input_buffer_queued_count_ = 0;
+  return true;
+}
+
+// Begins a resolution change: stops polling and the output stream, destroys
+// the old output buffers, and (if that all succeeds) proceeds directly to
+// FinishResolutionChange().
+void V4L2VideoDecodeAccelerator::StartResolutionChange() {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_NE(decoder_state_, kUninitialized);
+  DCHECK_NE(decoder_state_, kResetting);
+
+  VLOGF(2) << "Initiate resolution change";
+
+  if (!(StopDevicePoll() && StopOutputStream()))
+    return;
+
+  decoder_state_ = kChangingResolution;
+  SendPictureReady();  // Send all pending PictureReady.
+
+  if (!DestroyOutputBuffers()) {
+    VLOGF(1) << "Failed destroying output buffers.";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  FinishResolutionChange();
+}
+
+// Completes a resolution change: queries the new format, allocates buffers
+// for it and resumes device polling.
+void V4L2VideoDecodeAccelerator::FinishResolutionChange() {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kChangingResolution);
+  VLOGF(2);
+
+  if (decoder_state_ == kError) {
+    VLOGF(2) << "early out: kError state";
+    return;
+  }
+
+  // By this point the driver must be able to report the new format; "again"
+  // (insufficient stream) here is treated as a hard failure.
+  struct v4l2_format format;
+  Size visible_size;
+  bool again = false;
+  if (!GetFormatInfo(&format, &visible_size, &again) || again) {
+    VLOGF(1) << "Couldn't get format information after resolution change";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  if (!CreateBuffersForFormat(format, visible_size)) {
+    VLOGF(1) << "Couldn't reallocate buffers after resolution change";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  // Resume polling; StartDevicePoll() reports its own errors.
+  StartDevicePoll();
+}
+
+// Runs on the poll thread: blocks in Poll() until the device (or the
+// interrupt fd) becomes ready, then hands control back to the decoder thread.
+void V4L2VideoDecodeAccelerator::DevicePollTask(bool poll_device) {
+  DVLOGF(4);
+  DCHECK(device_poll_thread_.task_runner()->BelongsToCurrentThread());
+
+  bool event_pending = false;
+  if (!device_->Poll(poll_device, &event_pending)) {
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return;
+  }
+
+  // All processing should happen on ServiceDeviceTask(), since we shouldn't
+  // touch decoder state from this thread.
+  decoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ServiceDeviceTask,
+                            base::Unretained(this), event_pending));
+}
+
+void V4L2VideoDecodeAccelerator::NotifyError(Error error) {
+  VLOGF(1);
+
+  if (!child_task_runner_->BelongsToCurrentThread()) {
+    child_task_runner_->PostTask(
+        FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::NotifyError,
+                              weak_this_, error));
+    return;
+  }
+
+  if (client_) {
+    client_->NotifyError(error);
+    client_ptr_factory_.reset();
+  }
+}
+
+// Transitions to kError from any thread, re-posting to the decoder thread
+// when necessary, and notifies the client only if we are past initialization.
+void V4L2VideoDecodeAccelerator::SetErrorState(Error error) {
+  // We can touch decoder_state_ only if this is the decoder thread or the
+  // decoder thread isn't running.
+  if (decoder_thread_.task_runner() &&
+      !decoder_thread_.task_runner()->BelongsToCurrentThread()) {
+    decoder_thread_.task_runner()->PostTask(
+        FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::SetErrorState,
+                              base::Unretained(this), error));
+    return;
+  }
+
+  // Post NotifyError only if we are already initialized, as the API does
+  // not allow doing so before that.
+  if (decoder_state_ != kError && decoder_state_ != kUninitialized)
+    NotifyError(error);
+
+  decoder_state_ = kError;
+}
+
+// Queries the current CAPTURE format via VIDIOC_G_FMT. On EINVAL the driver
+// has not yet seen enough stream: *again is set and true is returned.
+// |visible_size| may be null when the caller only needs the raw format.
+bool V4L2VideoDecodeAccelerator::GetFormatInfo(struct v4l2_format* format,
+                                               Size* visible_size,
+                                               bool* again) {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  *again = false;
+  memset(format, 0, sizeof(*format));
+  format->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  if (device_->Ioctl(VIDIOC_G_FMT, format) != 0) {
+    // EINVAL means we haven't seen sufficient stream to decode the format.
+    if (errno == EINVAL) {
+      *again = true;
+      return true;
+    }
+    VPLOGF(1) << "ioctl() failed: VIDIOC_G_FMT";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    return false;
+  }
+
+  // Make sure we are still getting the format we set on initialization.
+  if (format->fmt.pix_mp.pixelformat != output_format_fourcc_) {
+    VLOGF(1) << "Unexpected format from G_FMT on output";
+    return false;
+  }
+
+  if (visible_size != nullptr) {
+    const Size coded_size(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
+    *visible_size = GetVisibleSize(coded_size);
+  }
+  return true;
+}
+
+bool V4L2VideoDecodeAccelerator::CreateBuffersForFormat(
+    const struct v4l2_format& format,
+    const Size& visible_size) {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  output_planes_count_ = format.fmt.pix_mp.num_planes;
+  coded_size_.SetSize(format.fmt.pix_mp.width, format.fmt.pix_mp.height);
+  visible_size_ = visible_size;
+
+  VLOGF(2) << "new resolution: " << coded_size_.ToString()
+           << ", visible size: " << visible_size_.ToString()
+           << ", decoder output planes count: " << output_planes_count_;
+
+  return CreateOutputBuffers();
+}
+
+Size V4L2VideoDecodeAccelerator::GetVisibleSize(
+    const Size& coded_size) {
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+  struct v4l2_rect* visible_rect;
+  struct v4l2_selection selection_arg;
+  memset(&selection_arg, 0, sizeof(selection_arg));
+  selection_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+  selection_arg.target = V4L2_SEL_TGT_COMPOSE;
+
+  if (device_->Ioctl(VIDIOC_G_SELECTION, &selection_arg) == 0) {
+    VLOGF(2) << "VIDIOC_G_SELECTION is supported";
+    visible_rect = &selection_arg.r;
+  } else {
+    VLOGF(2) << "Fallback to VIDIOC_G_CROP";
+    struct v4l2_crop crop_arg;
+    memset(&crop_arg, 0, sizeof(crop_arg));
+    crop_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+    if (device_->Ioctl(VIDIOC_G_CROP, &crop_arg) != 0) {
+      VPLOGF(1) << "ioctl() VIDIOC_G_CROP failed";
+      return coded_size;
+    }
+    visible_rect = &crop_arg.c;
+  }
+
+  Rect rect(visible_rect->left, visible_rect->top, visible_rect->width,
+            visible_rect->height);
+  VLOGF(2) << "visible rectangle is " << rect.ToString();
+  if (!Rect(coded_size).Contains(rect)) {
+    DVLOGF(3) << "visible rectangle " << rect.ToString()
+              << " is not inside coded size " << coded_size.ToString();
+    return coded_size;
+  }
+  if (rect.IsEmpty()) {
+    VLOGF(1) << "visible size is empty";
+    return coded_size;
+  }
+
+  // Chrome assumes the picture frame is coded at (0, 0).
+  if (rect.x() != 0 || rect.y() != 0) {
+    VLOGF(1) << "Unexpected visible rectangle " << rect.ToString()
+             << ", top-left is not origin";
+    return coded_size;
+  }
+
+  return rect.size();
+}
+
+bool V4L2VideoDecodeAccelerator::CreateInputBuffers() {
+  VLOGF(2);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  // We always run this as we prepare to initialize.
+  DCHECK_EQ(decoder_state_, kInitialized);
+  DCHECK(!input_streamon_);
+  DCHECK(input_buffer_map_.empty());
+
+  struct v4l2_requestbuffers reqbufs;
+  memset(&reqbufs, 0, sizeof(reqbufs));
+  reqbufs.count = kInputBufferCount;
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  reqbufs.memory = V4L2_MEMORY_MMAP;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
+  input_buffer_map_.resize(reqbufs.count);
+  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
+    free_input_buffers_.push_back(i);
+
+    // Query for the MEMORY_MMAP pointer.
+    struct v4l2_plane planes[1];
+    struct v4l2_buffer buffer;
+    memset(&buffer, 0, sizeof(buffer));
+    memset(planes, 0, sizeof(planes));
+    buffer.index = i;
+    buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+    buffer.memory = V4L2_MEMORY_MMAP;
+    buffer.m.planes = planes;
+    buffer.length = 1;
+    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
+    void* address = device_->Mmap(NULL,
+                                  buffer.m.planes[0].length,
+                                  PROT_READ | PROT_WRITE,
+                                  MAP_SHARED,
+                                  buffer.m.planes[0].m.mem_offset);
+    if (address == MAP_FAILED) {
+      VPLOGF(1) << "mmap() failed";
+      return false;
+    }
+    input_buffer_map_[i].address = address;
+    input_buffer_map_[i].length = buffer.m.planes[0].length;
+  }
+
+  return true;
+}
+
+static bool IsSupportedOutputFormat(uint32_t v4l2_format) {
+  // Only support V4L2_PIX_FMT_NV12 output format for now.
+  // TODO(johnylin): add more supported formats if necessary.
+  uint32_t kSupportedOutputFmtFourcc[] = { V4L2_PIX_FMT_NV12 };
+  return std::find(
+      kSupportedOutputFmtFourcc,
+      kSupportedOutputFmtFourcc + arraysize(kSupportedOutputFmtFourcc),
+      v4l2_format) !=
+          kSupportedOutputFmtFourcc + arraysize(kSupportedOutputFmtFourcc);
+}
+
+bool V4L2VideoDecodeAccelerator::SetupFormats() {
+  // We always run this as we prepare to initialize.
+  DCHECK(child_task_runner_->BelongsToCurrentThread());
+  DCHECK_EQ(decoder_state_, kUninitialized);
+  DCHECK(!input_streamon_);
+  DCHECK(!output_streamon_);
+
+  size_t input_size;
+  Size max_resolution, min_resolution;
+  device_->GetSupportedResolution(input_format_fourcc_, &min_resolution,
+                                  &max_resolution);
+  if (max_resolution.width() > 1920 && max_resolution.height() > 1088)
+    input_size = kInputBufferMaxSizeFor4k;
+  else
+    input_size = kInputBufferMaxSizeFor1080p;
+
+  struct v4l2_fmtdesc fmtdesc;
+  memset(&fmtdesc, 0, sizeof(fmtdesc));
+  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  bool is_format_supported = false;
+  while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
+    if (fmtdesc.pixelformat == input_format_fourcc_) {
+      is_format_supported = true;
+      break;
+    }
+    ++fmtdesc.index;
+  }
+
+  if (!is_format_supported) {
+    VLOGF(1) << "Input fourcc " << input_format_fourcc_
+             << " not supported by device.";
+    return false;
+  }
+
+  struct v4l2_format format;
+  memset(&format, 0, sizeof(format));
+  format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  format.fmt.pix_mp.pixelformat = input_format_fourcc_;
+  format.fmt.pix_mp.plane_fmt[0].sizeimage = input_size;
+  format.fmt.pix_mp.num_planes = 1;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);
+
+  // We have to set up the format for output, because the driver may not allow
+  // changing it once we start streaming; whether it can support our chosen
+  // output format or not may depend on the input format.
+  memset(&fmtdesc, 0, sizeof(fmtdesc));
+  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
+    if (IsSupportedOutputFormat(fmtdesc.pixelformat)) {
+      output_format_fourcc_ = fmtdesc.pixelformat;
+      break;
+    }
+    ++fmtdesc.index;
+  }
+
+  if (output_format_fourcc_ == 0) {
+    VLOGF(2) << "Image processor not available";
+    return false;
+  }
+  VLOGF(2) << "Output format=" << output_format_fourcc_;
+
+  // Just set the fourcc for output; resolution, etc., will come from the
+  // driver once it extracts it from the stream.
+  memset(&format, 0, sizeof(format));
+  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  format.fmt.pix_mp.pixelformat = output_format_fourcc_;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);
+
+  return true;
+}
+
+bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() {
+  VLOGF(2);
+  DCHECK(decoder_state_ == kInitialized ||
+         decoder_state_ == kChangingResolution);
+  DCHECK(!output_streamon_);
+  DCHECK(output_buffer_map_.empty());
+  DCHECK_EQ(output_mode_, Config::OutputMode::IMPORT);
+
+  // Number of output buffers we need.
+  struct v4l2_control ctrl;
+  memset(&ctrl, 0, sizeof(ctrl));
+  ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
+  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_G_CTRL, &ctrl);
+  output_dpb_size_ = ctrl.value;
+
+  // Output format setup in Initialize().
+
+  uint32_t buffer_count = output_dpb_size_ + kDpbOutputBufferExtraCount;
+
+  VideoPixelFormat pixel_format =
+      V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_);
+
+  child_task_runner_->PostTask(
+      FROM_HERE, base::Bind(&Client::ProvidePictureBuffers, client_,
+                            buffer_count, pixel_format, coded_size_));
+
+
+  // Go into kAwaitingPictureBuffers to prevent us from doing any more decoding
+  // or event handling while we are waiting for AssignPictureBuffers(). Not
+  // having Pictures available would not have prevented us from making decoding
+  // progress entirely e.g. in the case of H.264 where we could further decode
+  // non-slice NALUs and could even get another resolution change before we were
+  // done with this one. After we get the buffers, we'll go back into kIdle and
+  // kick off further event processing, and eventually go back into kDecoding
+  // once no more events are pending (if any).
+  decoder_state_ = kAwaitingPictureBuffers;
+
+  return true;
+}
+
+void V4L2VideoDecodeAccelerator::DestroyInputBuffers() {
+  VLOGF(2);
+  DCHECK(!decoder_thread_.IsRunning() ||
+         decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK(!input_streamon_);
+
+  if (input_buffer_map_.empty())
+    return;
+
+  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
+    if (input_buffer_map_[i].address != NULL) {
+      device_->Munmap(input_buffer_map_[i].address,
+                      input_buffer_map_[i].length);
+    }
+  }
+
+  struct v4l2_requestbuffers reqbufs;
+  memset(&reqbufs, 0, sizeof(reqbufs));
+  reqbufs.count = 0;
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+  reqbufs.memory = V4L2_MEMORY_MMAP;
+  IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs);
+
+  input_buffer_map_.clear();
+  free_input_buffers_.clear();
+}
+
+bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() {
+  VLOGF(2);
+  DCHECK(!decoder_thread_.IsRunning() ||
+         decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK(!output_streamon_);
+  bool success = true;
+
+  if (output_buffer_map_.empty())
+    return true;
+
+  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
+    OutputRecord& output_record = output_buffer_map_[i];
+
+    DVLOGF(3) << "dismissing PictureBuffer id=" << output_record.picture_id;
+    child_task_runner_->PostTask(
+        FROM_HERE, base::Bind(&Client::DismissPictureBuffer, client_,
+                              output_record.picture_id));
+  }
+
+  struct v4l2_requestbuffers reqbufs;
+  memset(&reqbufs, 0, sizeof(reqbufs));
+  reqbufs.count = 0;
+  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+  reqbufs.memory = V4L2_MEMORY_MMAP;
+  if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) {
+    VPLOGF(1) << "ioctl() failed: VIDIOC_REQBUFS";
+    NOTIFY_ERROR(PLATFORM_FAILURE);
+    success = false;
+  }
+
+  output_buffer_map_.clear();
+  while (!free_output_buffers_.empty())
+    free_output_buffers_.pop_front();
+  output_buffer_queued_count_ = 0;
+  // The client may still hold some buffers. The texture holds a reference to
+  // the buffer. It is OK to free the buffer and destroy EGLImage here.
+  decoder_frames_at_client_ = 0;
+
+  return success;
+}
+
+void V4L2VideoDecodeAccelerator::SendPictureReady() {
+  DVLOGF(4);
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  bool send_now = (decoder_state_ == kChangingResolution ||
+                   decoder_state_ == kResetting || decoder_flushing_);
+  while (pending_picture_ready_.size() > 0) {
+    bool cleared = pending_picture_ready_.front().cleared;
+    const Picture& picture = pending_picture_ready_.front().picture;
+    if (cleared && picture_clearing_count_ == 0) {
+      // This picture is cleared. It can be posted to a thread different than
+      // the main GPU thread to reduce latency. This should be the case after
+      // all pictures are cleared at the beginning.
+      decode_task_runner_->PostTask(
+          FROM_HERE,
+          base::Bind(&Client::PictureReady, decode_client_, picture));
+      pending_picture_ready_.pop();
+    } else if (!cleared || send_now) {
+      DVLOGF(4) << "cleared=" << pending_picture_ready_.front().cleared
+                << ", decoder_state_=" << decoder_state_
+                << ", decoder_flushing_=" << decoder_flushing_
+                << ", picture_clearing_count_=" << picture_clearing_count_;
+      // If the picture is not cleared, post it to the child thread because it
+      // has to be cleared in the child thread. A picture only needs to be
+      // cleared once. If the decoder is changing resolution, resetting or
+      // flushing, send all pictures to ensure PictureReady arrive before
+      // ProvidePictureBuffers, NotifyResetDone, or NotifyFlushDone.
+      child_task_runner_->PostTaskAndReply(
+          FROM_HERE, base::Bind(&Client::PictureReady, client_, picture),
+          // Unretained is safe. If Client::PictureReady gets to run, |this| is
+          // alive. Destroy() will wait for the decode thread to finish.
+          base::Bind(&V4L2VideoDecodeAccelerator::PictureCleared,
+                     base::Unretained(this)));
+      picture_clearing_count_++;
+      pending_picture_ready_.pop();
+    } else {
+      // This picture is cleared. But some pictures are about to be cleared on
+      // the child thread. To preserve the order, do not send this until those
+      // pictures are cleared.
+      break;
+    }
+  }
+}
+
+void V4L2VideoDecodeAccelerator::PictureCleared() {
+  DVLOGF(4) << "clearing count=" << picture_clearing_count_;
+  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+  DCHECK_GT(picture_clearing_count_, 0);
+  picture_clearing_count_--;
+  SendPictureReady();
+}
+
+}  // namespace media
diff --git a/vda/v4l2_video_decode_accelerator.h b/vda/v4l2_video_decode_accelerator.h
new file mode 100644
index 0000000..e18cab4
--- /dev/null
+++ b/vda/v4l2_video_decode_accelerator.h
@@ -0,0 +1,513 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of VideoDecodeAccelerator
+// that utilizes hardware video decoders, which expose Video4Linux 2 API
+// (http://linuxtv.org/downloads/v4l-dvb-apis/).
+// Note: ported from Chromium commit head: 85fdf90
+// Note: image processor is not ported.
+
+#ifndef MEDIA_GPU_V4L2_VIDEO_DECODE_ACCELERATOR_H_
+#define MEDIA_GPU_V4L2_VIDEO_DECODE_ACCELERATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "picture.h"
+#include "size.h"
+#include "v4l2_device.h"
+#include "video_decode_accelerator.h"
+
+namespace media {
+
+class H264Parser;
+
+// This class handles video accelerators directly through a V4L2 device exported
+// by the hardware blocks.
+//
+// The threading model of this class is driven by the fact that it needs to
+// interface two fundamentally different event queues -- the one Chromium
+// provides through MessageLoop, and the one driven by the V4L2 devices which
+// is waited on with epoll().  There are three threads involved in this class:
+//
+// * The child thread, which is the main GPU process thread which calls the
+//   VideoDecodeAccelerator entry points.  Calls from this thread
+//   generally do not block (with the exception of Initialize() and Destroy()).
+//   They post tasks to the decoder_thread_, which actually services the task
+//   and calls back when complete through the
+//   VideoDecodeAccelerator::Client interface.
+// * The decoder_thread_, owned by this class.  It services API tasks, through
+//   the *Task() routines, as well as V4L2 device events, through
+//   ServiceDeviceTask().  Almost all state modification is done on this thread
+//   (this doesn't include buffer (re)allocation sequence, see below).
+// * The device_poll_thread_, owned by this class.  All it does is epoll() on
+//   the V4L2 in DevicePollTask() and schedule a ServiceDeviceTask() on the
+//   decoder_thread_ when something interesting happens.
+//   TODO(sheu): replace this thread with a TYPE_IO decoder_thread_.
+//
+// Note that this class has (almost) no locks, apart from the pictures_assigned_
+// WaitableEvent. Everything (apart from buffer (re)allocation) is serviced on
+// the decoder_thread_, so there are no synchronization issues.
+// ... well, there are, but it's a matter of getting messages posted in the
+// right order, not fiddling with locks.
+// Buffer creation is a two-step process that is serviced partially on the
+// Child thread, because we need to wait for the client to provide textures
+// for the buffers we allocate. We cannot keep the decoder thread running while
+// the client allocates Pictures for us, because we need to REQBUFS first to get
+// the required number of output buffers from the device and that cannot be done
+// unless we free the previous set of buffers, leaving the decoding in an
+// inoperable state for the duration of the wait for Pictures. So to prevent
+// subtle races (esp. if we get Reset() in the meantime), we block the decoder
+// thread while we wait for AssignPictureBuffers from the client.
+//
+// V4L2VideoDecodeAccelerator may use image processor to convert the output.
+// There are three cases:
+// Flush: V4L2VDA should wait until image processor returns all processed
+//   frames.
+// Reset: V4L2VDA doesn't need to wait for image processor. When image processor
+//   returns an old frame, drop it.
+// Resolution change: V4L2VDA destroys the image processor when destroying
+//   output buffers. We cannot drop any frame during resolution change. So V4L2VDA
+//   should destroy output buffers after image processor returns all the frames.
+class V4L2VideoDecodeAccelerator
+    : public VideoDecodeAccelerator {
+ public:
+  V4L2VideoDecodeAccelerator(
+      const scoped_refptr<V4L2Device>& device);
+  ~V4L2VideoDecodeAccelerator() override;
+
+  // VideoDecodeAccelerator implementation.
+  // Note: Initialize() and Destroy() are synchronous.
+  bool Initialize(const Config& config, Client* client) override;
+  void Decode(const BitstreamBuffer& bitstream_buffer) override;
+  void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
+  void ImportBufferForPicture(
+      int32_t picture_buffer_id,
+      VideoPixelFormat pixel_format,
+      const NativePixmapHandle& native_pixmap_handle) override;
+  void ReusePictureBuffer(int32_t picture_buffer_id) override;
+  void Flush() override;
+  void Reset() override;
+  void Destroy() override;
+  bool TryToSetupDecodeOnSeparateThread(
+      const base::WeakPtr<Client>& decode_client,
+      const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
+      override;
+
+  static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
+
+ private:
+  // These are rather subjectively tuned.
+  enum {
+    kInputBufferCount = 8,
+    // TODO(posciak): determine input buffer size based on level limits.
+    // See http://crbug.com/255116.
+    // Input bitstream buffer size for up to 1080p streams.
+    kInputBufferMaxSizeFor1080p = 1024 * 1024,
+    // Input bitstream buffer size for up to 4k streams.
+    kInputBufferMaxSizeFor4k = 4 * kInputBufferMaxSizeFor1080p,
+    // This is originally from media/base/limits.h in Chromium.
+    kMaxVideoFrames = 4,
+    // Number of output buffers to use for each VDA stage above what's required
+    // by the decoder (e.g. DPB size, in H264).  We need
+    // limits::kMaxVideoFrames to fill up the GpuVideoDecode pipeline,
+    // and +1 for a frame in transit.
+    kDpbOutputBufferExtraCount = kMaxVideoFrames + 1,
+    // Number of extra output buffers if image processor is used.
+    kDpbOutputBufferExtraCountForImageProcessor = 1,
+  };
+
+  // Internal state of the decoder.
+  enum State {
+    kUninitialized,  // Initialize() not yet called.
+    kInitialized,    // Initialize() returned true; ready to start decoding.
+    kDecoding,       // DecodeBufferInitial() successful; decoding frames.
+    kResetting,      // Presently resetting.
+    // Performing resolution change and waiting for image processor to return
+    // all frames.
+    kChangingResolution,
+    // Requested new PictureBuffers via ProvidePictureBuffers(), awaiting
+    // AssignPictureBuffers().
+    kAwaitingPictureBuffers,
+    kError,  // Error in kDecoding state.
+  };
+
+  enum OutputRecordState {
+    kFree,         // Ready to be queued to the device.
+    kAtDevice,     // Held by device.
+    kAtProcessor,  // Held by image processor.
+    kAtClient,     // Held by client of V4L2VideoDecodeAccelerator.
+  };
+
+  enum BufferId {
+    kFlushBufferId = -2  // Buffer id for flush buffer, queued by FlushTask().
+  };
+
+  // Auto-destruction reference for BitstreamBuffer, for message-passing from
+  // Decode() to DecodeTask().
+  struct BitstreamBufferRef;
+
+  // Record for decoded pictures that can be sent to PictureReady.
+  struct PictureRecord {
+    PictureRecord(bool cleared, const Picture& picture);
+    ~PictureRecord();
+    bool cleared;     // Whether the texture is cleared and safe to render from.
+    Picture picture;  // The decoded picture.
+  };
+
+  // Record for input buffers.
+  struct InputRecord {
+    InputRecord();
+    ~InputRecord();
+    bool at_device;    // held by device.
+    void* address;     // mmap() address.
+    size_t length;     // mmap() length.
+    off_t bytes_used;  // bytes filled in the mmap() segment.
+    int32_t input_id;  // triggering input_id as given to Decode().
+  };
+
+  // Record for output buffers.
+  struct OutputRecord {
+    OutputRecord();
+    OutputRecord(OutputRecord&&) = default;
+    ~OutputRecord();
+    OutputRecordState state;
+    int32_t picture_id;     // picture buffer id as returned to PictureReady().
+    bool cleared;           // Whether the texture is cleared and safe to render
+                            // from. See TextureManager for details.
+    // Output fds of the processor. Used only when OutputMode is IMPORT.
+    std::vector<base::ScopedFD> processor_output_fds;
+  };
+
+  //
+  // Decoding tasks, to be run on decode_thread_.
+  //
+
+  // Task to finish initialization on decoder_thread_.
+  void InitializeTask();
+
+  // Enqueue a BitstreamBuffer to decode.  This will enqueue a buffer to the
+  // decoder_input_queue_, then queue a DecodeBufferTask() to actually decode
+  // the buffer.
+  void DecodeTask(const BitstreamBuffer& bitstream_buffer);
+
+  // Decode from the buffers queued in decoder_input_queue_.  Calls
+  // DecodeBufferInitial() or DecodeBufferContinue() as appropriate.
+  void DecodeBufferTask();
+  // Advance to the next fragment that begins a frame.
+  bool AdvanceFrameFragment(const uint8_t* data, size_t size, size_t* endpos);
+  // Schedule another DecodeBufferTask() if we're behind.
+  void ScheduleDecodeBufferTaskIfNeeded();
+
+  // Return true if we should continue to schedule DecodeBufferTask()s after
+  // completion.  Store the amount of input actually consumed in |endpos|.
+  bool DecodeBufferInitial(const void* data, size_t size, size_t* endpos);
+  bool DecodeBufferContinue(const void* data, size_t size);
+
+  // Accumulate data for the next frame to decode.  May return false in
+  // non-error conditions; for example when pipeline is full and should be
+  // retried later.
+  bool AppendToInputFrame(const void* data, size_t size);
+  // Flush data for one decoded frame.
+  bool FlushInputFrame();
+
+  // Allocate V4L2 buffers and assign them to |buffers| provided by the client
+  // via AssignPictureBuffers() on decoder thread.
+  void AssignPictureBuffersTask(const std::vector<PictureBuffer>& buffers);
+
+  // Use buffer backed by dmabuf file descriptors in |dmabuf_fds| for the
+  // OutputRecord associated with |picture_buffer_id|, taking ownership of the
+  // file descriptors.
+  void ImportBufferForPictureTask(int32_t picture_buffer_id,
+                                  std::vector<base::ScopedFD> dmabuf_fds);
+
+  // Service I/O on the V4L2 devices.  This task should only be scheduled from
+  // DevicePollTask().  If |event_pending| is true, one or more events
+  // on file descriptor are pending.
+  void ServiceDeviceTask(bool event_pending);
+  // Handle the various device queues.
+  void Enqueue();
+  void Dequeue();
+  // Dequeue one input buffer. Return true if success.
+  bool DequeueInputBuffer();
+  // Dequeue one output buffer. Return true if success.
+  bool DequeueOutputBuffer();
+
+  // Return true if there is a resolution change event pending.
+  bool DequeueResolutionChangeEvent();
+
+  // Enqueue a buffer on the corresponding queue.
+  bool EnqueueInputRecord();
+  bool EnqueueOutputRecord();
+
+  // Process a ReusePictureBuffer() API call.  The API call create an EGLSync
+  // object on the main (GPU process) thread; we will record this object so we
+  // can wait on it before reusing the buffer.
+  void ReusePictureBufferTask(int32_t picture_buffer_id);
+
+  // Flush() task.  Child thread should not submit any more buffers until it
+  // receives the NotifyFlushDone callback.  This task will schedule an empty
+  // BitstreamBufferRef (with input_id == kFlushBufferId) to perform the flush.
+  void FlushTask();
+  // Notify the client of a flush completion, if required.  This should be
+  // called any time a relevant queue could potentially be emptied: see
+  // function definition.
+  void NotifyFlushDoneIfNeeded();
+  // Returns true if VIDIOC_DECODER_CMD is supported.
+  bool IsDecoderCmdSupported();
+  // Send V4L2_DEC_CMD_STOP to the driver. Return true if success.
+  bool SendDecoderCmdStop();
+
+  // Reset() task.  Drop all input buffers. If V4L2VDA is not doing resolution
+  // change or waiting for picture buffers, call FinishReset.
+  void ResetTask();
+  // This will schedule a ResetDoneTask() that will send the NotifyResetDone
+  // callback, then set the decoder state to kResetting so that all intervening
+  // tasks will drain.
+  void FinishReset();
+  void ResetDoneTask();
+
+  // Device destruction task.
+  void DestroyTask();
+
+  // Start |device_poll_thread_|.
+  bool StartDevicePoll();
+
+  // Stop |device_poll_thread_|.
+  bool StopDevicePoll();
+
+  bool StopInputStream();
+  bool StopOutputStream();
+
+  void StartResolutionChange();
+  void FinishResolutionChange();
+
+  // Try to get output format and visible size, detected after parsing the
+  // beginning of the stream. Sets |again| to true if more parsing is needed.
+  // |visible_size| could be nullptr and ignored.
+  bool GetFormatInfo(struct v4l2_format* format,
+                     Size* visible_size,
+                     bool* again);
+  // Create output buffers for the given |format| and |visible_size|.
+  bool CreateBuffersForFormat(const struct v4l2_format& format,
+                              const Size& visible_size);
+
+  // Try to get |visible_size|. Return visible size, or, if querying it is not
+  // supported or produces invalid size, return |coded_size| instead.
+  Size GetVisibleSize(const Size& coded_size);
+
+  //
+  // Device tasks, to be run on device_poll_thread_.
+  //
+
+  // The device task.
+  void DevicePollTask(bool poll_device);
+
+  //
+  // Safe from any thread.
+  //
+
+  // Error notification (using PostTask() to child thread, if necessary).
+  void NotifyError(Error error);
+
+  // Set the decoder_state_ to kError and notify the client (if necessary).
+  void SetErrorState(Error error);
+
+  //
+  // Other utility functions.  Called on decoder_thread_, unless
+  // decoder_thread_ is not yet started, in which case the child thread can call
+  // these (e.g. in Initialize() or Destroy()).
+  //
+
+  // Create the buffers we need.
+  bool CreateInputBuffers();
+  bool CreateOutputBuffers();
+
+  // Destroy buffers.
+  void DestroyInputBuffers();
+  // In contrast to DestroyInputBuffers, which is called only on destruction,
+  // we call DestroyOutputBuffers also during playback, on resolution change.
+  // Even if anything fails along the way, we still want to go on and clean
+  // up as much as possible, so return false if this happens, so that the
+  // caller can error out on resolution change.
+  bool DestroyOutputBuffers();
+
+  // Set input and output formats before starting decode.
+  bool SetupFormats();
+
+  //
+  // Methods run on child thread.
+  //
+
+  // Send decoded pictures to PictureReady.
+  void SendPictureReady();
+
+  // Callback that indicates a picture has been cleared.
+  void PictureCleared();
+
+  // Our original calling task runner for the child thread.
+  scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
+
+  // Task runner Decode() and PictureReady() run on.
+  scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner_;
+
+  // WeakPtr<> pointing to |this| for use in posting tasks from the decoder or
+  // device worker threads back to the child thread.  Because the worker threads
+  // are members of this class, any task running on those threads is guaranteed
+  // that this object is still alive.  As a result, tasks posted from the child
+  // thread to the decoder or device thread should use base::Unretained(this),
+  // and tasks posted the other way should use |weak_this_|.
+  base::WeakPtr<V4L2VideoDecodeAccelerator> weak_this_;
+
+  // To expose client callbacks from VideoDecodeAccelerator.
+  // NOTE: all calls to these objects *MUST* be executed on
+  // child_task_runner_.
+  std::unique_ptr<base::WeakPtrFactory<Client>> client_ptr_factory_;
+  base::WeakPtr<Client> client_;
+  // Callbacks to |decode_client_| must be executed on |decode_task_runner_|.
+  base::WeakPtr<Client> decode_client_;
+
+  //
+  // Decoder state, owned and operated by decoder_thread_.
+  // Before decoder_thread_ has started, the decoder state is managed by
+  // the child (main) thread.  After decoder_thread_ has started, the decoder
+  // thread should be the only one managing these.
+  //
+
+  // This thread services tasks posted from the VDA API entry points by the
+  // child thread and device service callbacks posted from the device thread.
+  base::Thread decoder_thread_;
+  // Decoder state machine state.
+  State decoder_state_;
+
+  Config::OutputMode output_mode_;
+
+  // BitstreamBuffer we're presently reading.
+  std::unique_ptr<BitstreamBufferRef> decoder_current_bitstream_buffer_;
+  // The V4L2Device this class is operating upon.
+  scoped_refptr<V4L2Device> device_;
+  // FlushTask() and ResetTask() should not affect buffers that have been
+  // queued afterwards.  For flushing or resetting the pipeline then, we will
+  // delay these buffers until after the flush or reset completes.
+  int decoder_delay_bitstream_buffer_id_;
+  // Input buffer we're presently filling.
+  int decoder_current_input_buffer_;
+  // We track the number of buffer decode tasks we have scheduled, since each
+  // task execution should complete one buffer.  If we fall behind (due to
+  // resource backpressure, etc.), we'll have to schedule more to catch up.
+  int decoder_decode_buffer_tasks_scheduled_;
+  // Picture buffers held by the client.
+  int decoder_frames_at_client_;
+
+  // Are we flushing?
+  bool decoder_flushing_;
+  // True if VIDIOC_DECODER_CMD is supported.
+  bool decoder_cmd_supported_;
+  // True if flushing is waiting for last output buffer. After
+  // VIDIOC_DECODER_CMD is sent to the driver, this flag will be set to true to
+  // wait for the last output buffer. When this flag is true, flush done will
+  // not be sent. After an output buffer that has the flag V4L2_BUF_FLAG_LAST is
+  // received, this is set to false.
+  bool flush_awaiting_last_output_buffer_;
+
+  // Got a reset request while we were performing resolution change or waiting for
+  // picture buffers.
+  bool reset_pending_;
+  // Input queue for decoder_thread_: BitstreamBuffers in.
+  std::queue<linked_ptr<BitstreamBufferRef>> decoder_input_queue_;
+  // For H264 decode, hardware requires that we send it frame-sized chunks.
+  // We'll need to parse the stream.
+  std::unique_ptr<H264Parser> decoder_h264_parser_;
+  // Set if the decoder has a pending incomplete frame in an input buffer.
+  bool decoder_partial_frame_pending_;
+
+  //
+  // Hardware state and associated queues.  Since decoder_thread_ services
+  // the hardware, decoder_thread_ owns these too.
+  // output_buffer_map_, free_output_buffers_ and output_planes_count_ are an
+  // exception during the buffer (re)allocation sequence, when the
+  // decoder_thread_ is blocked briefly while the Child thread manipulates
+  // them.
+  //
+
+  // Completed decode buffers.
+  std::queue<int> input_ready_queue_;
+
+  // Input buffer state.
+  bool input_streamon_;
+  // Input buffers enqueued to device.
+  int input_buffer_queued_count_;
+  // Input buffers ready to use, as a LIFO since we don't care about ordering.
+  std::vector<int> free_input_buffers_;
+  // Mapping of int index to input buffer record.
+  std::vector<InputRecord> input_buffer_map_;
+
+  // Output buffer state.
+  bool output_streamon_;
+  // Output buffers enqueued to device.
+  int output_buffer_queued_count_;
+  // Output buffers ready to use, as a FIFO since we want oldest-first to hide
+  // synchronization latency with GL.
+  std::list<int> free_output_buffers_;
+  // Mapping of int index to output buffer record.
+  std::vector<OutputRecord> output_buffer_map_;
+  // Required size of DPB for decoding.
+  int output_dpb_size_;
+
+  // Number of planes (i.e. separate memory buffers) for output.
+  size_t output_planes_count_;
+
+  // Pictures that are ready but not sent to PictureReady yet.
+  std::queue<PictureRecord> pending_picture_ready_;
+
+  // The number of pictures that are sent to PictureReady and will be cleared.
+  int picture_clearing_count_;
+
+  // Output picture coded size.
+  Size coded_size_;
+
+  // Output picture visible size.
+  Size visible_size_;
+
+  //
+  // The device polling thread handles notifications of V4L2 device changes.
+  //
+
+  // The thread.
+  base::Thread device_poll_thread_;
+
+  //
+  // Other state, held by the child (main) thread.
+  //
+
+  // The codec we'll be decoding for.
+  VideoCodecProfile video_profile_;
+  // Chosen input format for video_profile_.
+  uint32_t input_format_fourcc_;
+  // Chosen output format.
+  uint32_t output_format_fourcc_;
+
+  // Input format V4L2 fourccs this class supports.
+  static const uint32_t supported_input_fourccs_[];
+
+  // The WeakPtrFactory for |weak_this_|.
+  base::WeakPtrFactory<V4L2VideoDecodeAccelerator> weak_this_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(V4L2VideoDecodeAccelerator);
+};
+
+}  // namespace media
+
+#endif  // MEDIA_GPU_V4L2_VIDEO_DECODE_ACCELERATOR_H_
diff --git a/vda/video_codecs.cc b/vda/video_codecs.cc
index 995ee38..61d0708 100644
--- a/vda/video_codecs.cc
+++ b/vda/video_codecs.cc
@@ -1,6 +1,8 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: b03fc92
+// Note: only necessary functions are ported.
 
 #include "video_codecs.h"
 
diff --git a/vda/video_codecs.h b/vda/video_codecs.h
index 30df7ec..2c88d50 100644
--- a/vda/video_codecs.h
+++ b/vda/video_codecs.h
@@ -1,6 +1,8 @@
 // Copyright 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: b03fc92
+// Note: only necessary functions are ported.
 
 #ifndef VIDEO_CODECS_H_
 #define VIDEO_CODECS_H_
diff --git a/vda/video_decode_accelerator.cc b/vda/video_decode_accelerator.cc
index 49afd44..e74d1ec 100644
--- a/vda/video_decode_accelerator.cc
+++ b/vda/video_decode_accelerator.cc
@@ -1,6 +1,7 @@
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 85fdf90
 
 #include "base/logging.h"
 
@@ -27,7 +28,7 @@
   NOTREACHED() << "By default deferred initialization is not supported.";
 }
 
-VideoDecodeAccelerator::~VideoDecodeAccelerator() {}
+VideoDecodeAccelerator::~VideoDecodeAccelerator() = default;
 
 bool VideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
     const base::WeakPtr<Client>& decode_client,
@@ -39,21 +40,22 @@
 
 void VideoDecodeAccelerator::ImportBufferForPicture(
     int32_t picture_buffer_id,
-    const std::vector<base::FileDescriptor>& dmabuf_fds) {
+    VideoPixelFormat pixel_format,
+    const NativePixmapHandle& native_pixmap_handle) {
   NOTREACHED() << "Buffer import not supported.";
 }
 
 VideoDecodeAccelerator::SupportedProfile::SupportedProfile()
     : profile(VIDEO_CODEC_PROFILE_UNKNOWN), encrypted_only(false) {}
 
-VideoDecodeAccelerator::SupportedProfile::~SupportedProfile() {}
+VideoDecodeAccelerator::SupportedProfile::~SupportedProfile() = default;
 
 VideoDecodeAccelerator::Capabilities::Capabilities() : flags(NO_FLAGS) {}
 
 VideoDecodeAccelerator::Capabilities::Capabilities(const Capabilities& other) =
     default;
 
-VideoDecodeAccelerator::Capabilities::~Capabilities() {}
+VideoDecodeAccelerator::Capabilities::~Capabilities() = default;
 
 std::string VideoDecodeAccelerator::Capabilities::AsHumanReadableString()
     const {
diff --git a/vda/video_decode_accelerator.h b/vda/video_decode_accelerator.h
index 8343abe..10601be 100644
--- a/vda/video_decode_accelerator.h
+++ b/vda/video_decode_accelerator.h
@@ -1,17 +1,18 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 85fdf90
 
 #ifndef VIDEO_DECODE_ACCELERATOR_H_
 #define VIDEO_DECODE_ACCELERATOR_H_
 
 #include <vector>
 
-#include "base/file_descriptor_posix.h"
 #include "base/memory/ref_counted.h"
 #include "base/memory/weak_ptr.h"
 
 #include "bitstream_buffer.h"
+#include "native_pixmap_handle.h"
 #include "picture.h"
 #include "size.h"
 #include "video_codecs.h"
@@ -245,16 +246,21 @@
   virtual void AssignPictureBuffers(
       const std::vector<PictureBuffer>& buffers) = 0;
 
-  // Imports |dmabuf_fds| as backing memory for picture buffer
-  // associated with |picture_buffer_id|. This can only be be used if the VDA
-  // has been Initialize()d with config.output_mode = IMPORT, and should be
-  // preceded by a call to AssignPictureBuffers() to set up the number of
-  // PictureBuffers and their details.
+  // Imports |gpu_memory_buffer_handle|, pointing to a buffer in |pixel_format|,
+  // as backing memory for picture buffer associated with |picture_buffer_id|.
+  // This can only be used if the VDA has been Initialize()d with
+  // config.output_mode = IMPORT, and should be preceded by a call to
+  // AssignPictureBuffers() to set up the number of PictureBuffers and their
+  // details.
+  // The |pixel_format| used here may be different from the |pixel_format|
+  // required in ProvidePictureBuffers(). If the buffer cannot be imported an
+  // error should be notified via NotifyError().
   // After this call, the VDA becomes the owner of those file descriptors,
   // and is responsible for closing it after use, also on import failure.
   virtual void ImportBufferForPicture(
       int32_t picture_buffer_id,
-      const std::vector<base::FileDescriptor>& dmabuf_fds);
+      VideoPixelFormat pixel_format,
+      const NativePixmapHandle& native_pixmap_handle);
 
   // Sends picture buffers to be reused by the decoder. This needs to be called
   // for each buffer that has been processed so that decoder may know onto which
diff --git a/vda/video_pixel_format.h b/vda/video_pixel_format.h
index d593dad..7f75cc4 100644
--- a/vda/video_pixel_format.h
+++ b/vda/video_pixel_format.h
@@ -1,6 +1,8 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 006301b
+// Note: only necessary functions are ported from video_types.h
 
 #ifndef VIDEO_PIXEL_FORMAT_H_
 #define VIDEO_PIXEL_FORMAT_H_
@@ -15,10 +17,14 @@
   PIXEL_FORMAT_UNKNOWN = 0,  // Unknown or unspecified format value.
   PIXEL_FORMAT_I420 =
       1,  // 12bpp YUV planar 1x1 Y, 2x2 UV samples, a.k.a. YU12.
-  PIXEL_FORMAT_YV12 = 2,   // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
-  PIXEL_FORMAT_YV16 = 3,   // 16bpp YVU planar 1x1 Y, 2x1 VU samples.
-  PIXEL_FORMAT_YV12A = 4,  // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
-  PIXEL_FORMAT_YV24 = 5,   // 24bpp YUV planar, no subsampling.
+
+  // Note: Chrome does not actually support YVU compositing, so you probably
+  // don't actually want to use this. See http://crbug.com/784627.
+  PIXEL_FORMAT_YV12 = 2,  // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
+
+  PIXEL_FORMAT_I422 = 3,   // 16bpp YUV planar 1x1 Y, 2x1 UV samples.
+  PIXEL_FORMAT_I420A = 4,  // 20bpp YUVA planar 1x1 Y, 2x2 UV, 1x1 A samples.
+  PIXEL_FORMAT_I444 = 5,   // 24bpp YUV planar, no subsampling.
   PIXEL_FORMAT_NV12 =
       6,  // 12bpp with Y plane followed by a 2x2 interleaved UV plane.
   PIXEL_FORMAT_NV21 =
@@ -42,6 +48,8 @@
   // Plane size = Row pitch * (((height+31)/32)*32)
   PIXEL_FORMAT_MT21 = 15,
 
+  // The P* in the formats below designates the number of bits per pixel. I.e.
+  // P9 is 9-bits per pixel, P10 is 10-bits per pixel, etc.
   PIXEL_FORMAT_YUV420P9 = 16,
   PIXEL_FORMAT_YUV420P10 = 17,
   PIXEL_FORMAT_YUV422P9 = 18,
@@ -53,15 +61,12 @@
   PIXEL_FORMAT_YUV422P12 = 23,
   PIXEL_FORMAT_YUV444P12 = 24,
 
-  PIXEL_FORMAT_Y8 = 25,   // single 8bpp plane.
+  /* PIXEL_FORMAT_Y8 = 25, Deprecated */
   PIXEL_FORMAT_Y16 = 26,  // single 16bpp plane.
 
-  PIXEL_FORMAT_I422 =
-      27,  // 16bpp YUV planar 1x1 Y, 2x1 UV samples, a.k.a. YU16.
-
   // Please update UMA histogram enumeration when adding new formats here.
   PIXEL_FORMAT_MAX =
-      PIXEL_FORMAT_I422,  // Must always be equal to largest entry logged.
+      PIXEL_FORMAT_Y16,  // Must always be equal to largest entry logged.
 };
 
 }  // namespace media
diff --git a/vda/vp8_bool_decoder.cc b/vda/vp8_bool_decoder.cc
index e42aef0..68f06d0 100644
--- a/vda/vp8_bool_decoder.cc
+++ b/vda/vp8_bool_decoder.cc
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 //
+// Note: ported from Chromium commit head: 9b6f429
 
 /*
  * Copyright (c) 2010, The WebM Project authors. All rights reserved.
@@ -104,7 +105,7 @@
   int shift = VP8_BD_VALUE_BIT - CHAR_BIT - (count_ + CHAR_BIT);
   size_t bytes_left = user_buffer_end_ - user_buffer_;
   size_t bits_left = bytes_left * CHAR_BIT;
-  int x = static_cast<int>(shift + CHAR_BIT - bits_left);
+  int x = shift + CHAR_BIT - static_cast<int>(bits_left);
   int loop_end = 0;
 
   if (x >= 0) {
@@ -140,7 +141,7 @@
   size_t shift = kVp8Norm[range_];
   range_ <<= shift;
   value_ <<= shift;
-  count_ -= shift;
+  count_ -= static_cast<int>(shift);
 
   DCHECK_EQ(1U, (range_ >> 7));  // In the range [128, 255].
 
diff --git a/vda/vp8_bool_decoder.h b/vda/vp8_bool_decoder.h
index 445fd68..4b8e3a5 100644
--- a/vda/vp8_bool_decoder.h
+++ b/vda/vp8_bool_decoder.h
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 //
+// Note: ported from Chromium commit head: 1323b9c
 
 /*
  * Copyright (c) 2010, The WebM Project authors. All rights reserved.
diff --git a/vda/vp8_decoder.cc b/vda/vp8_decoder.cc
index d9ee6e4..cd2d58b 100644
--- a/vda/vp8_decoder.cc
+++ b/vda/vp8_decoder.cc
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 7441087
 
 #include "vp8_decoder.h"
 
@@ -93,6 +94,7 @@
   if (!curr_pic_)
     return kRanOutOfSurfaces;
 
+  curr_pic_->visible_rect = Rect(pic_size_);
   if (!DecodeAndOutputCurrentFrame())
     return kDecodeError;
 
diff --git a/vda/vp8_decoder.h b/vda/vp8_decoder.h
index 653da40..58211f6 100644
--- a/vda/vp8_decoder.h
+++ b/vda/vp8_decoder.h
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 60f9667
 
 #ifndef VP8_DECODER_H_
 #define VP8_DECODER_H_
diff --git a/vda/vp8_parser.cc b/vda/vp8_parser.cc
index 46eb669..5367545 100644
--- a/vda/vp8_parser.cc
+++ b/vda/vp8_parser.cc
@@ -4,6 +4,8 @@
 //
 // This file contains an implementation of a VP8 raw stream parser,
 // as defined in RFC 6386.
+// Note: ported from Chromium commit head: 2de6929
+
 
 #include "base/logging.h"
 #include "vp8_parser.h"
@@ -51,8 +53,7 @@
 Vp8Parser::Vp8Parser() : stream_(nullptr), bytes_left_(0) {
 }
 
-Vp8Parser::~Vp8Parser() {
-}
+Vp8Parser::~Vp8Parser() = default;
 
 bool Vp8Parser::ParseFrame(const uint8_t* ptr,
                            size_t frame_size,
diff --git a/vda/vp8_parser.h b/vda/vp8_parser.h
index ef9326c..c75e6cc 100644
--- a/vda/vp8_parser.h
+++ b/vda/vp8_parser.h
@@ -4,6 +4,7 @@
 //
 // This file contains an implementation of a VP8 raw stream parser,
 // as defined in RFC 6386.
+// Note: ported from Chromium commit head: 1323b9c
 
 #ifndef VP8_PARSER_H_
 #define VP8_PARSER_H_
diff --git a/vda/vp8_picture.cc b/vda/vp8_picture.cc
index 59938aa..b9030ce 100644
--- a/vda/vp8_picture.cc
+++ b/vda/vp8_picture.cc
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 6e70beb
 
 #include "vp8_picture.h"
 
diff --git a/vda/vp8_picture.h b/vda/vp8_picture.h
index eb253a4..bd04ec7 100644
--- a/vda/vp8_picture.h
+++ b/vda/vp8_picture.h
@@ -1,25 +1,30 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 70340ce
 
 #ifndef VP8_PICTURE_H_
 #define VP8_PICTURE_H_
 
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
+#include "rect.h"
 
 namespace media {
 
 class V4L2VP8Picture;
 
-class VP8Picture : public base::RefCounted<VP8Picture> {
+class VP8Picture : public base::RefCountedThreadSafe<VP8Picture> {
  public:
   VP8Picture();
 
   virtual V4L2VP8Picture* AsV4L2VP8Picture();
 
+  // The visible size of picture.
+  Rect visible_rect;
+
  protected:
-  friend class base::RefCounted<VP8Picture>;
+  friend class base::RefCountedThreadSafe<VP8Picture>;
   virtual ~VP8Picture();
 
   DISALLOW_COPY_AND_ASSIGN(VP8Picture);
diff --git a/vda/vp9_bool_decoder.cc b/vda/vp9_bool_decoder.cc
index bf227b2..1d2b6f4 100644
--- a/vda/vp9_bool_decoder.cc
+++ b/vda/vp9_bool_decoder.cc
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 1323b9c
 
 #include "vp9_bool_decoder.h"
 
@@ -35,9 +36,9 @@
 };
 }  // namespace
 
-Vp9BoolDecoder::Vp9BoolDecoder() {}
+Vp9BoolDecoder::Vp9BoolDecoder() = default;
 
-Vp9BoolDecoder::~Vp9BoolDecoder() {}
+Vp9BoolDecoder::~Vp9BoolDecoder() = default;
 
 // 9.2.1 Initialization process for Boolean decoder
 bool Vp9BoolDecoder::Initialize(const uint8_t* data, size_t size) {
diff --git a/vda/vp9_bool_decoder.h b/vda/vp9_bool_decoder.h
index 3862e51..50c386f 100644
--- a/vda/vp9_bool_decoder.h
+++ b/vda/vp9_bool_decoder.h
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: e5a9a62
 
 #ifndef VP9_BOOL_DECODER_H_
 #define VP9_BOOL_DECODER_H_
diff --git a/vda/vp9_compressed_header_parser.cc b/vda/vp9_compressed_header_parser.cc
index d5ee772..524472f 100644
--- a/vda/vp9_compressed_header_parser.cc
+++ b/vda/vp9_compressed_header_parser.cc
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "vp9_compressed_header_parser.h"
 
@@ -59,7 +60,7 @@
 
 }  // namespace
 
-Vp9CompressedHeaderParser::Vp9CompressedHeaderParser() {}
+Vp9CompressedHeaderParser::Vp9CompressedHeaderParser() = default;
 
 // 6.3.1 Tx mode syntax
 void Vp9CompressedHeaderParser::ReadTxMode(Vp9FrameHeader* fhdr) {
diff --git a/vda/vp9_compressed_header_parser.h b/vda/vp9_compressed_header_parser.h
index 032a880..5f5ff56 100644
--- a/vda/vp9_compressed_header_parser.h
+++ b/vda/vp9_compressed_header_parser.h
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: e5a9a62
 
 #ifndef VP9_COMPRESSED_HEADER_PARSER_H_
 #define VP9_COMPRESSED_HEADER_PARSER_H_
diff --git a/vda/vp9_decoder.cc b/vda/vp9_decoder.cc
index 2ea6d16..d8af03d 100644
--- a/vda/vp9_decoder.cc
+++ b/vda/vp9_decoder.cc
@@ -1,7 +1,9 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 7441087
 
+#include "rect.h"
 #include "vp9_decoder.h"
 
 #include <memory>
@@ -136,6 +138,18 @@
     if (!pic)
       return kRanOutOfSurfaces;
 
+    Rect new_render_rect(curr_frame_hdr_->render_width,
+                         curr_frame_hdr_->render_height);
+    // For safety, check the validity of render size or leave it as (0, 0).
+    if (!Rect(pic_size_).Contains(new_render_rect)) {
+      DVLOG(1) << "Render size exceeds picture size. render size: "
+               << new_render_rect.ToString()
+               << ", picture size: " << pic_size_.ToString();
+      new_render_rect = Rect();
+    }
+    DVLOG(2) << "Render resolution: " << new_render_rect.ToString();
+
+    pic->visible_rect = new_render_rect;
     pic->frame_hdr.reset(curr_frame_hdr_.release());
 
     if (!DecodeAndOutputPicture(pic)) {
diff --git a/vda/vp9_decoder.h b/vda/vp9_decoder.h
index 77a8d88..cdbcd69 100644
--- a/vda/vp9_decoder.h
+++ b/vda/vp9_decoder.h
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 77118c9
 
 #ifndef VP9_DECODER_H_
 #define VP9_DECODER_H_
diff --git a/vda/vp9_parser.cc b/vda/vp9_parser.cc
index de51c7b..bbd90b9 100644
--- a/vda/vp9_parser.cc
+++ b/vda/vp9_parser.cc
@@ -8,6 +8,7 @@
 //  1 something wrong in bitstream
 //  2 parsing steps
 //  3 parsed values (selected)
+// Note: ported from Chromium commit head: 2de6929
 
 #include "vp9_parser.h"
 
@@ -22,6 +23,133 @@
 
 namespace media {
 
+namespace {
+
+// Coefficients extracted verbatim from "VP9 Bitstream & Decoding Process
+// Specification" Version 0.6, Sec 8.6.1 Dequantization functions, see:
+// https://www.webmproject.org/vp9/#draft-vp9-bitstream-and-decoding-process-specification
+constexpr size_t kQIndexRange = 256;
+// clang-format off
+// libva is the only user of high bit depth VP9 formats and only supports
+// 10 bits per component, see https://github.com/01org/libva/issues/137.
+// TODO(mcasas): Add the 12 bit versions of these tables.
+const int16_t kDcQLookup[][kQIndexRange] = {
+    {
+        4,    8,    8,    9,    10,   11,   12,   12,  13,   14,   15,   16,
+        17,   18,   19,   19,   20,   21,   22,   23,  24,   25,   26,   26,
+        27,   28,   29,   30,   31,   32,   32,   33,  34,   35,   36,   37,
+        38,   38,   39,   40,   41,   42,   43,   43,  44,   45,   46,   47,
+        48,   48,   49,   50,   51,   52,   53,   53,  54,   55,   56,   57,
+        57,   58,   59,   60,   61,   62,   62,   63,  64,   65,   66,   66,
+        67,   68,   69,   70,   70,   71,   72,   73,  74,   74,   75,   76,
+        77,   78,   78,   79,   80,   81,   81,   82,  83,   84,   85,   85,
+        87,   88,   90,   92,   93,   95,   96,   98,  99,   101,  102,  104,
+        105,  107,  108,  110,  111,  113,  114,  116, 117,  118,  120,  121,
+        123,  125,  127,  129,  131,  134,  136,  138, 140,  142,  144,  146,
+        148,  150,  152,  154,  156,  158,  161,  164, 166,  169,  172,  174,
+        177,  180,  182,  185,  187,  190,  192,  195, 199,  202,  205,  208,
+        211,  214,  217,  220,  223,  226,  230,  233, 237,  240,  243,  247,
+        250,  253,  257,  261,  265,  269,  272,  276, 280,  284,  288,  292,
+        296,  300,  304,  309,  313,  317,  322,  326, 330,  335,  340,  344,
+        349,  354,  359,  364,  369,  374,  379,  384, 389,  395,  400,  406,
+        411,  417,  423,  429,  435,  441,  447,  454, 461,  467,  475,  482,
+        489,  497,  505,  513,  522,  530,  539,  549, 559,  569,  579,  590,
+        602,  614,  626,  640,  654,  668,  684,  700, 717,  736,  755,  775,
+        796,  819,  843,  869,  896,  925,  955,  988, 1022, 1058, 1098, 1139,
+        1184, 1232, 1282, 1336,
+    },
+    {
+        4,    9,    10,   13,   15,   17,   20,   22,   25,   28,   31,   34,
+        37,   40,   43,   47,   50,   53,   57,   60,   64,   68,   71,   75,
+        78,   82,   86,   90,   93,   97,   101,  105,  109,  113,  116,  120,
+        124,  128,  132,  136,  140,  143,  147,  151,  155,  159,  163,  166,
+        170,  174,  178,  182,  185,  189,  193,  197,  200,  204,  208,  212,
+        215,  219,  223,  226,  230,  233,  237,  241,  244,  248,  251,  255,
+        259,  262,  266,  269,  273,  276,  280,  283,  287,  290,  293,  297,
+        300,  304,  307,  310,  314,  317,  321,  324,  327,  331,  334,  337,
+        343,  350,  356,  362,  369,  375,  381,  387,  394,  400,  406,  412,
+        418,  424,  430,  436,  442,  448,  454,  460,  466,  472,  478,  484,
+        490,  499,  507,  516,  525,  533,  542,  550,  559,  567,  576,  584,
+        592,  601,  609,  617,  625,  634,  644,  655,  666,  676,  687,  698,
+        708,  718,  729,  739,  749,  759,  770,  782,  795,  807,  819,  831,
+        844,  856,  868,  880,  891,  906,  920,  933,  947,  961,  975,  988,
+        1001, 1015, 1030, 1045, 1061, 1076, 1090, 1105, 1120, 1137, 1153, 1170,
+        1186, 1202, 1218, 1236, 1253, 1271, 1288, 1306, 1323, 1342, 1361, 1379,
+        1398, 1416, 1436, 1456, 1476, 1496, 1516, 1537, 1559, 1580, 1601, 1624,
+        1647, 1670, 1692, 1717, 1741, 1766, 1791, 1817, 1844, 1871, 1900, 1929,
+        1958, 1990, 2021, 2054, 2088, 2123, 2159, 2197, 2236, 2276, 2319, 2363,
+        2410, 2458, 2508, 2561, 2616, 2675, 2737, 2802, 2871, 2944, 3020, 3102,
+        3188, 3280, 3375, 3478, 3586, 3702, 3823, 3953, 4089, 4236, 4394, 4559,
+        4737, 4929, 5130, 5347
+   }
+};
+
+const int16_t kAcQLookup[][kQIndexRange] = {
+    {
+        4,    8,    9,    10,   11,   12,   13,   14,   15,   16,   17,   18,
+        19,   20,   21,   22,   23,   24,   25,   26,   27,   28,   29,   30,
+        31,   32,   33,   34,   35,   36,   37,   38,   39,   40,   41,   42,
+        43,   44,   45,   46,   47,   48,   49,   50,   51,   52,   53,   54,
+        55,   56,   57,   58,   59,   60,   61,   62,   63,   64,   65,   66,
+        67,   68,   69,   70,   71,   72,   73,   74,   75,   76,   77,   78,
+        79,   80,   81,   82,   83,   84,   85,   86,   87,   88,   89,   90,
+        91,   92,   93,   94,   95,   96,   97,   98,   99,   100,  101,  102,
+        104,  106,  108,  110,  112,  114,  116,  118,  120,  122,  124,  126,
+        128,  130,  132,  134,  136,  138,  140,  142,  144,  146,  148,  150,
+        152,  155,  158,  161,  164,  167,  170,  173,  176,  179,  182,  185,
+        188,  191,  194,  197,  200,  203,  207,  211,  215,  219,  223,  227,
+        231,  235,  239,  243,  247,  251,  255,  260,  265,  270,  275,  280,
+        285,  290,  295,  300,  305,  311,  317,  323,  329,  335,  341,  347,
+        353,  359,  366,  373,  380,  387,  394,  401,  408,  416,  424,  432,
+        440,  448,  456,  465,  474,  483,  492,  501,  510,  520,  530,  540,
+        550,  560,  571,  582,  593,  604,  615,  627,  639,  651,  663,  676,
+        689,  702,  715,  729,  743,  757,  771,  786,  801,  816,  832,  848,
+        864,  881,  898,  915,  933,  951,  969,  988,  1007, 1026, 1046, 1066,
+        1087, 1108, 1129, 1151, 1173, 1196, 1219, 1243, 1267, 1292, 1317, 1343,
+        1369, 1396, 1423, 1451, 1479, 1508, 1537, 1567, 1597, 1628, 1660, 1692,
+        1725, 1759, 1793, 1828,
+    },
+    {
+        4,    9,    11,   13,   16,   18,   21,   24,   27,   30,   33,   37,
+        40,   44,   48,   51,   55,   59,   63,   67,   71,   75,   79,   83,
+        88,   92,   96,   100,  105,  109,  114,  118,  122,  127,  131,  136,
+        140,  145,  149,  154,  158,  163,  168,  172,  177,  181,  186,  190,
+        195,  199,  204,  208,  213,  217,  222,  226,  231,  235,  240,  244,
+        249,  253,  258,  262,  267,  271,  275,  280,  284,  289,  293,  297,
+        302,  306,  311,  315,  319,  324,  328,  332,  337,  341,  345,  349,
+        354,  358,  362,  367,  371,  375,  379,  384,  388,  392,  396,  401,
+        409,  417,  425,  433,  441,  449,  458,  466,  474,  482,  490,  498,
+        506,  514,  523,  531,  539,  547,  555,  563,  571,  579,  588,  596,
+        604,  616,  628,  640,  652,  664,  676,  688,  700,  713,  725,  737,
+        749,  761,  773,  785,  797,  809,  825,  841,  857,  873,  889,  905,
+        922,  938,  954,  970,  986,  1002, 1018, 1038, 1058, 1078, 1098, 1118,
+        1138, 1158, 1178, 1198, 1218, 1242, 1266, 1290, 1314, 1338, 1362, 1386,
+        1411, 1435, 1463, 1491, 1519, 1547, 1575, 1603, 1631, 1663, 1695, 1727,
+        1759, 1791, 1823, 1859, 1895, 1931, 1967, 2003, 2039, 2079, 2119, 2159,
+        2199, 2239, 2283, 2327, 2371, 2415, 2459, 2507, 2555, 2603, 2651, 2703,
+        2755, 2807, 2859, 2915, 2971, 3027, 3083, 3143, 3203, 3263, 3327, 3391,
+        3455, 3523, 3591, 3659, 3731, 3803, 3876, 3952, 4028, 4104, 4184, 4264,
+        4348, 4432, 4516, 4604, 4692, 4784, 4876, 4972, 5068, 5168, 5268, 5372,
+        5476, 5584, 5692, 5804, 5916, 6032, 6148, 6268, 6388, 6512, 6640, 6768,
+        6900, 7036, 7172, 7312
+   }
+};
+// clang-format on
+
+static_assert(arraysize(kDcQLookup[0]) == arraysize(kAcQLookup[0]),
+              "quantizer lookup arrays of incorrect size");
+
+size_t ClampQ(size_t q) {
+  return std::min(q, kQIndexRange - 1);
+}
+
+int ClampLf(int lf) {
+  const int kMaxLoopFilterLevel = 63;
+  return std::min(std::max(0, lf), kMaxLoopFilterLevel);
+}
+
+}  // namespace
+
 bool Vp9FrameHeader::IsKeyframe() const {
   // When show_existing_frame is true, the frame header does not precede an
   // actual frame to be decoded, so frame_type does not apply (and is not read
@@ -107,7 +235,7 @@
 Vp9Parser::Context::Vp9FrameContextManager::Vp9FrameContextManager()
     : weak_ptr_factory_(this) {}
 
-Vp9Parser::Context::Vp9FrameContextManager::~Vp9FrameContextManager() {}
+Vp9Parser::Context::Vp9FrameContextManager::~Vp9FrameContextManager() = default;
 
 const Vp9FrameContext&
 Vp9Parser::Context::Vp9FrameContextManager::frame_context() const {
@@ -205,7 +333,7 @@
   Reset();
 }
 
-Vp9Parser::~Vp9Parser() {}
+Vp9Parser::~Vp9Parser() = default;
 
 void Vp9Parser::SetStream(const uint8_t* stream, off_t stream_size) {
   DCHECK(stream);
@@ -223,14 +351,106 @@
   context_.Reset();
 }
 
+bool Vp9Parser::ParseUncompressedHeader(const FrameInfo& frame_info,
+                                        Vp9FrameHeader* fhdr,
+                                        Result* result) {
+  memset(&curr_frame_header_, 0, sizeof(curr_frame_header_));
+  *result = kInvalidStream;
+
+  Vp9UncompressedHeaderParser uncompressed_parser(&context_);
+  if (!uncompressed_parser.Parse(frame_info.ptr, frame_info.size,
+                                 &curr_frame_header_)) {
+    *result = kInvalidStream;
+    return true;
+  }
+
+  if (curr_frame_header_.header_size_in_bytes == 0) {
+    // Verify padding bits are zero.
+    for (off_t i = curr_frame_header_.uncompressed_header_size;
+         i < frame_info.size; i++) {
+      if (frame_info.ptr[i] != 0) {
+        DVLOG(1) << "Padding bits are not zeros.";
+        *result = kInvalidStream;
+        return true;
+      }
+    }
+    *fhdr = curr_frame_header_;
+    *result = kOk;
+    return true;
+  }
+  if (curr_frame_header_.uncompressed_header_size +
+          curr_frame_header_.header_size_in_bytes >
+      base::checked_cast<size_t>(frame_info.size)) {
+    DVLOG(1) << "header_size_in_bytes="
+             << curr_frame_header_.header_size_in_bytes
+             << " is larger than bytes left in buffer: "
+             << frame_info.size - curr_frame_header_.uncompressed_header_size;
+    *result = kInvalidStream;
+    return true;
+  }
+
+  return false;
+}
+
+bool Vp9Parser::ParseCompressedHeader(const FrameInfo& frame_info,
+                                      Result* result) {
+  *result = kInvalidStream;
+  size_t frame_context_idx = curr_frame_header_.frame_context_idx;
+  const Context::Vp9FrameContextManager& context_to_load =
+      context_.frame_context_managers_[frame_context_idx];
+  if (!context_to_load.initialized()) {
+    // 8.2 Frame order constraints
+    // must load an initialized set of probabilities.
+    DVLOG(1) << "loading uninitialized frame context, index="
+             << frame_context_idx;
+    *result = kInvalidStream;
+    return true;
+  }
+  if (context_to_load.needs_client_update()) {
+    DVLOG(3) << "waiting frame_context_idx=" << frame_context_idx
+             << " to update";
+    curr_frame_info_ = frame_info;
+    *result = kAwaitingRefresh;
+    return true;
+  }
+  curr_frame_header_.initial_frame_context = curr_frame_header_.frame_context =
+      context_to_load.frame_context();
+
+  Vp9CompressedHeaderParser compressed_parser;
+  if (!compressed_parser.Parse(
+          frame_info.ptr + curr_frame_header_.uncompressed_header_size,
+          curr_frame_header_.header_size_in_bytes, &curr_frame_header_)) {
+    *result = kInvalidStream;
+    return true;
+  }
+
+  if (curr_frame_header_.refresh_frame_context) {
+    // In frame parallel mode, we can refresh the context without decoding
+    // tile data.
+    if (curr_frame_header_.frame_parallel_decoding_mode) {
+      context_.UpdateFrameContext(frame_context_idx,
+                                  curr_frame_header_.frame_context);
+    } else {
+      context_.MarkFrameContextForUpdate(frame_context_idx);
+    }
+  }
+  return false;
+}
+
 Vp9Parser::Result Vp9Parser::ParseNextFrame(Vp9FrameHeader* fhdr) {
   DCHECK(fhdr);
   DVLOG(2) << "ParseNextFrame";
+  FrameInfo frame_info;
+  Result result;
 
   // If |curr_frame_info_| is valid, uncompressed header was parsed into
   // |curr_frame_header_| and we are awaiting context update to proceed with
   // compressed header parsing.
-  if (!curr_frame_info_.IsValid()) {
+  if (curr_frame_info_.IsValid()) {
+    DCHECK(parsing_compressed_header_);
+    frame_info = curr_frame_info_;
+    curr_frame_info_.Reset();
+  } else {
     if (frames_.empty()) {
       // No frames to be decoded, if there is no more stream, request more.
       if (!stream_)
@@ -244,85 +464,26 @@
       }
     }
 
-    curr_frame_info_ = frames_.front();
+    frame_info = frames_.front();
     frames_.pop_front();
 
-    memset(&curr_frame_header_, 0, sizeof(curr_frame_header_));
-
-    Vp9UncompressedHeaderParser uncompressed_parser(&context_);
-    if (!uncompressed_parser.Parse(curr_frame_info_.ptr, curr_frame_info_.size,
-                                   &curr_frame_header_))
-      return kInvalidStream;
-
-    if (curr_frame_header_.header_size_in_bytes == 0) {
-      // Verify padding bits are zero.
-      for (off_t i = curr_frame_header_.uncompressed_header_size;
-           i < curr_frame_info_.size; i++) {
-        if (curr_frame_info_.ptr[i] != 0) {
-          DVLOG(1) << "Padding bits are not zeros.";
-          return kInvalidStream;
-        }
-      }
-      *fhdr = curr_frame_header_;
-      curr_frame_info_.Reset();
-      return kOk;
-    }
-    if (curr_frame_header_.uncompressed_header_size +
-            curr_frame_header_.header_size_in_bytes >
-        base::checked_cast<size_t>(curr_frame_info_.size)) {
-      DVLOG(1) << "header_size_in_bytes="
-               << curr_frame_header_.header_size_in_bytes
-               << " is larger than bytes left in buffer: "
-               << curr_frame_info_.size -
-                      curr_frame_header_.uncompressed_header_size;
-      return kInvalidStream;
-    }
+    if (ParseUncompressedHeader(frame_info, fhdr, &result))
+      return result;
   }
 
   if (parsing_compressed_header_) {
-    size_t frame_context_idx = curr_frame_header_.frame_context_idx;
-    const Context::Vp9FrameContextManager& context_to_load =
-        context_.frame_context_managers_[frame_context_idx];
-    if (!context_to_load.initialized()) {
-      // 8.2 Frame order constraints
-      // must load an initialized set of probabilities.
-      DVLOG(1) << "loading uninitialized frame context, index="
-               << frame_context_idx;
-      return kInvalidStream;
-    }
-    if (context_to_load.needs_client_update()) {
-      DVLOG(3) << "waiting frame_context_idx=" << frame_context_idx
-               << " to update";
-      return kAwaitingRefresh;
-    }
-    curr_frame_header_.initial_frame_context =
-        curr_frame_header_.frame_context = context_to_load.frame_context();
-
-    Vp9CompressedHeaderParser compressed_parser;
-    if (!compressed_parser.Parse(
-            curr_frame_info_.ptr + curr_frame_header_.uncompressed_header_size,
-            curr_frame_header_.header_size_in_bytes, &curr_frame_header_)) {
-      return kInvalidStream;
-    }
-
-    if (curr_frame_header_.refresh_frame_context) {
-      // In frame parallel mode, we can refresh the context without decoding
-      // tile data.
-      if (curr_frame_header_.frame_parallel_decoding_mode) {
-        context_.UpdateFrameContext(frame_context_idx,
-                                    curr_frame_header_.frame_context);
-      } else {
-        context_.MarkFrameContextForUpdate(frame_context_idx);
-      }
+    if (ParseCompressedHeader(frame_info, &result)) {
+      DCHECK(result != kAwaitingRefresh || curr_frame_info_.IsValid());
+      return result;
     }
   }
 
-  SetupSegmentationDequant();
+  if (!SetupSegmentationDequant())
+    return kInvalidStream;
   SetupLoopFilter();
   UpdateSlots();
 
   *fhdr = curr_frame_header_;
-  curr_frame_info_.Reset();
   return kOk;
 }
 
@@ -398,86 +559,6 @@
   return frames;
 }
 
-// 8.6.1
-const size_t QINDEX_RANGE = 256;
-const int16_t kDcQLookup[QINDEX_RANGE] = {
-  4,       8,    8,    9,   10,   11,   12,   12,
-  13,     14,   15,   16,   17,   18,   19,   19,
-  20,     21,   22,   23,   24,   25,   26,   26,
-  27,     28,   29,   30,   31,   32,   32,   33,
-  34,     35,   36,   37,   38,   38,   39,   40,
-  41,     42,   43,   43,   44,   45,   46,   47,
-  48,     48,   49,   50,   51,   52,   53,   53,
-  54,     55,   56,   57,   57,   58,   59,   60,
-  61,     62,   62,   63,   64,   65,   66,   66,
-  67,     68,   69,   70,   70,   71,   72,   73,
-  74,     74,   75,   76,   77,   78,   78,   79,
-  80,     81,   81,   82,   83,   84,   85,   85,
-  87,     88,   90,   92,   93,   95,   96,   98,
-  99,    101,  102,  104,  105,  107,  108,  110,
-  111,   113,  114,  116,  117,  118,  120,  121,
-  123,   125,  127,  129,  131,  134,  136,  138,
-  140,   142,  144,  146,  148,  150,  152,  154,
-  156,   158,  161,  164,  166,  169,  172,  174,
-  177,   180,  182,  185,  187,  190,  192,  195,
-  199,   202,  205,  208,  211,  214,  217,  220,
-  223,   226,  230,  233,  237,  240,  243,  247,
-  250,   253,  257,  261,  265,  269,  272,  276,
-  280,   284,  288,  292,  296,  300,  304,  309,
-  313,   317,  322,  326,  330,  335,  340,  344,
-  349,   354,  359,  364,  369,  374,  379,  384,
-  389,   395,  400,  406,  411,  417,  423,  429,
-  435,   441,  447,  454,  461,  467,  475,  482,
-  489,   497,  505,  513,  522,  530,  539,  549,
-  559,   569,  579,  590,  602,  614,  626,  640,
-  654,   668,  684,  700,  717,  736,  755,  775,
-  796,   819,  843,  869,  896,  925,  955,  988,
-  1022, 1058, 1098, 1139, 1184, 1232, 1282, 1336,
-};
-
-const int16_t kAcQLookup[QINDEX_RANGE] = {
-  4,       8,    9,   10,   11,   12,   13,   14,
-  15,     16,   17,   18,   19,   20,   21,   22,
-  23,     24,   25,   26,   27,   28,   29,   30,
-  31,     32,   33,   34,   35,   36,   37,   38,
-  39,     40,   41,   42,   43,   44,   45,   46,
-  47,     48,   49,   50,   51,   52,   53,   54,
-  55,     56,   57,   58,   59,   60,   61,   62,
-  63,     64,   65,   66,   67,   68,   69,   70,
-  71,     72,   73,   74,   75,   76,   77,   78,
-  79,     80,   81,   82,   83,   84,   85,   86,
-  87,     88,   89,   90,   91,   92,   93,   94,
-  95,     96,   97,   98,   99,  100,  101,  102,
-  104,   106,  108,  110,  112,  114,  116,  118,
-  120,   122,  124,  126,  128,  130,  132,  134,
-  136,   138,  140,  142,  144,  146,  148,  150,
-  152,   155,  158,  161,  164,  167,  170,  173,
-  176,   179,  182,  185,  188,  191,  194,  197,
-  200,   203,  207,  211,  215,  219,  223,  227,
-  231,   235,  239,  243,  247,  251,  255,  260,
-  265,   270,  275,  280,  285,  290,  295,  300,
-  305,   311,  317,  323,  329,  335,  341,  347,
-  353,   359,  366,  373,  380,  387,  394,  401,
-  408,   416,  424,  432,  440,  448,  456,  465,
-  474,   483,  492,  501,  510,  520,  530,  540,
-  550,   560,  571,  582,  593,  604,  615,  627,
-  639,   651,  663,  676,  689,  702,  715,  729,
-  743,   757,  771,  786,  801,  816,  832,  848,
-  864,   881,  898,  915,  933,  951,  969,  988,
-  1007, 1026, 1046, 1066, 1087, 1108, 1129, 1151,
-  1173, 1196, 1219, 1243, 1267, 1292, 1317, 1343,
-  1369, 1396, 1423, 1451, 1479, 1508, 1537, 1567,
-  1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828,
-};
-
-static_assert(arraysize(kDcQLookup) == arraysize(kAcQLookup),
-              "quantizer lookup arrays of incorrect size");
-
-static size_t ClampQ(size_t q) {
-  return std::min(std::max(static_cast<size_t>(0), q),
-                  arraysize(kDcQLookup) - 1);
-}
-
 // 8.6.1 Dequantization functions
 size_t Vp9Parser::GetQIndex(const Vp9QuantizationParams& quant,
                             size_t segid) const {
@@ -497,40 +578,40 @@
 }
 
 // 8.6.1 Dequantization functions
-void Vp9Parser::SetupSegmentationDequant() {
+bool Vp9Parser::SetupSegmentationDequant() {
   const Vp9QuantizationParams& quant = curr_frame_header_.quant_params;
   Vp9SegmentationParams& segmentation = context_.segmentation_;
 
-  DLOG_IF(ERROR, curr_frame_header_.bit_depth > 8)
-      << "bit_depth > 8 is not supported "
-         "yet, kDcQLookup and kAcQLookup "
-         "need extended";
+  if (curr_frame_header_.bit_depth > 10) {
+    DLOG(ERROR) << "bit_depth > 10 is not supported yet, kDcQLookup and "
+                   "kAcQLookup need to be extended";
+    return false;
+  }
+  const size_t bit_depth_index = (curr_frame_header_.bit_depth == 8) ? 0 : 1;
+
   if (segmentation.enabled) {
     for (size_t i = 0; i < Vp9SegmentationParams::kNumSegments; ++i) {
       const size_t q_index = GetQIndex(quant, i);
       segmentation.y_dequant[i][0] =
-          kDcQLookup[ClampQ(q_index + quant.delta_q_y_dc)];
-      segmentation.y_dequant[i][1] = kAcQLookup[ClampQ(q_index)];
+          kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_y_dc)];
+      segmentation.y_dequant[i][1] =
+          kAcQLookup[bit_depth_index][ClampQ(q_index)];
       segmentation.uv_dequant[i][0] =
-          kDcQLookup[ClampQ(q_index + quant.delta_q_uv_dc)];
+          kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_dc)];
       segmentation.uv_dequant[i][1] =
-          kAcQLookup[ClampQ(q_index + quant.delta_q_uv_ac)];
+          kAcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_ac)];
     }
   } else {
     const size_t q_index = quant.base_q_idx;
     segmentation.y_dequant[0][0] =
-        kDcQLookup[ClampQ(q_index + quant.delta_q_y_dc)];
-    segmentation.y_dequant[0][1] = kAcQLookup[ClampQ(q_index)];
+        kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_y_dc)];
+    segmentation.y_dequant[0][1] = kAcQLookup[bit_depth_index][ClampQ(q_index)];
     segmentation.uv_dequant[0][0] =
-        kDcQLookup[ClampQ(q_index + quant.delta_q_uv_dc)];
+        kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_dc)];
     segmentation.uv_dequant[0][1] =
-        kAcQLookup[ClampQ(q_index + quant.delta_q_uv_ac)];
+        kAcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_ac)];
   }
-}
-
-static int ClampLf(int lf) {
-  const int kMaxLoopFilterLevel = 63;
-  return std::min(std::max(0, lf), kMaxLoopFilterLevel);
+  return true;
 }
 
 // 8.8.1 Loop filter frame init process
diff --git a/vda/vp9_parser.h b/vda/vp9_parser.h
index c6e1d9f..ab1fa57 100644
--- a/vda/vp9_parser.h
+++ b/vda/vp9_parser.h
@@ -9,6 +9,7 @@
 //
 // See media::VP9Decoder for example usage.
 //
+// Note: ported from Chromium commit head: ec6c6e0
 #ifndef VP9_PARSER_H_
 #define VP9_PARSER_H_
 
@@ -404,8 +405,22 @@
 
   std::deque<FrameInfo> ParseSuperframe();
 
+  // Returns true and populates |result| with the parsing result if parsing of
+  // current frame is finished (possibly unsuccessfully). |fhdr| will only be
+  // populated and valid if |result| is kOk. Otherwise return false, indicating
+  // that the compressed header must be parsed next.
+  bool ParseUncompressedHeader(const FrameInfo& frame_info,
+                               Vp9FrameHeader* fhdr,
+                               Result* result);
+
+  // Returns true if parsing of current frame is finished and |result| will be
+  // populated with value of parsing result. Otherwise, needs to continue setup
+  // current frame.
+  bool ParseCompressedHeader(const FrameInfo& frame_info, Result* result);
+
   size_t GetQIndex(const Vp9QuantizationParams& quant, size_t segid) const;
-  void SetupSegmentationDequant();
+  // Returns true if the setup succeeded.
+  bool SetupSegmentationDequant();
   void SetupLoopFilter();
   void UpdateSlots();
 
@@ -415,7 +430,7 @@
   // Remaining bytes in stream_.
   off_t bytes_left_;
 
-  bool parsing_compressed_header_;
+  const bool parsing_compressed_header_;
 
   // FrameInfo for the remaining frames in the current superframe to be parsed.
   std::deque<FrameInfo> frames_;
diff --git a/vda/vp9_picture.cc b/vda/vp9_picture.cc
index a99427f..df2c3b0 100644
--- a/vda/vp9_picture.cc
+++ b/vda/vp9_picture.cc
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 6e70beb
 
 #include "vp9_picture.h"
 
diff --git a/vda/vp9_picture.h b/vda/vp9_picture.h
index 23e299b..efff37b 100644
--- a/vda/vp9_picture.h
+++ b/vda/vp9_picture.h
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 70340ce
 
 #ifndef VP9_PICTURE_H_
 #define VP9_PICTURE_H_
@@ -9,13 +10,14 @@
 
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
+#include "rect.h"
 #include "vp9_parser.h"
 
 namespace media {
 
 class V4L2VP9Picture;
 
-class VP9Picture : public base::RefCounted<VP9Picture> {
+class VP9Picture : public base::RefCountedThreadSafe<VP9Picture> {
  public:
   VP9Picture();
 
@@ -23,8 +25,13 @@
 
   std::unique_ptr<Vp9FrameHeader> frame_hdr;
 
+  // The visible size of picture. This could be either parsed from frame
+  // header, or set to Rect(0, 0) for indicating invalid values or
+  // not available.
+  Rect visible_rect;
+
  protected:
-  friend class base::RefCounted<VP9Picture>;
+  friend class base::RefCountedThreadSafe<VP9Picture>;
   virtual ~VP9Picture();
 
   DISALLOW_COPY_AND_ASSIGN(VP9Picture);
diff --git a/vda/vp9_raw_bits_reader.cc b/vda/vp9_raw_bits_reader.cc
index 7cad4d9..dea06e0 100644
--- a/vda/vp9_raw_bits_reader.cc
+++ b/vda/vp9_raw_bits_reader.cc
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: 2de6929
 
 #include "vp9_raw_bits_reader.h"
 
@@ -13,7 +14,7 @@
 
 Vp9RawBitsReader::Vp9RawBitsReader() : valid_(true) {}
 
-Vp9RawBitsReader::~Vp9RawBitsReader() {}
+Vp9RawBitsReader::~Vp9RawBitsReader() = default;
 
 void Vp9RawBitsReader::Initialize(const uint8_t* data, size_t size) {
   DCHECK(data);
diff --git a/vda/vp9_raw_bits_reader.h b/vda/vp9_raw_bits_reader.h
index 9f112b8..04ad413 100644
--- a/vda/vp9_raw_bits_reader.h
+++ b/vda/vp9_raw_bits_reader.h
@@ -1,6 +1,7 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: e5a9a62
 
 #ifndef VP9_RAW_BITS_READER_H_
 #define VP9_RAW_BITS_READER_H_
diff --git a/vda/vp9_uncompressed_header_parser.cc b/vda/vp9_uncompressed_header_parser.cc
index 067b40c..f6dc2eb 100644
--- a/vda/vp9_uncompressed_header_parser.cc
+++ b/vda/vp9_uncompressed_header_parser.cc
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: f06caa0
 
 #include "vp9_uncompressed_header_parser.h"
 
@@ -789,7 +790,7 @@
       for (size_t i = 0; i < Vp9LoopFilterParams::kNumModeDeltas; i++) {
         loop_filter.update_mode_deltas[i] = reader_.ReadBool();
         if (loop_filter.update_mode_deltas[i])
-          loop_filter.mode_deltas[i] = reader_.ReadLiteral(6);
+          loop_filter.mode_deltas[i] = reader_.ReadSignedLiteral(6);
       }
     }
   }
diff --git a/vda/vp9_uncompressed_header_parser.h b/vda/vp9_uncompressed_header_parser.h
index 655ba38..6780d38 100644
--- a/vda/vp9_uncompressed_header_parser.h
+++ b/vda/vp9_uncompressed_header_parser.h
@@ -1,6 +1,7 @@
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+// Note: ported from Chromium commit head: e5a9a62
 
 #ifndef VP9_UNCOMPRESSED_HEADER_PARSER_H_
 #define VP9_UNCOMPRESSED_HEADER_PARSER_H_