adt3-S media_modules source code [1/1]

Internal ToT (tip-of-tree) CL:
h265: Revert fix special long ref issue [1/1]

SWPL-49771

Problem:
Some files exhibit unsmooth (stuttering) playback.

Solution:
Revert commit 915cf6061ec37e7f937b6c73d7db10ec1b8db335

Verify:
A311D-W400

Reverted Change-Id: I38171dd2c8b5bbc1aafe07185d9c9dfaf6bb88ce
Signed-off-by: Peng Yixin <yixin.peng@amlogic.com>
Signed-off-by: Liang Ji <liang.ji@amlogic.com>
Change-Id: Id514584bd7ee4de8bda95c75d9e25adf635b8ccc
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..6640ff9
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,44 @@
+
+CONFIGS := CONFIG_AMLOGIC_MEDIA_VDEC_MPEG12=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_MPEG2_MULTI=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_MPEG4=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_MPEG4_MULTI=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_VC1=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_H264=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_H264_MULTI=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_H264_MVC=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_H265=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_VP9=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_MJPEG=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_MJPEG_MULTI=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_REAL=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_AVS=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_AVS_MULTI=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_AVS2=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_AV1=m \
+	CONFIG_AMLOGIC_MEDIA_VENC_H264=m \
+	CONFIG_AMLOGIC_MEDIA_VENC_JPEG=m \
+	CONFIG_AMLOGIC_MEDIA_VENC_H265=m
+
+
+EXTRA_INCLUDE := -I$(KERNEL_SRC)/$(M)/drivers/include
+
+CONFIGS_BUILD := -Wno-parentheses-equality -Wno-pointer-bool-conversion \
+				-Wno-unused-const-variable -Wno-typedef-redefinition \
+				-Wno-logical-not-parentheses -Wno-sometimes-uninitialized
+
+
+modules:
+	$(MAKE) -C  $(KERNEL_SRC) M=$(M)/drivers modules "EXTRA_CFLAGS+=-I$(INCLUDE) -Wno-error $(CONFIGS_BUILD) $(EXTRA_INCLUDE)" $(CONFIGS)
+
+all: modules
+
+modules_install:
+	$(MAKE) INSTALL_MOD_STRIP=1 M=$(M)/drivers -C $(KERNEL_SRC) modules_install
+	mkdir -p ${OUT_DIR}/../vendor_lib/modules
+	cd ${OUT_DIR}/$(M)/; find -name "*.ko" -exec cp {} ${OUT_DIR}/../vendor_lib/modules/ \;
+	mkdir -p ${OUT_DIR}/../vendor_lib/firmware/video
+	cp $(KERNEL_SRC)/$(M)/firmware/* ${OUT_DIR}/../vendor_lib/firmware/video/
+
+clean:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) clean
diff --git a/Media.mk b/Media.mk
new file mode 100644
index 0000000..362d4c7
--- /dev/null
+++ b/Media.mk
@@ -0,0 +1,117 @@
+ifeq ($(KERNEL_A32_SUPPORT), true)
+KERNEL_ARCH := arm
+else
+KERNEL_ARCH := arm64
+endif
+
+CONFIGS := CONFIG_AMLOGIC_MEDIA_VDEC_MPEG12=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_MPEG2_MULTI=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_MPEG4=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_MPEG4_MULTI=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_VC1=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_H264=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_H264_MULTI=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_H264_MVC=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_H265=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_VP9=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_MJPEG=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_MJPEG_MULTI=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_REAL=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_AVS=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_AVS_MULTI=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_AVS2=m \
+	CONFIG_AMLOGIC_MEDIA_VDEC_AV1=m \
+	CONFIG_AMLOGIC_MEDIA_VENC_H264=m \
+	CONFIG_AMLOGIC_MEDIA_VENC_JPEG=m \
+	CONFIG_AMLOGIC_MEDIA_VENC_H265=m
+
+define copy-media-modules
+$(foreach m, $(shell find $(strip $(1)) -name "*.ko"),\
+	$(shell cp $(m) $(strip $(2)) -rfa))
+endef
+
+ifneq (,$(TOP))
+KDIR := $(shell pwd)/$(PRODUCT_OUT)/obj/KERNEL_OBJ/
+
+MEDIA_DRIVERS := $(TOP)/hardware/amlogic/media_modules/drivers
+ifeq (,$(wildcard $(MEDIA_DRIVERS)))
+$(error No find the dir of drivers.)
+endif
+
+INCLUDE := $(MEDIA_DRIVERS)/include
+ifeq (,$(wildcard $(INCLUDE)))
+$(error No find the dir of include.)
+endif
+
+MEDIA_MODULES := $(shell pwd)/$(PRODUCT_OUT)/obj/media_modules
+ifeq (,$(wildcard $(MEDIA_MODULES)))
+$(shell mkdir $(MEDIA_MODULES) -p)
+endif
+
+MODS_OUT := $(shell pwd)/$(PRODUCT_OUT)/obj/lib_vendor
+ifeq (,$(wildcard $(MODS_OUT)))
+$(shell mkdir $(MODS_OUT) -p)
+endif
+
+UCODE_OUT := $(shell pwd)/$(PRODUCT_OUT)/$(TARGET_COPY_OUT_VENDOR)/lib/firmware/video
+ifeq (,$(wildcard $(UCODE_OUT)))
+$(shell mkdir $(UCODE_OUT) -p)
+endif
+
+$(shell cp $(MEDIA_DRIVERS)/../firmware/* $(UCODE_OUT) -rfa)
+$(shell cp $(MEDIA_DRIVERS)/* $(MEDIA_MODULES) -rfa)
+
+define media-modules
+	PATH=$$(cd ./$(TARGET_HOST_TOOL_PATH); pwd):$$PATH \
+		$(MAKE) -C $(KDIR) M=$(MEDIA_MODULES) ARCH=$(KERNEL_ARCH) \
+		CROSS_COMPILE=$(PREFIX_CROSS_COMPILE) $(CONFIGS) \
+		EXTRA_CFLAGS+=-I$(INCLUDE) modules;
+		sh $(TOP)/device/amlogic/common/copy_modules.sh $(MEDIA_MODULES) $(MODS_OUT)
+endef
+
+else
+KDIR := $(PWD)/kernel
+ifeq (,$(wildcard $(KDIR)))
+$(error No find the dir of kernel.)
+endif
+
+MEDIA_DRIVERS := $(PWD)/media_modules/drivers
+ifeq (,$(wildcard $(MEDIA_DRIVERS)))
+$(error No find the dir of drivers.)
+endif
+
+INCLUDE := $(MEDIA_DRIVERS)/include
+ifeq (,$(wildcard $(INCLUDE)))
+$(error No find the dir of include.)
+endif
+
+MODS_OUT ?= $(MEDIA_DRIVERS)/../modules
+ifeq (,$(wildcard $(MODS_OUT)))
+$(shell mkdir $(MODS_OUT) -p)
+endif
+
+ifeq ($(KERNEL_A32_SUPPORT), true)
+TOOLS := /opt/gcc-linaro-6.3.1-2017.02-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-
+else
+TOOLS := /opt/gcc-linaro-5.3-2016.02-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-
+endif
+
+
+modules:
+	CCACHE_NODIRECT="true" PATH=$$(cd ./$(TARGET_HOST_TOOL_PATH); pwd):$$PATH \
+		$(MAKE) -C $(KDIR) M=$(MEDIA_DRIVERS) ARCH=$(KERNEL_ARCH) \
+		CROSS_COMPILE=$(TOOLS) $(CONFIGS) \
+		EXTRA_CFLAGS+=-I$(INCLUDE) -j64
+
+copy-modules:
+	@echo "start copying media modules."
+	mkdir -p $(MODS_OUT)
+	$(call copy-media-modules, $(MEDIA_DRIVERS), $(MODS_OUT))
+
+all: modules copy-modules
+
+clean:
+	PATH=$$(cd ./$(TARGET_HOST_TOOL_PATH); pwd):$$PATH \
+		$(MAKE) -C $(KDIR) M=$(MEDIA_DRIVERS) ARCH=$(KERNEL_ARCH) clean
+
+endif
diff --git a/drivers/Makefile b/drivers/Makefile
new file mode 100644
index 0000000..3e487db
--- /dev/null
+++ b/drivers/Makefile
@@ -0,0 +1,8 @@
+obj-y	+=	common/
+obj-y	+=	frame_provider/
+obj-y	+=	frame_sink/
+obj-y	+=	stream_input/
+obj-y	+=	amvdec_ports/
+obj-y	+=	fake_video_out/
+obj-y	+=	framerate_adapter/
+obj-y	+=	media_sync/
diff --git a/drivers/amvdec_ports/Makefile b/drivers/amvdec_ports/Makefile
new file mode 100644
index 0000000..6395adf
--- /dev/null
+++ b/drivers/amvdec_ports/Makefile
@@ -0,0 +1,24 @@
+obj-m += amvdec_ports.o
+amvdec_ports-objs += aml_vcodec_dec_drv.o
+amvdec_ports-objs += aml_vcodec_dec.o
+amvdec_ports-objs += aml_vcodec_util.o
+amvdec_ports-objs += aml_vcodec_adapt.o
+amvdec_ports-objs += aml_vcodec_vfm.o
+amvdec_ports-objs += vdec_drv_if.o
+amvdec_ports-objs += decoder/vdec_h264_if.o
+amvdec_ports-objs += decoder/vdec_hevc_if.o
+amvdec_ports-objs += decoder/vdec_vp9_if.o
+amvdec_ports-objs += decoder/vdec_mpeg12_if.o
+amvdec_ports-objs += decoder/vdec_mpeg4_if.o
+amvdec_ports-objs += decoder/vdec_mjpeg_if.o
+amvdec_ports-objs += decoder/vdec_av1_if.o
+ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+amvdec_ports-objs += decoder/aml_h264_parser.o
+amvdec_ports-objs += decoder/aml_hevc_parser.o
+amvdec_ports-objs += decoder/aml_vp9_parser.o
+amvdec_ports-objs += decoder/aml_mpeg12_parser.o
+amvdec_ports-objs += decoder/aml_mpeg4_parser.o
+amvdec_ports-objs += decoder/aml_mjpeg_parser.o
+amvdec_ports-objs += utils/golomb.o
+endif
+amvdec_ports-objs += utils/common.o
diff --git a/drivers/amvdec_ports/aml_vcodec_adapt.c b/drivers/amvdec_ports/aml_vcodec_adapt.c
new file mode 100644
index 0000000..95ee48a
--- /dev/null
+++ b/drivers/amvdec_ports/aml_vcodec_adapt.c
@@ -0,0 +1,718 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/types.h>
+#include <linux/amlogic/media/utils/amstream.h>
+#include <linux/amlogic/media/utils/vformat.h>
+#include <linux/amlogic/media/utils/aformat.h>
+#include <linux/amlogic/media/frame_sync/tsync.h>
+#include <linux/amlogic/media/frame_sync/ptsserv.h>
+#include <linux/amlogic/media/frame_sync/timestamp.h>
+#include <linux/amlogic/media/utils/amports_config.h>
+#include <linux/amlogic/media/frame_sync/tsync_pcr.h>
+#include <linux/amlogic/media/codec_mm/codec_mm.h>
+#include <linux/amlogic/media/codec_mm/configs.h>
+#include <linux/amlogic/media/utils/vformat.h>
+#include <linux/amlogic/media/utils/aformat.h>
+#include <linux/amlogic/media/registers/register.h>
+#include "../stream_input/amports/adec.h"
+#include "../stream_input/amports/streambuf.h"
+#include "../stream_input/amports/streambuf_reg.h"
+#include "../stream_input/parser/tsdemux.h"
+#include "../stream_input/parser/psparser.h"
+#include "../stream_input/parser/esparser.h"
+#include "../frame_provider/decoder/utils/vdec.h"
+#include "../common/media_clock/switch/amports_gate.h"
+#include <linux/delay.h>
+#include "aml_vcodec_adapt.h"
+#include <linux/crc32.h>
+
+#define DEFAULT_VIDEO_BUFFER_SIZE		(1024 * 1024 * 3)
+#define DEFAULT_VIDEO_BUFFER_SIZE_4K		(1024 * 1024 * 6)
+#define DEFAULT_VIDEO_BUFFER_SIZE_TVP		(1024 * 1024 * 10)
+#define DEFAULT_VIDEO_BUFFER_SIZE_4K_TVP	(1024 * 1024 * 15)
+#define DEFAULT_AUDIO_BUFFER_SIZE		(1024*768*2)
+#define DEFAULT_SUBTITLE_BUFFER_SIZE		(1024*256)
+
+#define PTS_OUTSIDE	(1)
+#define SYNC_OUTSIDE	(2)
+
+//#define DATA_DEBUG
+
+extern int dump_output_frame;
+extern void aml_recycle_dma_buffers(struct aml_vcodec_ctx *ctx, u32 handle);
+static int def_4k_vstreambuf_sizeM =
+	(DEFAULT_VIDEO_BUFFER_SIZE_4K >> 20);
+static int def_vstreambuf_sizeM =
+	(DEFAULT_VIDEO_BUFFER_SIZE >> 20);
+
+static int slow_input = 0;
+
+static int use_bufferlevelx10000 = 10000;
+static unsigned int amstream_buf_num = BUF_MAX_NUM;
+
+static struct stream_buf_s bufs[BUF_MAX_NUM] = {
+	{
+		.reg_base = VLD_MEM_VIFIFO_REG_BASE,
+		.type = BUF_TYPE_VIDEO,
+		.buf_start = 0,
+		.buf_size = DEFAULT_VIDEO_BUFFER_SIZE,
+		.default_buf_size = DEFAULT_VIDEO_BUFFER_SIZE,
+		.first_tstamp = INVALID_PTS
+	},
+	{
+		.reg_base = AIU_MEM_AIFIFO_REG_BASE,
+		.type = BUF_TYPE_AUDIO,
+		.buf_start = 0,
+		.buf_size = DEFAULT_AUDIO_BUFFER_SIZE,
+		.default_buf_size = DEFAULT_AUDIO_BUFFER_SIZE,
+		.first_tstamp = INVALID_PTS
+	},
+	{
+		.reg_base = 0,
+		.type = BUF_TYPE_SUBTITLE,
+		.buf_start = 0,
+		.buf_size = DEFAULT_SUBTITLE_BUFFER_SIZE,
+		.default_buf_size = DEFAULT_SUBTITLE_BUFFER_SIZE,
+		.first_tstamp = INVALID_PTS
+	},
+	{
+		.reg_base = 0,
+		.type = BUF_TYPE_USERDATA,
+		.buf_start = 0,
+		.buf_size = 0,
+		.first_tstamp = INVALID_PTS
+	},
+	{
+		.reg_base = HEVC_STREAM_REG_BASE,
+		.type = BUF_TYPE_HEVC,
+		.buf_start = 0,
+		.buf_size = DEFAULT_VIDEO_BUFFER_SIZE_4K,
+		.default_buf_size = DEFAULT_VIDEO_BUFFER_SIZE_4K,
+		.first_tstamp = INVALID_PTS
+	},
+};
+
+extern int aml_set_vfm_path, aml_set_vdec_type;
+extern bool aml_set_vfm_enable, aml_set_vdec_type_enable;
+
+static void set_default_params(struct aml_vdec_adapt *vdec)
+{
+	ulong sync_mode = (PTS_OUTSIDE | SYNC_OUTSIDE);
+
+	vdec->dec_prop.param = (void *)sync_mode;
+	vdec->dec_prop.format = vdec->format;
+	vdec->dec_prop.width = 1920;
+	vdec->dec_prop.height = 1088;
+	vdec->dec_prop.rate = 3200;
+}
+
+static int enable_hardware(struct stream_port_s *port)
+{
+	if (get_cpu_type() < MESON_CPU_MAJOR_ID_M6)
+		return -1;
+
+	amports_switch_gate("demux", 1);
+	if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8)
+		amports_switch_gate("parser_top", 1);
+
+	if (port->type & PORT_TYPE_VIDEO) {
+		amports_switch_gate("vdec", 1);
+
+		if (has_hevc_vdec()) {
+			if (port->type & PORT_TYPE_HEVC)
+				vdec_poweron(VDEC_HEVC);
+			else
+				vdec_poweron(VDEC_1);
+		} else {
+			if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8)
+				vdec_poweron(VDEC_1);
+		}
+	}
+
+	return 0;
+}
+
+static int disable_hardware(struct stream_port_s *port)
+{
+	if (get_cpu_type() < MESON_CPU_MAJOR_ID_M6)
+		return -1;
+
+	if (port->type & PORT_TYPE_VIDEO) {
+		if (has_hevc_vdec()) {
+			if (port->type & PORT_TYPE_HEVC)
+				vdec_poweroff(VDEC_HEVC);
+			else
+				vdec_poweroff(VDEC_1);
+		}
+
+		amports_switch_gate("vdec", 0);
+	}
+
+	if (get_cpu_type() >= MESON_CPU_MAJOR_ID_M8)
+		amports_switch_gate("parser_top", 0);
+
+	amports_switch_gate("demux", 0);
+
+	return 0;
+}
+
+static int reset_canuse_buferlevel(int levelx10000)
+{
+	int i;
+	struct stream_buf_s *p = NULL;
+
+	if (levelx10000 >= 0 && levelx10000 <= 10000)
+		use_bufferlevelx10000 = levelx10000;
+	else
+		use_bufferlevelx10000 = 10000;
+	for (i = 0; i < amstream_buf_num; i++) {
+		p = &bufs[i];
+		p->canusebuf_size = ((p->buf_size / 1024) *
+			use_bufferlevelx10000 / 10000) * 1024;
+		p->canusebuf_size += 1023;
+		p->canusebuf_size &= ~1023;
+
+		if (p->canusebuf_size > p->buf_size)
+			p->canusebuf_size = p->buf_size;
+	}
+
+	return 0;
+}
+
+static void change_vbufsize(struct vdec_s *vdec,
+	struct stream_buf_s *pvbuf)
+{
+	if (pvbuf->buf_start != 0) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "streambuf is alloced before\n");
+		return;
+	}
+
+	if (vdec->port->is_4k) {
+		pvbuf->buf_size = def_4k_vstreambuf_sizeM * SZ_1M;
+
+		if (vdec->port_flag & PORT_FLAG_DRM)
+			pvbuf->buf_size = DEFAULT_VIDEO_BUFFER_SIZE_4K_TVP;
+
+		if ((pvbuf->buf_size > 30 * SZ_1M)
+			&& (codec_mm_get_total_size() < 220 * SZ_1M)) {
+			/* if the codec_mm pool is under 220M, halve the 4K stream buffer */
+			pvbuf->buf_size = pvbuf->buf_size >> 1;
+		}
+	} else if (pvbuf->buf_size > def_vstreambuf_sizeM * SZ_1M) {
+		if (vdec->port_flag & PORT_FLAG_DRM)
+			pvbuf->buf_size = DEFAULT_VIDEO_BUFFER_SIZE_TVP;
+	} else {
+		pvbuf->buf_size = def_vstreambuf_sizeM * SZ_1M;
+		if (vdec->port_flag & PORT_FLAG_DRM)
+			pvbuf->buf_size = DEFAULT_VIDEO_BUFFER_SIZE_TVP;
+	}
+
+	reset_canuse_buferlevel(10000);
+}
+
+static void user_buffer_init(void)
+{
+	struct stream_buf_s *pubuf = &bufs[BUF_TYPE_USERDATA];
+
+	pubuf->buf_size = 0;
+	pubuf->buf_start = 0;
+	pubuf->buf_wp = 0;
+	pubuf->buf_rp = 0;
+}
+
+static void video_component_release(struct stream_port_s *port,
+struct stream_buf_s *pbuf, int release_num)
+{
+	struct aml_vdec_adapt *ada_ctx
+		= container_of(port, struct aml_vdec_adapt, port);
+	struct vdec_s *vdec = ada_ctx->vdec;
+
+	struct vdec_s *slave = NULL;
+
+	switch (release_num) {
+	default:
+	case 0:
+	case 4: {
+		if ((port->type & PORT_TYPE_FRAME) == 0)
+			esparser_release(pbuf);
+	}
+
+	case 3: {
+		if (vdec->slave)
+			slave = vdec->slave;
+		vdec_release(vdec);
+
+		if (slave)
+			vdec_release(slave);
+		vdec = NULL;
+	}
+
+	case 2: {
+		if ((port->type & PORT_TYPE_FRAME) == 0)
+			stbuf_release(pbuf);
+	}
+
+	case 1:
+		;
+	}
+}
+
+static int video_component_init(struct stream_port_s *port,
+			  struct stream_buf_s *pbuf)
+{
+	int ret = -1;
+	struct aml_vdec_adapt *ada_ctx
+		= container_of(port, struct aml_vdec_adapt, port);
+	struct vdec_s *vdec = ada_ctx->vdec;
+
+	if ((vdec->port_flag & PORT_FLAG_VFORMAT) == 0) {
+		v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_ERROR, "vformat not set\n");
+		return -EPERM;
+	}
+
+	if ((vdec->sys_info->height * vdec->sys_info->width) > 1920 * 1088
+		|| port->vformat == VFORMAT_H264_4K2K) {
+		port->is_4k = true;
+		if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXLX
+				&& port->vformat == VFORMAT_H264)
+			vdec_poweron(VDEC_HEVC);
+	} else
+		port->is_4k = false;
+
+	if (port->type & PORT_TYPE_FRAME) {
+		ret = vdec_init(vdec, port->is_4k);
+		if (ret < 0) {
+			v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_ERROR, "failed\n");
+			video_component_release(port, pbuf, 2);
+			return ret;
+		}
+
+		return 0;
+	}
+
+	change_vbufsize(vdec, pbuf);
+
+	if (has_hevc_vdec()) {
+		if (port->type & PORT_TYPE_MPTS) {
+			if (pbuf->type == BUF_TYPE_HEVC)
+				vdec_poweroff(VDEC_1);
+			else
+				vdec_poweroff(VDEC_HEVC);
+		}
+	}
+
+	ret = stbuf_init(pbuf, vdec);
+	if (ret < 0) {
+		v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_ERROR, "stbuf_init failed\n");
+		return ret;
+	}
+
+	/* todo: set path based on port flag */
+	ret = vdec_init(vdec, port->is_4k);
+	if (ret < 0) {
+		v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_ERROR, "vdec_init failed\n");
+		video_component_release(port, pbuf, 2);
+		return ret;
+	}
+
+	if (vdec_dual(vdec)) {
+		ret = vdec_init(vdec->slave, port->is_4k);
+		if (ret < 0) {
+			v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_ERROR, "vdec_init failed\n");
+			video_component_release(port, pbuf, 2);
+			return ret;
+		}
+	}
+
+	if (port->type & PORT_TYPE_ES) {
+		ret = esparser_init(pbuf, vdec);
+		if (ret < 0) {
+			video_component_release(port, pbuf, 3);
+			v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_ERROR, "esparser_init() failed\n");
+			return ret;
+		}
+	}
+
+	pbuf->flag |= BUF_FLAG_IN_USE;
+
+	vdec_connect(vdec);
+
+	return 0;
+}
+
+static int vdec_ports_release(struct stream_port_s *port)
+{
+	struct stream_buf_s *pvbuf = &bufs[BUF_TYPE_VIDEO];
+
+	if (has_hevc_vdec()) {
+		if (port->vformat == VFORMAT_HEVC
+			|| port->vformat == VFORMAT_VP9)
+			pvbuf = &bufs[BUF_TYPE_HEVC];
+	}
+
+	if (port->type & PORT_TYPE_MPTS) {
+		tsync_pcr_stop();
+		tsdemux_release();
+	}
+
+	if (port->type & PORT_TYPE_MPPS)
+		psparser_release();
+
+	if (port->type & PORT_TYPE_VIDEO)
+		video_component_release(port, pvbuf, 0);
+
+	port->pcr_inited = 0;
+	port->flag = 0;
+
+	return 0;
+}
+
+static void set_vdec_properity(struct vdec_s *vdec,
+	struct aml_vdec_adapt *ada_ctx)
+{
+	vdec->sys_info	= &ada_ctx->dec_prop;
+	vdec->port	= &ada_ctx->port;
+	vdec->format	= ada_ctx->video_type;
+	vdec->sys_info_store = ada_ctx->dec_prop;
+	vdec->vf_receiver_name = ada_ctx->recv_name;
+
+	/* binding v4l2 ctx to vdec. */
+	vdec->private = ada_ctx->ctx;
+
+	/* set video format, sys info and vfm map.*/
+	vdec->port->vformat = vdec->format;
+	vdec->port->type |= PORT_TYPE_VIDEO;
+	vdec->port_flag |= (vdec->port->flag | PORT_FLAG_VFORMAT);
+	if (vdec->slave) {
+		vdec->slave->format = ada_ctx->dec_prop.format;
+		vdec->slave->port_flag |= PORT_FLAG_VFORMAT;
+	}
+
+	vdec->type = VDEC_TYPE_FRAME_BLOCK;
+	vdec->port->type |= PORT_TYPE_FRAME;
+	vdec->frame_base_video_path = FRAME_BASE_PATH_V4L_OSD;
+
+	if (aml_set_vdec_type_enable) {
+		if (aml_set_vdec_type == VDEC_TYPE_STREAM_PARSER) {
+			vdec->type = VDEC_TYPE_STREAM_PARSER;
+			vdec->port->type &= ~PORT_TYPE_FRAME;
+			vdec->port->type |= PORT_TYPE_ES;
+		} else if (aml_set_vdec_type == VDEC_TYPE_FRAME_BLOCK) {
+			vdec->type = VDEC_TYPE_FRAME_BLOCK;
+			vdec->port->type &= ~PORT_TYPE_ES;
+			vdec->port->type |= PORT_TYPE_FRAME;
+		}
+	}
+
+	if (aml_set_vfm_enable)
+		vdec->frame_base_video_path = aml_set_vfm_path;
+
+	vdec->port->flag = vdec->port_flag;
+	ada_ctx->vfm_path = vdec->frame_base_video_path;
+
+	vdec->config_len = ada_ctx->config.length >
+		PAGE_SIZE ? PAGE_SIZE : ada_ctx->config.length;
+	memcpy(vdec->config, ada_ctx->config.buf, vdec->config_len);
+
+	ada_ctx->vdec = vdec;
+}
+
+static int vdec_ports_init(struct aml_vdec_adapt *ada_ctx)
+{
+	int ret = -1;
+	struct stream_buf_s *pvbuf = &bufs[BUF_TYPE_VIDEO];
+	struct vdec_s *vdec = NULL;
+
+	/* create the vdec instance.*/
+	vdec = vdec_create(&ada_ctx->port, NULL);
+	if (IS_ERR_OR_NULL(vdec))
+		return -1;
+
+	set_vdec_properity(vdec, ada_ctx);
+
+	/* init hw and gate*/
+	ret = enable_hardware(vdec->port);
+	if (ret < 0) {
+		v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_ERROR, "enable hw fail.\n");
+		return ret;
+	}
+
+	stbuf_fetch_init();
+	user_buffer_init();
+
+	if ((vdec->port->type & PORT_TYPE_VIDEO)
+		&& (vdec->port_flag & PORT_FLAG_VFORMAT)) {
+		vdec->port->is_4k = false;
+		if (has_hevc_vdec()) {
+			if (vdec->port->vformat == VFORMAT_HEVC
+				|| vdec->port->vformat == VFORMAT_VP9)
+				pvbuf = &bufs[BUF_TYPE_HEVC];
+		}
+
+		ret = video_component_init(vdec->port, pvbuf);
+		if (ret < 0) {
+			v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_ERROR, "video_component_init  failed\n");
+			return ret;
+		}
+
+		/* connect vdec at the end after all HW initialization */
+		vdec_connect(vdec);
+	}
+
+	return 0;
+}
+
+int video_decoder_init(struct aml_vdec_adapt *vdec)
+{
+	int ret = -1;
+
+	/* sets configure data */
+	set_default_params(vdec);
+
+	/* init the buffer work space and connect vdec.*/
+	ret = vdec_ports_init(vdec);
+	if (ret < 0) {
+		v4l_dbg(vdec->ctx, V4L_DEBUG_CODEC_ERROR, "vdec ports init fail.\n");
+		goto out;
+	}
+out:
+	return ret;
+}
+
+int video_decoder_release(struct aml_vdec_adapt *vdec)
+{
+	int ret = -1;
+	struct stream_port_s *port = &vdec->port;
+
+	ret = vdec_ports_release(port);
+	if (ret < 0) {
+		v4l_dbg(vdec->ctx, V4L_DEBUG_CODEC_ERROR, "vdec ports release fail.\n");
+		goto out;
+	}
+
+	/* disable gates */
+	ret = disable_hardware(port);
+	if (ret < 0) {
+		v4l_dbg(vdec->ctx, V4L_DEBUG_CODEC_ERROR, "disable hw fail.\n");
+		goto out;
+	}
+out:
+	return ret;
+}
+
+void dump(const char* path, const char *data, unsigned int size)
+{
+	struct file *fp;
+
+	fp = filp_open(path,
+			O_CREAT | O_RDWR | O_LARGEFILE | O_APPEND, 0600);
+	if (!IS_ERR(fp)) {
+		kernel_write(fp, data, size, 0);
+		filp_close(fp, NULL);
+	}
+
+}
+int vdec_vbuf_write(struct aml_vdec_adapt *ada_ctx,
+	const char *buf, unsigned int count)
+{
+	int ret = -1;
+	int try_cnt = 100;
+	struct stream_port_s *port = &ada_ctx->port;
+	struct vdec_s *vdec = ada_ctx->vdec;
+	struct stream_buf_s *pbuf = NULL;
+
+	if (has_hevc_vdec()) {
+		pbuf = (port->type & PORT_TYPE_HEVC) ? &bufs[BUF_TYPE_HEVC] :
+			&bufs[BUF_TYPE_VIDEO];
+	} else
+		pbuf = &bufs[BUF_TYPE_VIDEO];
+
+	/*if (!(port_get_inited(priv))) {
+		r = video_decoder_init(priv);
+		if (r < 0)
+			return r;
+	}*/
+
+	do {
+		if (vdec->port_flag & PORT_FLAG_DRM)
+			ret = drm_write(ada_ctx->filp, pbuf, buf, count);
+		else
+			ret = esparser_write(ada_ctx->filp, pbuf, buf, count);
+
+		if (ret == -EAGAIN)
+			msleep(30);
+	} while (ret == -EAGAIN && try_cnt--);
+
+	if (slow_input) {
+		v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_PRINFO,
+			"slow_input: es codec write size %x\n", ret);
+		msleep(10);
+	}
+
+#ifdef DATA_DEBUG
+	/* dump to file */
+	//dump_write(vbuf, size);
+	//v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_PRINFO, "vbuf: %p, size: %u, ret: %d\n", vbuf, size, ret);
+#endif
+
+	return ret;
+}
+
+bool vdec_input_full(struct aml_vdec_adapt *ada_ctx)
+{
+	struct vdec_s *vdec = ada_ctx->vdec;
+
+	return (vdec->input.have_frame_num > 600) ? true : false;
+}
+
+int vdec_vframe_write(struct aml_vdec_adapt *ada_ctx,
+	const char *buf, unsigned int count, u64 timestamp)
+{
+	int ret = -1;
+	struct vdec_s *vdec = ada_ctx->vdec;
+
+	/* set timestamp */
+	vdec_set_timestamp(vdec, timestamp);
+
+	ret = vdec_write_vframe(vdec, buf, count);
+
+	if (slow_input) {
+		v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_PRINFO,
+			"slow_input: frame codec write size %d\n", ret);
+		msleep(30);
+	}
+
+	if (dump_output_frame > 0) {
+		dump("/data/es.data", buf, count);
+		dump_output_frame--;
+	}
+
+	v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_INPUT,
+		"write frames, vbuf: %p, size: %u, ret: %d, crc: %x, ts: %llu\n",
+		buf, count, ret, crc32_le(0, buf, count), timestamp);
+
+	return ret;
+}
+
+void vdec_vframe_input_free(void *priv, u32 handle)
+{
+	struct aml_vcodec_ctx *ctx = priv;
+
+	aml_recycle_dma_buffers(ctx, handle);
+}
+
+int vdec_vframe_write_with_dma(struct aml_vdec_adapt *ada_ctx,
+	ulong addr, u32 count, u64 timestamp, u32 handle,
+	chunk_free free, void* priv)
+{
+	int ret = -1;
+	struct vdec_s *vdec = ada_ctx->vdec;
+
+	/* set timestamp */
+	vdec_set_timestamp(vdec, timestamp);
+
+	ret = vdec_write_vframe_with_dma(vdec, addr, count,
+		handle, free, priv);
+
+	if (slow_input) {
+		v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_PRINFO,
+			"slow_input: frame codec write size %d\n", ret);
+		msleep(30);
+	}
+
+	v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_INPUT,
+		"write frames, vbuf: %lx, size: %u, ret: %d, ts: %llu\n",
+		addr, count, ret, timestamp);
+
+	return ret;
+}
+
+void aml_decoder_flush(struct aml_vdec_adapt *ada_ctx)
+{
+	struct vdec_s *vdec = ada_ctx->vdec;
+
+	if (vdec)
+		vdec_set_eos(vdec, true);
+}
+
+int aml_codec_reset(struct aml_vdec_adapt *ada_ctx, int *mode)
+{
+	struct vdec_s *vdec = ada_ctx->vdec;
+	int ret = 0;
+
+	if (vdec) {
+		if (!ada_ctx->ctx->q_data[AML_Q_DATA_SRC].resolution_changed)
+			vdec_set_eos(vdec, false);
+		if (*mode == V4L_RESET_MODE_NORMAL &&
+			vdec->input.have_frame_num == 0) {
+			v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_PRINFO,
+			"no input reset mode: %d\n", *mode);
+			*mode = V4L_RESET_MODE_LIGHT;
+		}
+		if (ada_ctx->ctx->param_sets_from_ucode &&
+			*mode == V4L_RESET_MODE_NORMAL &&
+			ada_ctx->ctx->q_data[AML_Q_DATA_SRC].resolution_changed == true) {
+			v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_PRINFO,
+			"resolution_changed reset mode: %d\n", *mode);
+			*mode = V4L_RESET_MODE_LIGHT;
+		}
+		v4l_dbg(ada_ctx->ctx, V4L_DEBUG_CODEC_PRINFO,
+			"reset mode: %d\n", *mode);
+
+		ret = vdec_v4l2_reset(vdec, *mode);
+		*mode = V4L_RESET_MODE_NORMAL;
+	}
+
+	return ret;
+}
+
+bool is_input_ready(struct aml_vdec_adapt *ada_ctx)
+{
+	struct vdec_s *vdec = ada_ctx->vdec;
+	int state = VDEC_STATUS_UNINITIALIZED;
+
+	if (vdec) {
+		state = vdec_get_status(vdec);
+
+		if (state == VDEC_STATUS_CONNECTED
+			|| state == VDEC_STATUS_ACTIVE)
+			return true;
+	}
+
+	return false;
+}
+
+int vdec_frame_number(struct aml_vdec_adapt *ada_ctx)
+{
+	struct vdec_s *vdec = ada_ctx->vdec;
+
+	if (vdec)
+		return vdec_get_frame_num(vdec);
+	else
+		return -1;
+}
+
+void v4l2_config_vdec_parm(struct aml_vdec_adapt *ada_ctx, u8 *data, u32 len)
+{
+	struct vdec_s *vdec = ada_ctx->vdec;
+
+	vdec->config_len = len > PAGE_SIZE ? PAGE_SIZE : len;
+	memcpy(vdec->config, data, vdec->config_len);
+}
diff --git a/drivers/amvdec_ports/aml_vcodec_adapt.h b/drivers/amvdec_ports/aml_vcodec_adapt.h
new file mode 100644
index 0000000..b86cbff
--- /dev/null
+++ b/drivers/amvdec_ports/aml_vcodec_adapt.h
@@ -0,0 +1,77 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#ifndef VDEC_ADAPT_H
+#define VDEC_ADAPT_H
+
+#include <linux/amlogic/media/utils/vformat.h>
+#include <linux/amlogic/media/utils/amstream.h>
+#include "../stream_input/amports/streambuf.h"
+#include "../frame_provider/decoder/utils/vdec_input.h"
+#include "aml_vcodec_drv.h"
+
+struct aml_vdec_adapt {
+	enum vformat_e format;
+	void *vsi;
+	int32_t failure;
+	uint32_t inst_addr;
+	unsigned int signaled;
+	struct aml_vcodec_ctx *ctx;
+	struct platform_device *dev;
+	wait_queue_head_t wq;
+	struct file *filp;
+	struct vdec_s *vdec;
+	struct stream_port_s port;
+	struct dec_sysinfo dec_prop;
+	struct v4l2_config_parm config;
+	int video_type;
+	char *recv_name;
+	int vfm_path;
+};
+
+int video_decoder_init(struct aml_vdec_adapt *ada_ctx);
+
+int video_decoder_release(struct aml_vdec_adapt *ada_ctx);
+
+int vdec_vbuf_write(struct aml_vdec_adapt *ada_ctx,
+	const char *buf, unsigned int count);
+
+int vdec_vframe_write(struct aml_vdec_adapt *ada_ctx,
+	const char *buf, unsigned int count, u64 timestamp);
+
+void vdec_vframe_input_free(void *priv, u32 handle);
+
+int vdec_vframe_write_with_dma(struct aml_vdec_adapt *ada_ctx,
+	ulong addr, u32 count, u64 timestamp, u32 handle,
+	chunk_free free, void *priv);
+
+bool vdec_input_full(struct aml_vdec_adapt *ada_ctx);
+
+void aml_decoder_flush(struct aml_vdec_adapt *ada_ctx);
+
+int aml_codec_reset(struct aml_vdec_adapt *ada_ctx, int *flag);
+
+extern void dump_write(const char __user *buf, size_t count);
+
+bool is_input_ready(struct aml_vdec_adapt *ada_ctx);
+
+int vdec_frame_number(struct aml_vdec_adapt *ada_ctx);
+
+#endif /* VDEC_ADAPT_H */
+
diff --git a/drivers/amvdec_ports/aml_vcodec_dec.c b/drivers/amvdec_ports/aml_vcodec_dec.c
new file mode 100644
index 0000000..afebb7a
--- /dev/null
+++ b/drivers/amvdec_ports/aml_vcodec_dec.c
@@ -0,0 +1,2715 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "aml_vcodec_drv.h"
+#include "aml_vcodec_dec.h"
+//#include "aml_vcodec_intr.h"
+#include "aml_vcodec_util.h"
+#include "vdec_drv_if.h"
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/crc32.h>
+#include "aml_vcodec_adapt.h"
+#include <linux/spinlock.h>
+
+#include "aml_vcodec_vfm.h"
+#include "../frame_provider/decoder/utils/decoder_bmmu_box.h"
+#include "../frame_provider/decoder/utils/decoder_mmu_box.h"
+
+#define KERNEL_ATRACE_TAG KERNEL_ATRACE_TAG_V4L2
+#include <trace/events/meson_atrace.h>
+
+#define OUT_FMT_IDX	0 //default h264
+#define CAP_FMT_IDX	8 //capture nv21
+
+#define AML_VDEC_MIN_W	64U
+#define AML_VDEC_MIN_H	64U
+#define DFT_CFG_WIDTH	AML_VDEC_MIN_W
+#define DFT_CFG_HEIGHT	AML_VDEC_MIN_H
+
+#define V4L2_CID_USER_AMLOGIC_BASE (V4L2_CID_USER_BASE + 0x1100)
+#define AML_V4L2_SET_DRMMODE (V4L2_CID_USER_AMLOGIC_BASE + 0)
+
+#define WORK_ITEMS_MAX (32)
+
+//#define USEC_PER_SEC 1000000
+
+#define call_void_memop(vb, op, args...)				\
+	do {								\
+		if ((vb)->vb2_queue->mem_ops->op)			\
+			(vb)->vb2_queue->mem_ops->op(args);		\
+	} while (0)
+
+/*
+ * Pixel formats advertised to user space.  AML_FMT_DEC entries are the
+ * compressed OUTPUT-queue formats (index 0 = H264, the OUT_FMT_IDX
+ * default); AML_FMT_FRAME entries are decoded CAPTURE-queue formats
+ * (index 8 = NV21, the CAP_FMT_IDX default).
+ */
+static struct aml_video_fmt aml_video_formats[] = {
+	{
+		.fourcc = V4L2_PIX_FMT_H264,
+		.type = AML_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_HEVC,
+		.type = AML_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_VP9,
+		.type = AML_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_MPEG1,
+		.type = AML_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_MPEG2,
+		.type = AML_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_MPEG4,
+		.type = AML_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_MJPEG,
+		.type = AML_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_AV1,
+		.type = AML_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_NV21,
+		.type = AML_FMT_FRAME,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_NV21M,
+		.type = AML_FMT_FRAME,
+		.num_planes = 2,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_NV12,
+		.type = AML_FMT_FRAME,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_NV12M,
+		.type = AML_FMT_FRAME,
+		.num_planes = 2,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_YUV420,
+		.type = AML_FMT_FRAME,
+		.num_planes = 1,
+	},
+};
+
+/*
+ * Supported frame-size ranges per fourcc, reported via ENUM_FRAMESIZES.
+ * All entries share the same stepwise range: min/max width and height
+ * with a step of 2 pixels in each dimension.
+ * NOTE(review): AV1 and YUV420 appear in aml_video_formats but have no
+ * entry here — confirm whether that is intentional.
+ */
+static const struct aml_codec_framesizes aml_vdec_framesizes[] = {
+	{
+		.fourcc	= V4L2_PIX_FMT_H264,
+		.stepwise = {  AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2,
+				AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2},
+	},
+	{
+		.fourcc	= V4L2_PIX_FMT_HEVC,
+		.stepwise = {  AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2,
+				AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2},
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_VP9,
+		.stepwise = {  AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2,
+				AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2},
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_MPEG1,
+		.stepwise = {  AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2,
+				AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2},
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_MPEG2,
+		.stepwise = {  AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2,
+				AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2},
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_MPEG4,
+		.stepwise = {  AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2,
+				AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2},
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_MJPEG,
+		.stepwise = {  AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2,
+				AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2},
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_NV21,
+		.stepwise = {  AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2,
+				AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2},
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_NV21M,
+		.stepwise = {  AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2,
+				AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2},
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_NV12,
+		.stepwise = {  AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2,
+				AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2},
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_NV12M,
+		.stepwise = {  AML_VDEC_MIN_W, AML_VDEC_MAX_W, 2,
+				AML_VDEC_MIN_H, AML_VDEC_MAX_H, 2},
+	},
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(aml_vdec_framesizes)
+#define NUM_FORMATS ARRAY_SIZE(aml_video_formats)
+
+extern bool multiplanar;
+extern bool dump_capture_frame;
+
+extern int dmabuf_fd_install_data(int fd, void* data, u32 size);
+extern bool is_v4l2_buf_file(struct file *file);
+
+/*
+ * Take the per-context spinlock with interrupts disabled.
+ * Returns the saved IRQ flags to hand back to aml_vcodec_ctx_unlock().
+ */
+static ulong aml_vcodec_ctx_lock(struct aml_vcodec_ctx *ctx)
+{
+	ulong state;
+
+	spin_lock_irqsave(&ctx->slock, state);
+	return state;
+}
+
+/* Release the per-context spinlock, restoring the saved IRQ flags. */
+static void aml_vcodec_ctx_unlock(struct aml_vcodec_ctx *ctx, ulong flags)
+{
+	spin_unlock_irqrestore(&ctx->slock, flags);
+}
+
+/*
+ * Look up the aml_video_formats entry whose fourcc matches the
+ * pixelformat requested in @f.  Returns NULL if unsupported.
+ */
+static struct aml_video_fmt *aml_vdec_find_format(struct v4l2_format *f)
+{
+	unsigned int i;
+
+	for (i = 0; i < NUM_FORMATS; i++) {
+		if (aml_video_formats[i].fourcc == f->fmt.pix_mp.pixelformat)
+			return &aml_video_formats[i];
+	}
+
+	return NULL;
+}
+
+/*
+ * Map a v4l2 buffer type to the matching queue data: OUTPUT types get
+ * the source (compressed) queue, everything else the destination
+ * (capture) queue.
+ */
+static struct aml_q_data *aml_vdec_get_q_data(struct aml_vcodec_ctx *ctx,
+					      enum v4l2_buf_type type)
+{
+	return V4L2_TYPE_IS_OUTPUT(type) ?
+		&ctx->q_data[AML_Q_DATA_SRC] :
+		&ctx->q_data[AML_Q_DATA_DST];
+}
+
+/*
+ * Queue a v4l2 event to user space for the given change code.
+ * If a stop command was received and the change is neither a resolution
+ * change nor EOS, the instance is forced into ABORT state and the event
+ * is rewritten to REQUEST_EXIT before dispatch.
+ */
+void aml_vdec_dispatch_event(struct aml_vcodec_ctx *ctx, u32 changes)
+{
+	struct v4l2_event event = {0};
+
+	if (ctx->receive_cmd_stop &&
+			changes != V4L2_EVENT_SRC_CH_RESOLUTION &&
+			changes != V4L2_EVENT_SEND_EOS) {
+		ctx->state = AML_STATE_ABORT;
+		ATRACE_COUNTER("v4l2_state", ctx->state);
+		changes = V4L2_EVENT_REQUEST_EXIT;
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
+			"vcodec state (AML_STATE_ABORT)\n");
+	}
+
+	switch (changes) {
+	/* all source-change variants are reported as SOURCE_CHANGE events */
+	case V4L2_EVENT_SRC_CH_RESOLUTION:
+	case V4L2_EVENT_SRC_CH_HDRINFO:
+	case V4L2_EVENT_REQUEST_RESET:
+	case V4L2_EVENT_REQUEST_EXIT:
+		event.type = V4L2_EVENT_SOURCE_CHANGE;
+		event.u.src_change.changes = changes;
+		break;
+	case V4L2_EVENT_SEND_EOS:
+		event.type = V4L2_EVENT_EOS;
+		break;
+	default:
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"unsupport dispatch event %x\n", changes);
+		return;
+	}
+
+	v4l2_event_queue_fh(&ctx->fh, &event);
+	/* HDR info changes are frequent; log them at a lower level */
+	if (changes != V4L2_EVENT_SRC_CH_HDRINFO)
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "changes: %x\n", changes);
+	else
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "changes: %x\n", changes);
+}
+
+/* Ask the adaptation layer to queue flush/EOS data into the decoder. */
+static void aml_vdec_flush_decoder(struct aml_vcodec_ctx *ctx)
+{
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "%s\n", __func__);
+
+	aml_decoder_flush(ctx->ada_ctx);
+}
+
+/*
+ * Refresh ctx->picinfo and ctx->dpb_size from the decoder, typically
+ * after a resolution change.  Keeps the previous values when the
+ * decoder cannot report picture info or reports zero dimensions.
+ */
+static void aml_vdec_pic_info_update(struct aml_vcodec_ctx *ctx)
+{
+	unsigned int dpbsize = 0;
+	int ret;
+
+	if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->last_decoded_picinfo)) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Cannot get param : GET_PARAM_PICTURE_INFO ERR\n");
+		return;
+	}
+
+	/* a zero dimension means the decoder has no valid picture yet */
+	if (ctx->last_decoded_picinfo.visible_width == 0 ||
+		ctx->last_decoded_picinfo.visible_height == 0 ||
+		ctx->last_decoded_picinfo.coded_width == 0 ||
+		ctx->last_decoded_picinfo.coded_height == 0) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Cannot get correct pic info\n");
+		return;
+	}
+
+	/*if ((ctx->last_decoded_picinfo.visible_width == ctx->picinfo.visible_width) ||
+	    (ctx->last_decoded_picinfo.visible_height == ctx->picinfo.visible_height))
+		return;*/
+
+	/* fix: last argument printed coded_width twice; "real(%d,%d)" is w,h */
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO,
+		"new(%d,%d), old(%d,%d), real(%d,%d)\n",
+			ctx->last_decoded_picinfo.visible_width,
+			ctx->last_decoded_picinfo.visible_height,
+			ctx->picinfo.visible_width, ctx->picinfo.visible_height,
+			ctx->last_decoded_picinfo.coded_width,
+			ctx->last_decoded_picinfo.coded_height);
+
+	ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+	if (dpbsize == 0)
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Incorrect dpb size, ret=%d\n", ret);
+
+	/* update picture information */
+	ctx->dpb_size = dpbsize;
+	ctx->picinfo = ctx->last_decoded_picinfo;
+}
+
+/*
+ * Decide whether buffers owned by (inst, id) may be reclaimed.
+ * Returns true when the instance is gone from dev->ctx_list (or the
+ * list is empty), or when it is still present but has received a stop
+ * command; returns false for a live instance or a NULL dev.
+ */
+static bool aml_check_inst_quit(struct aml_vcodec_dev *dev,
+	struct aml_vcodec_ctx * inst, u32 id)
+{
+	struct aml_vcodec_ctx *ctx = NULL;
+	bool ret = true;
+
+	if (dev == NULL)
+		return false;
+
+	mutex_lock(&dev->dev_mutex);
+
+	if (list_empty(&dev->ctx_list)) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"v4l inst list is empty.\n");
+		ret = true;
+		goto out;
+	}
+
+	/* match both pointer and id to guard against a recycled context */
+	list_for_each_entry(ctx, &dev->ctx_list, list) {
+		if ((ctx == inst) && (ctx->id == id)) {
+			ret = ctx->receive_cmd_stop ? true : false;
+			goto out;
+		}
+	}
+out:
+	mutex_unlock(&dev->dev_mutex);
+
+	return ret;
+}
+
+/*
+ * Release callback for a frame buffer's private data.  If the owning
+ * decode instance has quit (see aml_check_inst_quit), free the bmmu and
+ * mmu box indices held by the embedded vframe and try to release the
+ * boxes themselves.  Always zeroes and frees the private data.
+ */
+void vdec_frame_buffer_release(void *data)
+{
+	struct file_private_data *priv_data =
+		(struct file_private_data *) data;
+	struct aml_vcodec_dev *dev = (struct aml_vcodec_dev *)
+		priv_data->vb_handle;
+	struct aml_vcodec_ctx *inst = (struct aml_vcodec_ctx *)
+		priv_data->v4l_dec_ctx;
+	u32 id = priv_data->v4l_inst_id;
+
+	if (aml_check_inst_quit(dev, inst, id)) {
+		struct vframe_s *vf = &priv_data->vf;
+
+		v4l_dbg(0, V4L_DEBUG_CODEC_BUFMGR,
+			"[%d]: vf idx: %d, bmmu idx: %d, bmmu_box: %lx\n",
+			id, vf->index, vf->mm_box.bmmu_idx,
+			(ulong) vf->mm_box.bmmu_box);
+
+		v4l_dbg(0, V4L_DEBUG_CODEC_BUFMGR,
+			"[%d]: vf idx: %d, mmu_idx: %d, mmu_box: %lx\n",
+			id, vf->index, vf->mm_box.mmu_idx,
+			(ulong) vf->mm_box.mmu_box);
+
+		/* validate box handles before freeing — they may be stale */
+		if (decoder_bmmu_box_valide_check(vf->mm_box.bmmu_box)) {
+			decoder_bmmu_box_free_idx(vf->mm_box.bmmu_box,
+				vf->mm_box.bmmu_idx);
+			decoder_bmmu_try_to_release_box(vf->mm_box.bmmu_box);
+		}
+
+		if (decoder_mmu_box_valide_check(vf->mm_box.mmu_box)) {
+			decoder_mmu_box_free_idx(vf->mm_box.mmu_box,
+				vf->mm_box.mmu_idx);
+			decoder_mmu_try_to_release_box(vf->mm_box.mmu_box);
+		}
+
+	}
+
+	memset(data, 0, sizeof(struct file_private_data));
+	kfree(data);
+}
+
+/*
+ * Pop the next ready CAPTURE buffer from the m2m queue and fill in a
+ * vdec_v4l2_buffer describing its planes (dma and physical addresses,
+ * sizes, offsets from ctx->picinfo).  The buffer is marked used,
+ * accounted in cap_pool, and removed from the dst queue.
+ * Returns 0 on success, -1 when aborting or no buffer is ready.
+ */
+int get_fb_from_queue(struct aml_vcodec_ctx *ctx, struct vdec_v4l2_buffer **out_fb)
+{
+	ulong flags;
+	struct vb2_buffer *dst_buf = NULL;
+	struct vdec_v4l2_buffer *pfb;
+	struct aml_video_dec_buf *dst_buf_info, *info;
+	struct vb2_v4l2_buffer *dst_vb2_v4l2;
+
+	flags = aml_vcodec_ctx_lock(ctx);
+
+	if (ctx->state == AML_STATE_ABORT) {
+		aml_vcodec_ctx_unlock(ctx, flags);
+		return -1;
+	}
+
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	if (!dst_buf) {
+		aml_vcodec_ctx_unlock(ctx, flags);
+		return -1;
+	}
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+		"vbuf idx: %d, state: %d, ready: %d\n",
+		dst_buf->index, dst_buf->state,
+		v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx));
+
+	dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
+	dst_buf_info = container_of(dst_vb2_v4l2, struct aml_video_dec_buf, vb);
+
+	pfb	= &dst_buf_info->frame_buffer;
+	pfb->buf_idx	= dst_buf->index;
+	pfb->num_planes	= dst_buf->num_planes;
+	pfb->status		= FB_ST_NORMAL;
+	if (dst_buf->num_planes == 1) {
+		/* single plane: luma then chroma packed in one buffer */
+		pfb->m.mem[0].dma_addr	= vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+		pfb->m.mem[0].addr	= dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->m.mem[0].dma_addr);
+		pfb->m.mem[0].size	= ctx->picinfo.y_len_sz + ctx->picinfo.c_len_sz;
+		pfb->m.mem[0].offset	= ctx->picinfo.y_len_sz;
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+			"idx: %u, 1 plane, y:(0x%lx, %d)\n", dst_buf->index,
+			pfb->m.mem[0].addr, pfb->m.mem[0].size);
+	} else if (dst_buf->num_planes == 2) {
+		/* two planes: Y and interleaved CbCr */
+		pfb->m.mem[0].dma_addr	= vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+		pfb->m.mem[0].addr	= dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->m.mem[0].dma_addr);
+		pfb->m.mem[0].size	= ctx->picinfo.y_len_sz;
+		pfb->m.mem[0].offset	= 0;
+
+		pfb->m.mem[1].dma_addr	= vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+		pfb->m.mem[1].addr	= dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->m.mem[1].dma_addr);
+		pfb->m.mem[1].size	= ctx->picinfo.c_len_sz;
+		pfb->m.mem[1].offset	= ctx->picinfo.c_len_sz >> 1;
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+			"idx: %u, 2 planes, y:(0x%lx, %d), c:(0x%lx, %d)\n", dst_buf->index,
+			pfb->m.mem[0].addr, pfb->m.mem[0].size,
+			pfb->m.mem[1].addr, pfb->m.mem[1].size);
+	} else {
+		/* three planes: Y, U, V separate */
+		pfb->m.mem[0].dma_addr	= vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+		pfb->m.mem[0].addr	= dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->m.mem[0].dma_addr);
+		pfb->m.mem[0].size	= ctx->picinfo.y_len_sz;
+		pfb->m.mem[0].offset	= 0;
+
+		/*
+		 * fix: addr for planes 1 and 2 was derived from
+		 * mem[2]/mem[3].dma_addr — mem[2].dma_addr was not yet
+		 * assigned at that point and mem[3] is past the planes
+		 * filled here.  Use each plane's own dma_addr, matching
+		 * the 1- and 2-plane branches above.
+		 */
+		pfb->m.mem[1].dma_addr	= vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+		pfb->m.mem[1].addr	= dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->m.mem[1].dma_addr);
+		pfb->m.mem[1].size	= ctx->picinfo.c_len_sz >> 1;
+		pfb->m.mem[1].offset	= 0;
+
+		pfb->m.mem[2].dma_addr	= vb2_dma_contig_plane_dma_addr(dst_buf, 2);
+		pfb->m.mem[2].addr	= dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->m.mem[2].dma_addr);
+		pfb->m.mem[2].size	= ctx->picinfo.c_len_sz >> 1;
+		pfb->m.mem[2].offset	= 0;
+
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+			"idx: %u, 3 planes, y:(0x%lx, %d), u:(0x%lx, %d), v:(0x%lx, %d)\n",
+			dst_buf->index,
+			pfb->m.mem[0].addr, pfb->m.mem[0].size,
+			pfb->m.mem[1].addr, pfb->m.mem[1].size,
+			pfb->m.mem[2].addr, pfb->m.mem[2].size);
+	}
+
+	dst_buf_info->used = true;
+	ctx->buf_used_count++;
+
+	*out_fb = pfb;
+
+	info = container_of(pfb, struct aml_video_dec_buf, frame_buffer);
+
+	/* account the buffer as handed to the decoder */
+	ctx->cap_pool.dec++;
+	ctx->cap_pool.seq[ctx->cap_pool.out++] =
+		(V4L_CAP_BUFF_IN_DEC << 16 | dst_buf->index);
+	v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+	aml_vcodec_ctx_unlock(ctx, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(get_fb_from_queue);
+
+/*
+ * Return a frame buffer that was taken by the decoder back to the m2m
+ * ready queue.  No-op (still returns 0) if the buffer was not marked
+ * used; returns -1 only for a NULL buffer.
+ */
+int put_fb_to_queue(struct aml_vcodec_ctx *ctx, struct vdec_v4l2_buffer *in_fb)
+{
+	struct aml_video_dec_buf *dstbuf;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "%s\n", __func__);
+
+	if (in_fb == NULL) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR, "No free frame buffer\n");
+		return -1;
+	}
+
+	dstbuf = container_of(in_fb, struct aml_video_dec_buf, frame_buffer);
+
+	mutex_lock(&ctx->lock);
+
+	if (!dstbuf->used)
+		goto out;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO,
+		"status=%x queue id=%d to rdy_queue\n",
+		in_fb->status, dstbuf->vb.vb2_buf.index);
+
+	v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+
+	dstbuf->used = false;
+out:
+	mutex_unlock(&ctx->lock);
+
+	return 0;
+
+}
+EXPORT_SYMBOL(put_fb_to_queue);
+
+/*
+ * Hand a decoded frame buffer to user space: set plane payloads and
+ * timestamp, optionally dump the raw frame, handle the EOS marker
+ * frame (zero payload, LAST flag, EOS/resolution-change events), and
+ * complete the vb2 buffer.  Also advances FLUSHING -> FLUSHED once the
+ * EOS frame has been delivered.
+ */
+void trans_vframe_to_user(struct aml_vcodec_ctx *ctx, struct vdec_v4l2_buffer *fb)
+{
+	struct aml_video_dec_buf *dstbuf = NULL;
+	struct vb2_buffer *vb2_buf = NULL;
+	struct vframe_s *vf = (struct vframe_s *)fb->vf_handle;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_OUTPUT,
+		"FROM (%s %s) vf: %lx, ts: %llu, idx: %d, "
+		"Y:(%lx, %u) C/U:(%lx, %u) V:(%lx, %u)\n",
+		vf_get_provider(ctx->ada_ctx->recv_name)->name,
+		ctx->ada_ctx->vfm_path != FRAME_BASE_PATH_V4L_VIDEO ? "OSD" : "VIDEO",
+		(ulong) vf, vf->timestamp, vf->index,
+		fb->m.mem[0].addr, fb->m.mem[0].size,
+		fb->m.mem[1].addr, fb->m.mem[1].size,
+		fb->m.mem[2].addr, fb->m.mem[2].size);
+
+	dstbuf = container_of(fb, struct aml_video_dec_buf, frame_buffer);
+	vb2_buf = &dstbuf->vb.vb2_buf;
+
+	/* NOTE(review): 3-plane payloads are not set here — confirm intended */
+	if (dstbuf->frame_buffer.num_planes == 1) {
+		vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0, fb->m.mem[0].bytes_used);
+	} else if (dstbuf->frame_buffer.num_planes == 2) {
+		vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0, fb->m.mem[0].bytes_used);
+		vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1, fb->m.mem[1].bytes_used);
+	}
+	dstbuf->vb.vb2_buf.timestamp = vf->timestamp;
+	dstbuf->ready_to_display = true;
+
+	/* debug hook: append one decoded frame to /data/dec_dump.raw */
+	if (dump_capture_frame) {
+		struct file *fp;
+		fp = filp_open("/data/dec_dump.raw",
+				O_CREAT | O_RDWR | O_LARGEFILE | O_APPEND, 0600);
+		if (!IS_ERR(fp)) {
+			struct vb2_buffer *vb = &dstbuf->vb.vb2_buf;
+			kernel_write(fp,vb2_plane_vaddr(vb, 0),vb->planes[0].bytesused, 0);
+			if (dstbuf->frame_buffer.num_planes == 2)
+				kernel_write(fp,vb2_plane_vaddr(vb, 1),
+						vb->planes[1].bytesused, 0);
+			pr_info("dump idx: %d %dx%d\n", dump_capture_frame, vf->width, vf->height);
+			dump_capture_frame = false;
+			filp_close(fp, NULL);
+		}
+	}
+
+	/* the EOS marker frame carries no payload; flag it LAST */
+	if (vf->flag & VFRAME_FLAG_EMPTY_FRAME_V4L) {
+		dstbuf->vb.flags = V4L2_BUF_FLAG_LAST;
+		if (dstbuf->frame_buffer.num_planes == 1) {
+			vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0, 0);
+		} else if (dstbuf->frame_buffer.num_planes == 2) {
+			vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0, 0);
+			vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1, 0);
+		}
+		ctx->has_receive_eos = true;
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+			"recevie a empty frame. idx: %d, state: %d\n",
+			dstbuf->vb.vb2_buf.index,
+			dstbuf->vb.vb2_buf.state);
+		ATRACE_COUNTER("v4l2_eos", 0);
+	}
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO,
+		"receive vbuf idx: %d, state: %d\n",
+		dstbuf->vb.vb2_buf.index,
+		dstbuf->vb.vb2_buf.state);
+
+	if (vf->flag & VFRAME_FLAG_EMPTY_FRAME_V4L) {
+		if (ctx->q_data[AML_Q_DATA_SRC].resolution_changed) {
+			/* make the run to stanby until new buffs to enque. */
+			ctx->v4l_codec_dpb_ready = false;
+			ctx->reset_flag = V4L_RESET_MODE_LIGHT;
+
+			/*
+			 * After all buffers containing decoded frames from
+			 * before the resolution change point ready to be
+			 * dequeued on the CAPTURE queue, the driver sends a
+			 * V4L2_EVENT_SOURCE_CHANGE event for source change
+			 * type V4L2_EVENT_SRC_CH_RESOLUTION, also the upper
+			 * layer will get new information from cts->picinfo.
+			 */
+			aml_vdec_dispatch_event(ctx, V4L2_EVENT_SRC_CH_RESOLUTION);
+		} else
+			aml_vdec_dispatch_event(ctx, V4L2_EVENT_SEND_EOS);
+	}
+
+	if (dstbuf->vb.vb2_buf.state == VB2_BUF_STATE_ACTIVE) {
+		/* binding vframe handle. */
+		vf->flag |= VFRAME_FLAG_VIDEO_LINEAR;
+		ATRACE_COUNTER("v4l2_from", vf->index_disp);
+		dstbuf->privdata.vf = *vf;
+		dstbuf->privdata.vf.omx_index =
+			dstbuf->vb.vb2_buf.index;
+
+		v4l2_m2m_buf_done(&dstbuf->vb, VB2_BUF_STATE_DONE);
+	}
+
+	mutex_lock(&ctx->state_lock);
+	if (ctx->state == AML_STATE_FLUSHING &&
+		ctx->has_receive_eos) {
+		ctx->state = AML_STATE_FLUSHED;
+		ATRACE_COUNTER("v4l2_state", ctx->state);
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
+			"vcodec state (AML_STATE_FLUSHED)\n");
+	}
+	mutex_unlock(&ctx->state_lock);
+
+	ctx->decoded_frame_cnt++;
+}
+
+/*
+ * Fetch the next displayable frame buffer from the decoder via
+ * GET_PARAM_DISP_FRAME_BUFFER.  Returns 0 and sets *out on success,
+ * -1 when the query fails or no buffer is available.
+ */
+static int get_display_buffer(struct aml_vcodec_ctx *ctx, struct vdec_v4l2_buffer **out)
+{
+	int ret = -1;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "%s\n", __func__);
+
+	ret = vdec_if_get_param(ctx, GET_PARAM_DISP_FRAME_BUFFER, out);
+	if (ret) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Cannot get param : GET_PARAM_DISP_FRAME_BUFFER\n");
+		return -1;
+	}
+
+	if (!*out) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"No display frame buffer\n");
+		return -1;
+	}
+
+	return ret;
+}
+
+/*
+ * Mark the DPB ready once enough capture buffers have been queued
+ * (cap_pool.in >= dpb_size, with a known nonzero dpb_size).
+ * Currently unreferenced — its call site in is_vdec_ready is
+ * commented out.
+ */
+static void aml_check_dpb_ready(struct aml_vcodec_ctx *ctx)
+{
+	if (!ctx->v4l_codec_dpb_ready) {
+		/*
+		 * make sure enough dst bufs for decoding.
+		 */
+		if ((ctx->dpb_size) && (ctx->cap_pool.in >= ctx->dpb_size))
+			ctx->v4l_codec_dpb_ready = true;
+
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+			"dpb: %d, ready: %d, used: %d, dpb is ready: %s\n",
+			ctx->dpb_size, v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx),
+			ctx->cap_pool.out, ctx->v4l_codec_dpb_ready ? "yes" : "no");
+	}
+}
+
+/*
+ * Gate for the decode worker: returns 0 (and finishes the m2m job)
+ * when the decoder input is not ready, 1 otherwise.  As a side effect
+ * it advances the state machine PROBE -> READY, and READY -> ACTIVE
+ * once both queues are streaming.
+ */
+static int is_vdec_ready(struct aml_vcodec_ctx *ctx)
+{
+	struct aml_vcodec_dev *dev = ctx->dev;
+
+	if (!is_input_ready(ctx->ada_ctx)) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"the decoder input has not ready.\n");
+		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+		return 0;
+	}
+
+	/* double-checked under state_lock to avoid a racing transition */
+	if (ctx->state == AML_STATE_PROBE) {
+		mutex_lock(&ctx->state_lock);
+		if (ctx->state == AML_STATE_PROBE) {
+			ctx->state = AML_STATE_READY;
+			ATRACE_COUNTER("v4l2_state", ctx->state);
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
+				"vcodec state (AML_STATE_READY)\n");
+		}
+		mutex_unlock(&ctx->state_lock);
+	}
+
+	mutex_lock(&ctx->state_lock);
+	if (ctx->state == AML_STATE_READY) {
+		if (ctx->m2m_ctx->out_q_ctx.q.streaming &&
+			ctx->m2m_ctx->cap_q_ctx.q.streaming) {
+			ctx->state = AML_STATE_ACTIVE;
+			ATRACE_COUNTER("v4l2_state", ctx->state);
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
+				"vcodec state (AML_STATE_ACTIVE)\n");
+		}
+	}
+	mutex_unlock(&ctx->state_lock);
+
+	/* check dpb ready */
+	//aml_check_dpb_ready(ctx);
+
+	return 1;
+}
+
+/*
+ * Returns true while the decoder input holds fewer than WORK_ITEMS_MAX
+ * queued frames; otherwise finishes the current m2m job and returns
+ * false so the worker backs off.
+ */
+static bool is_enough_work_items(struct aml_vcodec_ctx *ctx)
+{
+	struct aml_vcodec_dev *dev = ctx->dev;
+
+	if (vdec_frame_number(ctx->ada_ctx) < WORK_ITEMS_MAX)
+		return true;
+
+	v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+	return false;
+}
+
+/*
+ * Spin until the DPB is considered ready (ready + used capture buffers
+ * reach dpb_size) or a 1-second deadline expires.
+ * NOTE(review): this is a tight busy-wait with no sleep or cpu_relax in
+ * the loop — it can hog a CPU for up to a second; confirm whether the
+ * calling context forbids sleeping here.
+ */
+static void aml_wait_dpb_ready(struct aml_vcodec_ctx *ctx)
+{
+	ulong expires;
+
+	expires = jiffies + msecs_to_jiffies(1000);
+	while (!ctx->v4l_codec_dpb_ready) {
+		u32 ready_num = 0;
+
+		if (time_after(jiffies, expires)) {
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+				"the DPB state has not ready.\n");
+			break;
+		}
+
+		ready_num = v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx);
+		if ((ready_num + ctx->buf_used_count) >= ctx->dpb_size)
+			ctx->v4l_codec_dpb_ready = true;
+	}
+}
+
+/*
+ * Return a consumed DMA input buffer (identified by the low nibble of
+ * @handle) to user space on the OUTPUT queue, done or error per the
+ * buffer's error flag.  Skipped entirely after output streamoff.
+ * NOTE(review): handle & 0xf caps the index at 15 — confirm the output
+ * queue never exceeds 16 buffers.
+ */
+void aml_recycle_dma_buffers(struct aml_vcodec_ctx *ctx, u32 handle)
+{
+	struct vb2_v4l2_buffer *vb;
+	struct aml_video_dec_buf *buf;
+	struct vb2_queue *q;
+	int index = handle & 0xf;
+
+	if (ctx->is_out_stream_off) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_INPUT,
+			"ignore buff idx: %d streamoff\n", index);
+		return;
+	}
+	q = v4l2_m2m_get_vq(ctx->m2m_ctx,
+		V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+	vb = to_vb2_v4l2_buffer(q->bufs[index]);
+	buf = container_of(vb, struct aml_video_dec_buf, vb);
+	v4l2_m2m_buf_done(vb, buf->error ? VB2_BUF_STATE_ERROR :
+		VB2_BUF_STATE_DONE);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_INPUT,
+		"recycle buff idx: %d, vbuf: %lx\n", index,
+		(ulong)vb2_dma_contig_plane_dma_addr(q->bufs[index], 0));
+}
+
+/*
+ * Workqueue handler driving one decode step: validates decoder/state
+ * readiness, pops the next OUTPUT buffer, and either (a) treats the
+ * sentinel "lastframe" buffer as a flush request, or (b) submits the
+ * chunk via vdec_if_decode and completes the src buffer according to
+ * the result (done, error, resolution change, or retry after a delay).
+ */
+static void aml_vdec_worker(struct work_struct *work)
+{
+	struct aml_vcodec_ctx *ctx =
+		container_of(work, struct aml_vcodec_ctx, decode_work);
+	struct aml_vcodec_dev *dev = ctx->dev;
+	struct vb2_buffer *src_buf;
+	struct aml_vcodec_mem buf;
+	bool res_chg = false;
+	int ret;
+	struct aml_video_dec_buf *src_buf_info;
+	struct vb2_v4l2_buffer *src_vb2_v4l2;
+
+	/* only run while the instance is between INIT and FLUSHED */
+	if (ctx->state < AML_STATE_INIT ||
+		ctx->state > AML_STATE_FLUSHED) {
+		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+		goto out;
+	}
+
+	if (!is_vdec_ready(ctx)) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"the decoder has not ready.\n");
+		goto out;
+	}
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	if (src_buf == NULL) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"src_buf empty.\n");
+		goto out;
+	}
+
+	/*this case for google, but some frames are droped on ffmpeg, so disabled temp.*/
+	if (0 && !is_enough_work_items(ctx))
+		goto out;
+
+	src_vb2_v4l2 = container_of(src_buf, struct vb2_v4l2_buffer, vb2_buf);
+	src_buf_info = container_of(src_vb2_v4l2, struct aml_video_dec_buf, vb);
+
+	if (src_buf_info->lastframe) {
+		/*the empty data use to flushed the decoder.*/
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+			"Got empty flush input buffer.\n");
+
+		/*
+		 * when inputs a small amount of src buff, then soon to
+		 * switch state FLUSHING, must to wait the DBP to be ready.
+		 */
+		if (!ctx->v4l_codec_dpb_ready) {
+			v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+			goto out;
+		}
+
+		mutex_lock(&ctx->state_lock);
+		if (ctx->state == AML_STATE_ACTIVE) {
+			ctx->state = AML_STATE_FLUSHING;// prepare flushing
+			ATRACE_COUNTER("v4l2_state", ctx->state);
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
+				"vcodec state (AML_STATE_FLUSHING-LASTFRM)\n");
+		}
+		mutex_unlock(&ctx->state_lock);
+
+		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+
+		/* sets eos data for vdec input. */
+		aml_vdec_flush_decoder(ctx);
+
+		goto out;
+	}
+
+	/* describe the input chunk handed to vdec_if_decode */
+	buf.index	= src_buf->index;
+	buf.vaddr	= vb2_plane_vaddr(src_buf, 0);
+	buf.addr	= vb2_dma_contig_plane_dma_addr(src_buf, 0);
+	buf.size	= src_buf->planes[0].bytesused;
+	buf.model	= src_buf->memory;
+	buf.timestamp	= src_buf->timestamp;
+
+	if (!buf.vaddr && !buf.addr) {
+		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"id=%d src_addr is NULL.\n", src_buf->index);
+		goto out;
+	}
+
+	src_buf_info->used = true;
+
+	/* v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO,
+		"size: 0x%zx, crc: 0x%x\n",
+		buf.size, crc32(0, buf.va, buf.size));*/
+
+	/* pts = (time / 10e6) * (90k / fps) */
+	/*v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO,
+		"timestamp: 0x%llx\n", src_buf->timestamp);*/
+
+	ret = vdec_if_decode(ctx, &buf, &res_chg);
+	if (ret > 0) {
+		/*
+		 * we only return src buffer with VB2_BUF_STATE_DONE
+		 * when decode success without resolution change.
+		 */
+		v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+		/* drm dmabuf inputs are recycled later via aml_recycle_dma_buffers */
+		if (!(ctx->is_drm_mode && buf.model == VB2_MEMORY_DMABUF))
+			v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
+	} else if (ret && ret != -EAGAIN) {
+		src_buf_info->error = (ret == -EIO ? true : false);
+		v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+
+		if (!(ctx->is_drm_mode && buf.model == VB2_MEMORY_DMABUF))
+			v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_ERROR);
+
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"error processing src data. %d.\n", ret);
+	} else if (res_chg) {
+		/* wait the DPB state to be ready. */
+		aml_wait_dpb_ready(ctx);
+
+		/* the chunk stays queued and is re-decoded after the reset */
+		src_buf_info->used = false;
+		aml_vdec_pic_info_update(ctx);
+		/*
+		 * On encountering a resolution change in the stream.
+		 * The driver must first process and decode all
+		 * remaining buffers from before the resolution change
+		 * point, so call flush decode here
+		 */
+		mutex_lock(&ctx->state_lock);
+		if (ctx->state == AML_STATE_ACTIVE) {
+			ctx->state = AML_STATE_FLUSHING;// prepare flushing
+			ATRACE_COUNTER("v4l2_state", ctx->state);
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
+				"vcodec state (AML_STATE_FLUSHING-RESCHG)\n");
+		}
+		mutex_unlock(&ctx->state_lock);
+
+		ctx->q_data[AML_Q_DATA_SRC].resolution_changed = true;
+		while (ctx->m2m_ctx->job_flags & TRANS_RUNNING) {
+			v4l2_m2m_job_pause(dev->m2m_dev_dec, ctx->m2m_ctx);
+		}
+
+		aml_vdec_flush_decoder(ctx);
+
+		goto out;
+	} else {
+		/* decoder is lack of resource, retry after short delay */
+		usleep_range(50000, 55000);
+	}
+
+	v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+out:
+	return;
+}
+
+/*
+ * Reset the decoder core (per ctx->reset_flag).  On failure the
+ * instance goes to ABORT; on success from RESET state it returns to
+ * PROBE, clears the pending resolution-change flag and resumes the m2m
+ * job.  Always completes ctx->comp so waiters are released.
+ */
+static void aml_vdec_reset(struct aml_vcodec_ctx *ctx)
+{
+	if (ctx->state == AML_STATE_ABORT) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"the decoder will be exited.\n");
+		goto out;
+	}
+
+	if (aml_codec_reset(ctx->ada_ctx, &ctx->reset_flag)) {
+		ctx->state = AML_STATE_ABORT;
+		ATRACE_COUNTER("v4l2_state", ctx->state);
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
+			"vcodec state (AML_STATE_ABORT).\n");
+		goto out;
+	}
+
+	if (ctx->state ==  AML_STATE_RESET) {
+		ctx->state = AML_STATE_PROBE;
+		ATRACE_COUNTER("v4l2_state", ctx->state);
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
+			"vcodec state (AML_STATE_PROBE)\n");
+
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+			"dpb: %d, ready: %d, used: %d\n", ctx->dpb_size,
+			v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx),
+			ctx->buf_used_count);
+
+		/* vdec has ready to decode subsequence data of new resolution. */
+		ctx->q_data[AML_Q_DATA_SRC].resolution_changed = false;
+		v4l2_m2m_job_resume(ctx->dev->m2m_dev_dec, ctx->m2m_ctx);
+	}
+
+out:
+	complete(&ctx->comp);
+	return;
+}
+
+/*
+ * Quiesce the instance before teardown: stop feeding the worker, drain
+ * the decode workqueue, reset the decoder if it ever left INIT, pause
+ * any running m2m job, and drop the DPB-ready flag.
+ */
+void wait_vcodec_ending(struct aml_vcodec_ctx *ctx)
+{
+	struct aml_vcodec_dev *dev = ctx->dev;
+
+	/* disable queue output item to worker. */
+	ctx->output_thread_ready = false;
+
+	/* flush output buffer worker. */
+	flush_workqueue(dev->decode_workqueue);
+
+	/* clean output cache and decoder status . */
+	if (ctx->state > AML_STATE_INIT)
+		aml_vdec_reset(ctx);
+
+	/* pause the job and clean trans status. */
+	while (ctx->m2m_ctx->job_flags & TRANS_RUNNING) {
+		v4l2_m2m_job_pause(ctx->dev->m2m_dev_dec, ctx->m2m_ctx);
+	}
+
+	ctx->v4l_codec_dpb_ready = false;
+}
+
+/*
+ * Capture-thread step: fetch the next displayable buffer from the
+ * decoder and, if one exists, deliver it to user space.
+ */
+void try_to_capture(struct aml_vcodec_ctx *ctx)
+{
+	int ret = 0;
+	struct vdec_v4l2_buffer *fb = NULL;
+
+	ret = get_display_buffer(ctx, &fb);
+	if (ret) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"the que have no disp buf,ret: %d\n", ret);
+		return;
+	}
+
+	trans_vframe_to_user(ctx, fb);
+}
+EXPORT_SYMBOL_GPL(try_to_capture);
+
+/*
+ * Generic kthread body for per-context helper threads: runs SCHED_FIFO,
+ * waits on the thread's semaphore and invokes its handler each time it
+ * is signalled, exiting when interrupted or thread->stop is set.  The
+ * trailing loop idles until kthread_stop() is called.
+ */
+static int vdec_thread(void *data)
+{
+	struct sched_param param =
+		{.sched_priority = MAX_RT_PRIO / 2};
+	struct aml_vdec_thread *thread =
+		(struct aml_vdec_thread *) data;
+	struct aml_vcodec_ctx *ctx =
+		(struct aml_vcodec_ctx *) thread->priv;
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+
+	for (;;) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO,
+			"%s, state: %d\n", __func__, ctx->state);
+
+		if (down_interruptible(&thread->sem))
+			break;
+
+		if (thread->stop)
+			break;
+
+		/* handle event. */
+		thread->func(ctx);
+	}
+
+	/* park until kthread_stop() is invoked by aml_thread_stop */
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+	}
+
+	return 0;
+}
+
+/*
+ * Wake every running helper thread of the given type by posting its
+ * semaphore.  Threads without a task (failed start) are skipped.
+ */
+void aml_thread_notify(struct aml_vcodec_ctx *ctx,
+	enum aml_thread_type type)
+{
+	struct aml_vdec_thread *thread = NULL;
+
+	mutex_lock(&ctx->lock);
+	list_for_each_entry(thread, &ctx->vdec_thread_list, node) {
+		if (thread->task == NULL)
+			continue;
+
+		if (thread->type == type)
+			up(&thread->sem);
+	}
+	mutex_unlock(&ctx->lock);
+}
+EXPORT_SYMBOL_GPL(aml_thread_notify);
+
+/*
+ * Create and register a helper thread of the given type for this
+ * context.  The thread runs vdec_thread() at SCHED_FIFO priority and
+ * calls @func each time it is notified.  Returns 0 on success or a
+ * negative errno (the thread struct is freed on failure).
+ */
+int aml_thread_start(struct aml_vcodec_ctx *ctx, aml_thread_func func,
+	enum aml_thread_type type, const char *thread_name)
+{
+	struct aml_vdec_thread *thread;
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	int ret = 0;
+
+	thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+	if (thread == NULL)
+		return -ENOMEM;
+
+	thread->type = type;
+	thread->func = func;
+	thread->priv = ctx;
+	sema_init(&thread->sem, 0);
+
+	thread->task = kthread_run(vdec_thread, thread, "aml-%s", thread_name);
+	if (IS_ERR(thread->task)) {
+		ret = PTR_ERR(thread->task);
+		thread->task = NULL;
+		goto err;
+	}
+	sched_setscheduler_nocheck(thread->task, SCHED_FIFO, &param);
+
+	/* NOTE(review): list_add is not under ctx->lock here, unlike
+	 * aml_thread_notify/aml_thread_stop — confirm start cannot race them. */
+	list_add(&thread->node, &ctx->vdec_thread_list);
+
+	return 0;
+
+err:
+	kfree(thread);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(aml_thread_start);
+
+/*
+ * Stop and free every helper thread on the context's list: unlink the
+ * node, set the stop flag, post the semaphore so the thread leaves its
+ * wait, then kthread_stop() it and free the bookkeeping struct.
+ */
+void aml_thread_stop(struct aml_vcodec_ctx *ctx)
+{
+	struct aml_vdec_thread *thread = NULL;
+
+	while (!list_empty(&ctx->vdec_thread_list)) {
+		thread = list_entry(ctx->vdec_thread_list.next,
+			struct aml_vdec_thread, node);
+		mutex_lock(&ctx->lock);
+		list_del(&thread->node);
+		mutex_unlock(&ctx->lock);
+
+		thread->stop = true;
+		up(&thread->sem);
+		kthread_stop(thread->task);
+		thread->task = NULL;
+		kfree(thread);
+	}
+}
+EXPORT_SYMBOL_GPL(aml_thread_stop);
+
+/*
+ * VIDIOC_TRY_DECODER_CMD: accept only STOP/START with zero flags;
+ * anything else is -EINVAL.
+ */
+static int vidioc_try_decoder_cmd(struct file *file, void *priv,
+				struct v4l2_decoder_cmd *cmd)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, cmd: %u\n", __func__, cmd->cmd);
+
+	switch (cmd->cmd) {
+	case V4L2_DEC_CMD_STOP:
+	case V4L2_DEC_CMD_START:
+		if (cmd->flags != 0) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+				"cmd->flags=%u\n", cmd->flags);
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * VIDIOC_DECODER_CMD handler.  STOP: if still before PROBE the instance
+ * is aborted immediately; otherwise (with the output queue streaming) a
+ * sentinel empty buffer is queued to flush the pipeline and
+ * receive_cmd_stop is latched.  START: clears the CAPTURE queue's
+ * last-buffer-dequeued flag so dequeueing can resume after a drain.
+ */
+static int vidioc_decoder_cmd(struct file *file, void *priv,
+				struct v4l2_decoder_cmd *cmd)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+	struct vb2_queue *src_vq, *dst_vq;
+	int ret;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, cmd: %u\n", __func__, cmd->cmd);
+
+	ret = vidioc_try_decoder_cmd(file, priv, cmd);
+	if (ret)
+		return ret;
+
+	switch (cmd->cmd) {
+	case V4L2_DEC_CMD_STOP:
+		ATRACE_COUNTER("v4l2_stop", 0);
+		if (ctx->state != AML_STATE_ACTIVE) {
+			if (ctx->state >= AML_STATE_IDLE &&
+				ctx->state < AML_STATE_PROBE) {
+				ctx->state = AML_STATE_ABORT;
+				ATRACE_COUNTER("v4l2_state", ctx->state);
+				aml_vdec_dispatch_event(ctx, V4L2_EVENT_REQUEST_EXIT);
+				v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
+					"vcodec state (AML_STATE_ABORT)\n");
+				return 0;
+			}
+		}
+
+		src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+		if (!vb2_is_streaming(src_vq)) {
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+				"Output stream is off. No need to flush.\n");
+			return 0;
+		}
+
+		/* flush pipeline */
+		v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf->vb);
+		v4l2_m2m_try_schedule(ctx->m2m_ctx);//pay attention
+		ctx->receive_cmd_stop = true;
+		break;
+
+	case V4L2_DEC_CMD_START:
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "CMD V4L2_DEC_CMD_START\n");
+		dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+			multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+			V4L2_BUF_TYPE_VIDEO_CAPTURE);
+		vb2_clear_last_buffer_dequeued(dst_vq);//pay attention
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
/*
 * Handle VIDIOC_STREAMON.
 *
 * When the CAPTURE side restarts after a STREAMOFF (seek, resolution
 * change), the decoder is reset under state_lock before streaming
 * resumes so stale state from the previous run is discarded. The
 * OUTPUT side only clears its off flag.
 */
static int vidioc_decoder_streamon(struct file *file, void *priv,
	enum v4l2_buf_type i)
{
	struct v4l2_fh *fh = file->private_data;
	struct aml_vcodec_ctx *ctx = fh_to_ctx(fh);
	struct vb2_queue *q;

	q = v4l2_m2m_get_vq(fh->m2m_ctx, i);
	if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
		if (ctx->is_stream_off) {
			mutex_lock(&ctx->state_lock);
			/*
			 * Reset only from states where the decoder actually
			 * ran, or when userspace requested a light reset.
			 */
			if ((ctx->state == AML_STATE_ACTIVE ||
				ctx->state == AML_STATE_FLUSHING ||
				ctx->state == AML_STATE_FLUSHED) ||
				(ctx->reset_flag == V4L_RESET_MODE_LIGHT)) {
				ctx->state = AML_STATE_RESET;
				ATRACE_COUNTER("v4l2_state", ctx->state);
				ctx->v4l_codec_dpb_ready = false;

				v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
					"vcodec state (AML_STATE_RESET)\n");
				aml_vdec_reset(ctx);
			}
			mutex_unlock(&ctx->state_lock);

			ctx->is_stream_off = false;
			ctx->v4l_resolution_change = false;
		}
	} else
		ctx->is_out_stream_off = false;

	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
		"%s, type: %d\n", __func__, q->type);

	return v4l2_m2m_ioctl_streamon(file, priv, i);
}
+
+static int vidioc_decoder_streamoff(struct file *file, void *priv,
+	enum v4l2_buf_type i)
+{
+	struct v4l2_fh *fh = file->private_data;
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(fh);
+	struct vb2_queue *q;
+
+	q = v4l2_m2m_get_vq(fh->m2m_ctx, i);
+	if (!V4L2_TYPE_IS_OUTPUT(q->type))
+		ctx->is_stream_off = true;
+	else
+		ctx->is_out_stream_off = true;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %d\n", __func__, q->type);
+
+	return v4l2_m2m_ioctl_streamoff(file, priv, i);
+}
+
/*
 * Handle VIDIOC_REQBUFS.
 *
 * A count of zero releases the queue. On the CAPTURE side the request
 * is compared against the decoder's dpb_size (clamping is currently
 * disabled, see below). On the OUTPUT side the memory model is recorded
 * so the driver knows whether input arrives via dma-buf.
 */
static int vidioc_decoder_reqbufs(struct file *file, void *priv,
	struct v4l2_requestbuffers *rb)
{
	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
	struct v4l2_fh *fh = file->private_data;
	struct vb2_queue *q;

	q = v4l2_m2m_get_vq(fh->m2m_ctx, rb->type);

	if (!rb->count)
		vb2_queue_release(q);

	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
		"%s, type: %d, count: %d\n",
		__func__, q->type, rb->count);

	if (!V4L2_TYPE_IS_OUTPUT(rb->type)) {
		/* driver needs match v4l buffer number with dpb_size */
		if (rb->count > ctx->dpb_size) {
			v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
					"reqbufs (st:%d) %d -> %d\n",
					ctx->state, rb->count, ctx->dpb_size);
			/* NOTE(review): clamping deliberately disabled. */
			//rb->count = ctx->dpb_size;
		}
	} else {
		/* Remember whether OUTPUT buffers are dma-buf imported. */
		ctx->output_dma_mode =
			(rb->memory == VB2_MEMORY_DMABUF) ? 1 : 0;

		v4l_dbg(ctx, V4L_DEBUG_CODEC_INPUT,
			"output buffer memory mode is %d\n", rb->memory);
	}

	return v4l2_m2m_ioctl_reqbufs(file, priv, rb);
}
+
+static int vidioc_vdec_querybuf(struct file *file, void *priv,
+	struct v4l2_buffer *buf)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %d\n", __func__, buf->type);
+
+	return v4l2_m2m_ioctl_querybuf(file, priv, buf);
+}
+
+static int vidioc_vdec_expbuf(struct file *file, void *priv,
+	struct v4l2_exportbuffer *eb)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %d\n", __func__, eb->type);
+
+	return v4l2_m2m_ioctl_expbuf(file, priv, eb);
+}
+
+void aml_vcodec_dec_release(struct aml_vcodec_ctx *ctx)
+{
+	ulong flags;
+
+	flags = aml_vcodec_ctx_lock(ctx);
+	ctx->state = AML_STATE_ABORT;
+	ATRACE_COUNTER("v4l2_state", ctx->state);
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
+		"vcodec state (AML_STATE_ABORT)\n");
+	aml_vcodec_ctx_unlock(ctx, flags);
+
+	vdec_if_deinit(ctx);
+}
+
/*
 * Initialise a freshly-opened decoder context with default queue
 * parameters and move it to the IDLE state.
 *
 * The OUTPUT (bitstream) queue gets a fixed 1 MiB plane; the CAPTURE
 * (frame) queue gets NV12-style two-plane sizing derived from the
 * default coded dimensions after hardware alignment.
 */
void aml_vcodec_dec_set_default_params(struct aml_vcodec_ctx *ctx)
{
	struct aml_q_data *q_data;

	ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
	ctx->fh.m2m_ctx = ctx->m2m_ctx;
	ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
	INIT_WORK(&ctx->decode_work, aml_vdec_worker);
	/* Default colour description until the stream reports its own. */
	ctx->colorspace = V4L2_COLORSPACE_REC709;
	ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
	ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
	/* 0 = no capability restriction (VCODEC_CAPABILITY_4K_DISABLED would disable 4k). */
	ctx->dev->dec_capability = 0;

	/* OUTPUT queue: compressed bitstream, single 1 MiB plane. */
	q_data = &ctx->q_data[AML_Q_DATA_SRC];
	memset(q_data, 0, sizeof(struct aml_q_data));
	q_data->visible_width = DFT_CFG_WIDTH;
	q_data->visible_height = DFT_CFG_HEIGHT;
	q_data->fmt = &aml_video_formats[OUT_FMT_IDX];
	q_data->field = V4L2_FIELD_NONE;

	q_data->sizeimage[0] = (1024 * 1024); /* fixed 1 MiB bitstream plane */
	q_data->bytesperline[0] = 0;

	/* CAPTURE queue: decoded frames at the default dimensions. */
	q_data = &ctx->q_data[AML_Q_DATA_DST];
	memset(q_data, 0, sizeof(struct aml_q_data));
	q_data->visible_width = DFT_CFG_WIDTH;
	q_data->visible_height = DFT_CFG_HEIGHT;
	q_data->coded_width = DFT_CFG_WIDTH;
	q_data->coded_height = DFT_CFG_HEIGHT;
	q_data->fmt = &aml_video_formats[CAP_FMT_IDX];
	q_data->field = V4L2_FIELD_NONE;

	/* Align coded size to HW constraints (width 2^4, height 2^5, size 2^6). */
	v4l_bound_align_image(&q_data->coded_width,
				AML_VDEC_MIN_W,
				AML_VDEC_MAX_W, 4,
				&q_data->coded_height,
				AML_VDEC_MIN_H,
				AML_VDEC_MAX_H, 5, 6);

	/* Plane 0 = luma, plane 1 = half-size chroma. */
	q_data->sizeimage[0] = q_data->coded_width * q_data->coded_height;
	q_data->bytesperline[0] = q_data->coded_width;
	q_data->sizeimage[1] = q_data->sizeimage[0] / 2;
	q_data->bytesperline[1] = q_data->coded_width;
	ctx->reset_flag = V4L_RESET_MODE_NORMAL;

	ctx->state = AML_STATE_IDLE;
	ATRACE_COUNTER("v4l2_state", ctx->state);
	v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
		"vcodec state (AML_STATE_IDLE)\n");
}
+
+static int vidioc_vdec_qbuf(struct file *file, void *priv,
+	struct v4l2_buffer *buf)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+	int ret;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %d\n", __func__, buf->type);
+
+	if (ctx->state == AML_STATE_ABORT) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Call on QBUF after unrecoverable error, type = %s\n",
+			V4L2_TYPE_IS_OUTPUT(buf->type) ? "OUT" : "IN");
+		return -EIO;
+	}
+
+	ret = v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+
+	if (V4L2_TYPE_IS_OUTPUT(buf->type)) {
+		if (ret == -EAGAIN)
+			ATRACE_COUNTER("v4l2_qbuf_eagain", 0);
+		else
+			ATRACE_COUNTER("v4l2_qbuf_ok", 0);
+	}
+	return ret;
+}
+
+static int vidioc_vdec_dqbuf(struct file *file, void *priv,
+	struct v4l2_buffer *buf)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+	int ret;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %d\n", __func__, buf->type);
+
+	if (ctx->state == AML_STATE_ABORT) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Call on DQBUF after unrecoverable error, type = %s\n",
+			V4L2_TYPE_IS_OUTPUT(buf->type) ? "OUT" : "IN");
+		if (!V4L2_TYPE_IS_OUTPUT(buf->type))
+			return -EIO;
+	}
+
+	ret = v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+	if (V4L2_TYPE_IS_OUTPUT(buf->type)) {
+		if (ret == -EAGAIN)
+			ATRACE_COUNTER("v4l2_dqin_eagain", 0);
+		else
+			ATRACE_COUNTER("v4l2_dqin_ok", 0);
+	} else {
+		if (ret == -EAGAIN)
+			ATRACE_COUNTER("v4l2_dqout_eagain", 0);
+	}
+
+	if (!ret && !V4L2_TYPE_IS_OUTPUT(buf->type)) {
+		struct vb2_queue *vq;
+		struct vb2_v4l2_buffer *vb2_v4l2 = NULL;
+		struct aml_video_dec_buf *aml_buf = NULL;
+
+		vq = v4l2_m2m_get_vq(ctx->m2m_ctx, buf->type);
+		vb2_v4l2 = to_vb2_v4l2_buffer(vq->bufs[buf->index]);
+		aml_buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb);
+		aml_buf->privdata.vb_handle	= (ulong) ctx->dev;
+		aml_buf->privdata.v4l_dec_ctx	= (ulong) ctx;
+		aml_buf->privdata.v4l_inst_id		= ctx->id;
+
+		file = fget(vb2_v4l2->private);
+		if (is_v4l2_buf_file(file)) {
+			dmabuf_fd_install_data(vb2_v4l2->private,
+				(void*)&aml_buf->privdata,
+				sizeof(struct file_private_data));
+			ATRACE_COUNTER("v4l2_dqout_ok", aml_buf->privdata.vf.index_disp);
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "disp: %d, vf: %lx\n",
+				aml_buf->privdata.vf.index_disp,
+				(ulong) v4l_get_vf_handle(vb2_v4l2->private));
+		}
+		fput(file);
+	}
+
+	return ret;
+}
+
+static int vidioc_vdec_querycap(struct file *file, void *priv,
+	struct v4l2_capability *cap)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	strlcpy(cap->driver, AML_VCODEC_DEC_NAME, sizeof(cap->driver));
+	strlcpy(cap->bus_info, AML_PLATFORM_STR, sizeof(cap->bus_info));
+	strlcpy(cap->card, AML_PLATFORM_STR, sizeof(cap->card));
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, %s\n", __func__, cap->card);
+
+	return 0;
+}
+
+static int vidioc_vdec_subscribe_evt(struct v4l2_fh *fh,
+	const struct v4l2_event_subscription *sub)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(fh);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %d\n", __func__, sub->type);
+
+	switch (sub->type) {
+	case V4L2_EVENT_EOS:
+		return v4l2_event_subscribe(fh, sub, 2, NULL);
+	case V4L2_EVENT_SOURCE_CHANGE:
+		return v4l2_src_change_event_subscribe(fh, sub);
+	default:
+		return v4l2_ctrl_subscribe_event(fh, sub);
+	}
+}
+
+static int vidioc_vdec_event_unsubscribe(struct v4l2_fh *fh,
+	const struct v4l2_event_subscription *sub)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(fh);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, type: %d\n",
+		__func__, sub->type);
+
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
/*
 * Adjust a requested format to something the decoder can handle.
 *
 * OUTPUT (bitstream): single plane, no line stride; interlaced field
 * values are only tolerated for MPEG2/H.264.
 * CAPTURE (frames): clamp the dimensions to the supported range, align
 * to hardware constraints, and fill in per-plane size/stride.
 * Reserved fields and flags are always cleared. Returns 0.
 */
static int vidioc_try_fmt(struct v4l2_format *f, struct aml_video_fmt *fmt)
{
	struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
	int i;

	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		pix_fmt_mp->num_planes = 1;
		pix_fmt_mp->plane_fmt[0].bytesperline = 0;
		/* Only MPEG2 and H.264 may carry an interlaced field value. */
		if (pix_fmt_mp->pixelformat != V4L2_PIX_FMT_MPEG2  &&
		    pix_fmt_mp->pixelformat != V4L2_PIX_FMT_H264)
			pix_fmt_mp->field = V4L2_FIELD_NONE;
		else if (pix_fmt_mp->field != V4L2_FIELD_NONE)
			pr_info("%s, field: %u, fmt: %u\n",
				__func__, pix_fmt_mp->field,
				pix_fmt_mp->pixelformat);
	} else if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
		int tmp_w, tmp_h;

		pix_fmt_mp->field = V4L2_FIELD_NONE;
		pix_fmt_mp->height = clamp(pix_fmt_mp->height,
					AML_VDEC_MIN_H,
					AML_VDEC_MAX_H);
		pix_fmt_mp->width = clamp(pix_fmt_mp->width,
					AML_VDEC_MIN_W,
					AML_VDEC_MAX_W);

		/*
		 * Find next closer width align 64, height align 64, size
		 * align 64 rectangle
		 * Note: This only get default value, the real HW needed value
		 *       only available when ctx in AML_STATE_PROBE state
		 */
		tmp_w = pix_fmt_mp->width;
		tmp_h = pix_fmt_mp->height;
		v4l_bound_align_image(&pix_fmt_mp->width,
					AML_VDEC_MIN_W,
					AML_VDEC_MAX_W, 6,
					&pix_fmt_mp->height,
					AML_VDEC_MIN_H,
					AML_VDEC_MAX_H, 6, 9);

		/* Alignment rounds down; step up one 64-unit if we shrank. */
		if (pix_fmt_mp->width < tmp_w &&
			(pix_fmt_mp->width + 64) <= AML_VDEC_MAX_W)
			pix_fmt_mp->width += 64;
		if (pix_fmt_mp->height < tmp_h &&
			(pix_fmt_mp->height + 64) <= AML_VDEC_MAX_H)
			pix_fmt_mp->height += 64;

		pix_fmt_mp->num_planes = fmt->num_planes;
		pix_fmt_mp->plane_fmt[0].sizeimage =
				pix_fmt_mp->width * pix_fmt_mp->height;
		pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width;

		/* Second plane (chroma) is half the luma size. */
		if (pix_fmt_mp->num_planes == 2) {
			pix_fmt_mp->plane_fmt[1].sizeimage =
				(pix_fmt_mp->width * pix_fmt_mp->height) / 2;
			pix_fmt_mp->plane_fmt[1].bytesperline =
				pix_fmt_mp->width;
		}
	}

	for (i = 0; i < pix_fmt_mp->num_planes; i++)
		memset(&(pix_fmt_mp->plane_fmt[i].reserved[0]), 0x0,
			   sizeof(pix_fmt_mp->plane_fmt[0].reserved));

	pix_fmt_mp->flags = 0;
	memset(&pix_fmt_mp->reserved, 0x0, sizeof(pix_fmt_mp->reserved));
	return 0;
}
+
+static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct aml_video_fmt *fmt = NULL;
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %u, planes: %u, fmt: %u\n",
+		__func__, f->type, f->fmt.pix_mp.num_planes,
+		f->fmt.pix_mp.pixelformat);
+
+	fmt = aml_vdec_find_format(f);
+	if (!fmt)
+		return -EINVAL;
+
+	return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+	struct aml_video_fmt *fmt = NULL;
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %u, planes: %u, fmt: %u\n",
+		__func__, f->type, f->fmt.pix_mp.num_planes,
+		f->fmt.pix_mp.pixelformat);
+
+	fmt = aml_vdec_find_format(f);
+	if (!fmt)
+		return -EINVAL;
+
+	if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"sizeimage of output format must be given\n");
+		return -EINVAL;
+	}
+
+	return vidioc_try_fmt(f, fmt);
+}
+
/*
 * Handle VIDIOC_G_SELECTION on the CAPTURE queue.
 *
 * COMPOSE/COMPOSE_DEFAULT report the stream's crop rectangle (falling
 * back to the queue's visible size until the header is parsed);
 * COMPOSE_BOUNDS reports the full coded rectangle. Before the PROBE
 * state any answer is overridden with the default visible size since
 * no header information exists yet.
 */
static int vidioc_vdec_g_selection(struct file *file, void *priv,
	struct v4l2_selection *s)
{
	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
	struct aml_q_data *q_data;

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
		(s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE))
		return -EINVAL;

	q_data = &ctx->q_data[AML_Q_DATA_DST];

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE:
		if (vdec_if_get_param(ctx, GET_PARAM_CROP_INFO, &(s->r))) {
			/* set to default value if header info not ready yet*/
			s->r.left = 0;
			s->r.top = 0;
			s->r.width = q_data->visible_width;
			s->r.height = q_data->visible_height;
		}
		break;
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = ctx->picinfo.coded_width;
		s->r.height = ctx->picinfo.coded_height;
		break;
	default:
		return -EINVAL;
	}

	/* Header not parsed yet: only the default dimensions are known. */
	if (ctx->state < AML_STATE_PROBE) {
		/* set to default value if header info not ready yet*/
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = q_data->visible_width;
		s->r.height = q_data->visible_height;
	}

	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, type: %d\n",
		__func__, s->type);

	return 0;
}
+
+static int vidioc_vdec_s_selection(struct file *file, void *priv,
+	struct v4l2_selection *s)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, type: %d\n",
+		__func__, s->type);
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	switch (s->target) {
+	case V4L2_SEL_TGT_COMPOSE:
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = ctx->picinfo.visible_width;
+		s->r.height = ctx->picinfo.visible_height;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void copy_v4l2_format_dimention(struct v4l2_pix_format_mplane *pix_mp,
+		struct aml_q_data *q_data, u32 type)
+{
+	if (!pix_mp || !q_data)
+		return;
+
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		pix_mp->width = q_data->visible_width;
+		pix_mp->height = q_data->visible_height;
+	} else {
+		/*
+		 * Width and height are set to the dimensions
+		 * of the movie, the buffer is bigger and
+		 * further processing stages should crop to this
+		 * rectangle.
+		 */
+		pix_mp->width = q_data->coded_width;
+		pix_mp->height = q_data->coded_height;
+	}
+
+	/*
+	 * Set pixelformat to the format in which mt vcodec
+	 * outputs the decoded frame
+	 */
+	pix_mp->num_planes = q_data->fmt->num_planes;
+	pix_mp->pixelformat = q_data->fmt->fourcc;
+	pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+	pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+	if (type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		pix_mp->plane_fmt[1].bytesperline = q_data->bytesperline[1];
+		pix_mp->plane_fmt[1].sizeimage = q_data->sizeimage[1];
+	}
+}
+
/*
 * Handle VIDIOC_S_FMT.
 *
 * Setting the OUTPUT format also initialises the decoder instance the
 * first time (IDLE -> INIT). Setting the CAPTURE format records the
 * requested pixel format; once the stream has been probed the real
 * dimensions are copied back to userspace.
 *
 * NOTE(review): if aml_vdec_find_format() fails for both the requested
 * and the fallback format, 'fmt' stays NULL and is stored/passed on —
 * verify vidioc_try_fmt()/callers tolerate a NULL fmt.
 */
static int vidioc_vdec_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
	struct v4l2_pix_format_mplane *pix_mp;
	struct aml_q_data *q_data;
	int ret = 0;
	struct aml_video_fmt *fmt;

	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
		"%s, type: %u, planes: %u, fmt: %u\n",
		__func__, f->type, f->fmt.pix_mp.num_planes,
		f->fmt.pix_mp.pixelformat);

	q_data = aml_vdec_get_q_data(ctx, f->type);
	if (!q_data)
		return -EINVAL;

	/* Warn (but proceed) when buffers are already allocated. */
	pix_mp = &f->fmt.pix_mp;
	if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
	    vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
			"out_q_ctx buffers already requested\n");
	}

	if ((!V4L2_TYPE_IS_OUTPUT(f->type)) &&
	    vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) {
		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
			"cap_q_ctx buffers already requested\n");
	}

	/* Unknown format: fall back to the queue's default format. */
	fmt = aml_vdec_find_format(f);
	if (fmt == NULL) {
		if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
			f->fmt.pix.pixelformat =
				aml_video_formats[OUT_FMT_IDX].fourcc;
			fmt = aml_vdec_find_format(f);
		} else if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
			f->fmt.pix.pixelformat =
				aml_video_formats[CAP_FMT_IDX].fourcc;
			fmt = aml_vdec_find_format(f);
		}
	}

	q_data->fmt = fmt;
	vidioc_try_fmt(f, q_data->fmt);
	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		/* DRM (secure) input carries no real payload size. */
		if (ctx->is_drm_mode)
			pix_mp->plane_fmt[0].sizeimage = 1;
		q_data->sizeimage[0] = pix_mp->plane_fmt[0].sizeimage;
		q_data->coded_width = pix_mp->width;
		q_data->coded_height = pix_mp->height;

		v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO,
			"w: %d, h: %d, size: %d\n",
			pix_mp->width, pix_mp->height,
			pix_mp->plane_fmt[0].sizeimage);

		/* Adopt the colour description supplied by userspace. */
		ctx->colorspace = f->fmt.pix_mp.colorspace;
		ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
		ctx->quantization = f->fmt.pix_mp.quantization;
		ctx->xfer_func = f->fmt.pix_mp.xfer_func;

		/* First OUTPUT S_FMT initialises the decoder (IDLE -> INIT). */
		mutex_lock(&ctx->state_lock);
		if (ctx->state == AML_STATE_IDLE) {
			ret = vdec_if_init(ctx, q_data->fmt->fourcc);
			if (ret) {
				v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
					"vdec_if_init() fail ret=%d\n", ret);
				mutex_unlock(&ctx->state_lock);
				return -EINVAL;
			}
			ctx->state = AML_STATE_INIT;
			ATRACE_COUNTER("v4l2_state", ctx->state);
			v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
				"vcodec state (AML_STATE_INIT)\n");
		}
		mutex_unlock(&ctx->state_lock);
	}

	if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
		ctx->cap_pix_fmt = pix_mp->pixelformat;
		/* Real dimensions are only known after the stream probe. */
		if (ctx->state >= AML_STATE_PROBE)
			copy_v4l2_format_dimention(pix_mp, q_data, f->type);
	}

	return 0;
}
+
/*
 * Handle VIDIOC_ENUM_FRAMESIZES.
 *
 * Each pixel format has exactly one stepwise entry (index 0). When the
 * device does not flag VCODEC_CAPABILITY_4K_DISABLED, the maximum is
 * raised to the 4K coded dimensions.
 */
static int vidioc_enum_framesizes(struct file *file, void *priv,
				struct v4l2_frmsizeenum *fsize)
{
	int i = 0;
	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);

	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, idx: %d, pix fmt: %x\n",
		__func__, fsize->index, fsize->pixel_format);

	/* Only one stepwise range per format. */
	if (fsize->index != 0)
		return -EINVAL;

	for (i = 0; i < NUM_SUPPORTED_FRAMESIZE; ++i) {
		if (fsize->pixel_format != aml_vdec_framesizes[i].fourcc)
			continue;

		fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
		fsize->stepwise = aml_vdec_framesizes[i].stepwise;
		/* Extend the limit to 4K unless explicitly disabled. */
		if (!(ctx->dev->dec_capability &
				VCODEC_CAPABILITY_4K_DISABLED)) {
			v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "4K is enabled\n");
			fsize->stepwise.max_width =
					VCODEC_DEC_4K_CODED_WIDTH;
			fsize->stepwise.max_height =
					VCODEC_DEC_4K_CODED_HEIGHT;
		}
		v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO,
			"%x, %d %d %d %d %d %d\n",
			ctx->dev->dec_capability,
			fsize->stepwise.min_width,
			fsize->stepwise.max_width,
			fsize->stepwise.step_width,
			fsize->stepwise.min_height,
			fsize->stepwise.max_height,
			fsize->stepwise.step_height);
		return 0;
	}

	return -EINVAL;
}
+
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
+{
+	struct aml_video_fmt *fmt;
+	int i, j = 0;
+
+	for (i = 0; i < NUM_FORMATS; i++) {
+		if (output_queue && (aml_video_formats[i].type != AML_FMT_DEC))
+			continue;
+		if (!output_queue && (aml_video_formats[i].type != AML_FMT_FRAME))
+			continue;
+
+		if (j == f->index) {
+			fmt = &aml_video_formats[i];
+			f->pixelformat = fmt->fourcc;
+			return 0;
+		}
+		++j;
+	}
+
+	return -EINVAL;
+}
+
+static int vidioc_vdec_enum_fmt_vid_cap_mplane(struct file *file,
+	void *priv, struct v4l2_fmtdesc *f)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s\n", __func__);
+
+	return vidioc_enum_fmt(f, false);
+}
+
+static int vidioc_vdec_enum_fmt_vid_out_mplane(struct file *file,
+	void *priv, struct v4l2_fmtdesc *f)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s\n", __func__);
+
+	return vidioc_enum_fmt(f, true);
+}
+
/*
 * Handle VIDIOC_G_FMT.
 *
 * Colour description always comes from the context. After the stream
 * probe (state >= PROBE) the CAPTURE format is refreshed from the
 * decoder's picture info; the OUTPUT format simply reports the cached
 * queue data. A CAPTURE query before the probe fails with -EINVAL
 * since no real dimensions exist yet.
 */
static int vidioc_vdec_g_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct vb2_queue *vq;
	struct aml_q_data *q_data;
	int ret = 0;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
	if (!vq) {
		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
			"no vb2 queue for type=%d\n", f->type);
		return -EINVAL;
	}

	q_data = aml_vdec_get_q_data(ctx, f->type);

	/* ret != 0 means picture info is not available yet. */
	ret = vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo);
	if (ret) {
		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
			"GET_PARAM_PICTURE_INFO err\n");
	}

	/* Fill the colour description for either API flavour. */
	if (V4L2_TYPE_IS_MULTIPLANAR(f->type)) {
		pix_mp->field = ret ? V4L2_FIELD_NONE : ctx->picinfo.field;
		pix_mp->colorspace = ctx->colorspace;
		pix_mp->ycbcr_enc = ctx->ycbcr_enc;
		pix_mp->quantization = ctx->quantization;
		pix_mp->xfer_func = ctx->xfer_func;
	} else {
		pix->field = ret ? V4L2_FIELD_NONE : ctx->picinfo.field;
		pix->colorspace = ctx->colorspace;
		pix->ycbcr_enc = ctx->ycbcr_enc;
		pix->quantization = ctx->quantization;
		pix->xfer_func = ctx->xfer_func;
	}

	if ((!V4L2_TYPE_IS_OUTPUT(f->type)) &&
	    (ctx->state >= AML_STATE_PROBE)) {
		/* Until STREAMOFF is called on the CAPTURE queue
		 * (acknowledging the event), the driver operates as if
		 * the resolution hasn't changed yet.
		 * So we just return picinfo yet, and update picinfo in
		 * stop_streaming hook function
		 */
		/* it is used for alloc the decode buffer size. */
		q_data->sizeimage[0] = ctx->picinfo.y_len_sz;
		q_data->sizeimage[1] = ctx->picinfo.c_len_sz;

		/* it is used for alloc the EGL image buffer size. */
		q_data->coded_width = ctx->picinfo.coded_width;
		q_data->coded_height = ctx->picinfo.coded_height;

		q_data->bytesperline[0] = ctx->picinfo.coded_width;
		q_data->bytesperline[1] = ctx->picinfo.coded_width;

		copy_v4l2_format_dimention(pix_mp, q_data, f->type);
	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		/*
		 * This is run on OUTPUT
		 * The buffer contains compressed image
		 * so width and height have no meaning.
		 * Assign value here to pass v4l2-compliance test
		 */
		copy_v4l2_format_dimention(pix_mp, q_data, f->type);
	} else {
		/* CAPTURE queried before the probe: no real format yet. */
		copy_v4l2_format_dimention(pix_mp, q_data, f->type);

		v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO,
			"type=%d state=%d Format information could not be read, not ready yet!\n",
			f->type, ctx->state);
		return -EINVAL;
	}

	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
		"%s, type: %u, planes: %u, fmt: %u\n",
		__func__, f->type, f->fmt.pix_mp.num_planes,
		f->fmt.pix_mp.pixelformat);

	return 0;
}
+
+static int vidioc_vdec_create_bufs(struct file *file, void *priv,
+	struct v4l2_create_buffers *create)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %u, count: %u\n",
+		__func__, create->format.type, create->count);
+
+	return v4l2_m2m_ioctl_create_bufs(file, priv, create);
+}
+
+/*int vidioc_vdec_g_ctrl(struct file *file, void *fh,
+	struct v4l2_control *a)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(fh);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, id: %d\n", __func__, a->id);
+
+	if (a->id == V4L2_CID_MIN_BUFFERS_FOR_CAPTURE)
+		a->value = 4;
+	else if (a->id == V4L2_CID_MIN_BUFFERS_FOR_OUTPUT)
+		a->value = 8;
+
+	return 0;
+}*/
+
/*
 * vb2 .queue_setup hook: decide plane count/size for REQBUFS or
 * CREATE_BUFS.
 *
 * With *nplanes preset (CREATE_BUFS) the given sizes are validated
 * against the queue's expectations; otherwise the plane layout is
 * derived from the queue type (2 planes for CAPTURE, 1 for OUTPUT).
 * All planes are allocated from the codec_mm allocator device.
 */
static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
				unsigned int *nbuffers,
				unsigned int *nplanes,
				unsigned int sizes[], struct device *alloc_devs[])
{
	struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
	struct aml_q_data *q_data;
	unsigned int i;

	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, type: %d\n",
		__func__, vq->type);

	q_data = aml_vdec_get_q_data(ctx, vq->type);
	if (q_data == NULL) {
		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
			"vq->type=%d err\n", vq->type);
		return -EINVAL;
	}

	if (*nplanes) {
		/* CREATE_BUFS path: caller supplied sizes — validate them. */
		for (i = 0; i < *nplanes; i++) {
			if (sizes[i] < q_data->sizeimage[i])
				return -EINVAL;
			/* Allocate from codec_mm rather than the platform dev. */
			alloc_devs[i] = v4l_get_dev_from_codec_mm();
		}
	} else {
		/* REQBUFS path: choose layout from the queue type. */
		if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
			*nplanes = 2;
		else
			*nplanes = 1;

		for (i = 0; i < *nplanes; i++) {
			sizes[i] = q_data->sizeimage[i];
			/* dma-buf imported input: exporter owns the sizing. */
			if (V4L2_TYPE_IS_OUTPUT(vq->type) && ctx->output_dma_mode)
				sizes[i] = 0;
			/* Allocate from codec_mm rather than the platform dev. */
			alloc_devs[i] = v4l_get_dev_from_codec_mm();
		}
	}

	v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
		"type: %d, plane: %d, buf cnt: %d, size: [Y: %u, C: %u]\n",
		vq->type, *nplanes, *nbuffers, sizes[0], sizes[1]);

	return 0;
}
+
+static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
+{
+	struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct aml_q_data *q_data;
+	int i;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %d, idx: %d\n",
+		__func__, vb->vb2_queue->type, vb->index);
+
+	if (vb->memory == VB2_MEMORY_DMABUF
+		&& V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type))
+		return 0;
+
+	q_data = aml_vdec_get_q_data(ctx, vb->vb2_queue->type);
+
+	for (i = 0; i < q_data->fmt->num_planes; i++) {
+		if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+				"data will not fit into plane %d (%lu < %d)\n",
+				i, vb2_plane_size(vb, i),
+				q_data->sizeimage[i]);
+		}
+	}
+
+	return 0;
+}
+
/*
 * vb2 .buf_queue hook.
 *
 * CAPTURE buffers either enter the m2m ready queue (first enqueue) or,
 * when re-queued after display, recycle their vframe back to the
 * decoder. The first OUTPUT buffer queued in the INIT state is used to
 * probe the stream header; a successful probe publishes dpb_size and a
 * source-change event and advances the state to PROBE.
 */
static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
{
	struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vb2_v4l2 = NULL;
	struct aml_video_dec_buf *buf = NULL;
	struct aml_vcodec_mem src_mem;
	unsigned int dpb = 0;

	vb2_v4l2 = to_vb2_v4l2_buffer(vb);
	buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb);

	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
		"%s, vb: %lx, type: %d, idx: %d, state: %d, used: %d, ts: %llu\n",
		__func__, (ulong) vb, vb->vb2_queue->type,
		vb->index, vb->state, buf->used, vb->timestamp);
	/*
	 * check if this buffer is ready to be used after decode
	 */
	if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
		/* Only dpb_size capture buffers are valid. */
		if (vb->index >= ctx->dpb_size) {
			v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
				"enque capture buf idx %d/%d is invalid.\n",
				vb->index, ctx->dpb_size);
			return;
		}

		v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO,
			"y_addr: %lx, vf_h: %lx, state: %d",
			buf->frame_buffer.m.mem[0].addr,
			buf->frame_buffer.vf_handle,
			buf->frame_buffer.status);

		if (!buf->que_in_m2m) {
			/* First enqueue: hand the buffer to the m2m queue. */
			v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
				"enque capture buf idx %d, vf: %lx\n",
				vb->index, (ulong) v4l_get_vf_handle(vb2_v4l2->private));

			v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
			buf->que_in_m2m = true;
			buf->queued_in_vb2 = true;
			buf->queued_in_v4l2 = true;
			buf->ready_to_display = false;
			ctx->cap_pool.seq[ctx->cap_pool.in++] =
				(V4L_CAP_BUFF_IN_M2M << 16 | vb->index);

			/* check dpb ready */
			aml_check_dpb_ready(ctx);
		} else if (buf->frame_buffer.status == FB_ST_DISPLAY) {
			/* Re-queue after display: return the vframe. */
			buf->queued_in_vb2 = false;
			buf->queued_in_v4l2 = true;
			buf->ready_to_display = false;

			/* recycle vf */
			video_vf_put(ctx->ada_ctx->recv_name,
				&buf->frame_buffer, ctx->id);
		}
		return;
	}

	/* OUTPUT side: queue the bitstream buffer. */
	v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));

	/* Only the INIT state uses input buffers for header probing. */
	if (ctx->state != AML_STATE_INIT) {
		return;
	}

	vb2_v4l2 = to_vb2_v4l2_buffer(vb);
	buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb);
	if (buf->lastframe) {
		/* This shouldn't happen. Just in case. */
		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
			"Invalid flush buffer.\n");
		v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		return;
	}

	/* Describe the input payload for the probe call. */
	src_mem.index	= vb->index;
	src_mem.vaddr	= vb2_plane_vaddr(vb, 0);
	src_mem.addr	= vb2_dma_contig_plane_dma_addr(vb, 0);
	src_mem.size	= vb->planes[0].bytesused;
	src_mem.model	= vb->memory;
	src_mem.timestamp = vb->timestamp;

	if (vdec_if_probe(ctx, &src_mem, NULL)) {
		/* Probe failed: give the buffer back to userspace
		 * (except secure dma-buf input, which was consumed). */
		v4l2_m2m_src_buf_remove(ctx->m2m_ctx);

		if (!(ctx->is_drm_mode && src_mem.model == VB2_MEMORY_DMABUF))
			v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_DONE);
		return;
	}

	/*
	 * If on model dmabuf must remove the buffer
	 * because this data has been consumed by hw.
	 */
	if (ctx->is_drm_mode && src_mem.model == VB2_MEMORY_DMABUF) {
		v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	} else if (ctx->param_sets_from_ucode) {
		v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb),
			VB2_BUF_STATE_DONE);
	}

	if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
			"GET_PARAM_PICTURE_INFO err\n");
		return;
	}

	if (vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpb)) {
		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
			"GET_PARAM_DPB_SIZE err\n");
		return;
	}

	if (!dpb)
		return;

	/* Probe succeeded: publish the stream parameters. */
	ctx->dpb_size = dpb;
	ctx->last_decoded_picinfo = ctx->picinfo;
	aml_vdec_dispatch_event(ctx, V4L2_EVENT_SRC_CH_RESOLUTION);

	mutex_lock(&ctx->state_lock);
	if (ctx->state == AML_STATE_INIT) {
		ctx->state = AML_STATE_PROBE;
		ATRACE_COUNTER("v4l2_state", ctx->state);
		v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
			"vcodec state (AML_STATE_PROBE)\n");
	}
	mutex_unlock(&ctx->state_lock);
}
+
+static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
+{
+	struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct vb2_v4l2_buffer *vb2_v4l2 = NULL;
+	struct aml_video_dec_buf *buf = NULL;
+	bool buf_error;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %d, idx: %d\n",
+		__func__, vb->vb2_queue->type, vb->index);
+
+	vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+	buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb);
+
+	if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+		buf->queued_in_v4l2 = false;
+		buf->queued_in_vb2 = false;
+	}
+	buf_error = buf->error;
+
+	if (buf_error) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Unrecoverable error on buffer.\n");
+		ctx->state = AML_STATE_ABORT;
+		ATRACE_COUNTER("v4l2_state", ctx->state);
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_STATE,
+			"vcodec state (AML_STATE_ABORT)\n");
+	}
+}
+
+/*
+ * vb2 .buf_init hook, called once per buffer when the queue is created.
+ *
+ * Resets the per-buffer bookkeeping flags and, for MMAP buffers, registers
+ * each DMA plane with codec_mm under a per-context owner tag so the memory
+ * can be tracked and later reclaimed by codec_mm_bufs_cnt_clean().
+ *
+ * Returns 0 on success, -ENOMEM if the scratch name buffer cannot be
+ * allocated.
+ */
+static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
+{
+	struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
+					struct vb2_v4l2_buffer, vb2_buf);
+	struct aml_video_dec_buf *buf = container_of(vb2_v4l2,
+					struct aml_video_dec_buf, vb);
+	unsigned int size, phy_addr = 0;
+	char *owner = __getname();
+
+	/* __getname() allocates from the names slab and can fail. */
+	if (!owner)
+		return -ENOMEM;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s, type: %d, idx: %d\n",
+		__func__, vb->vb2_queue->type, vb->index);
+
+	if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+		/* Fresh capture buffer: nothing decoded or displayed yet. */
+		buf->used = false;
+		buf->ready_to_display = false;
+		buf->queued_in_v4l2 = false;
+		buf->frame_buffer.status = FB_ST_NORMAL;
+	} else {
+		buf->lastframe = false;
+	}
+
+	/* codec_mm buffers count */
+	if (V4L2_TYPE_IS_OUTPUT(vb->type)) {
+		if (vb->memory == VB2_MEMORY_MMAP) {
+			size = vb->planes[0].length;
+			phy_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+			snprintf(owner, PATH_MAX, "%s-%d", "v4l-input", ctx->id);
+			strncpy(buf->mem_onwer, owner, sizeof(buf->mem_onwer));
+			buf->mem_onwer[sizeof(buf->mem_onwer) - 1] = '\0';
+
+			buf->mem[0] = v4l_reqbufs_from_codec_mm(buf->mem_onwer,
+					phy_addr, size, vb->index);
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+				"IN alloc, addr: %x, size: %u, idx: %u\n",
+				phy_addr, size, vb->index);
+		}
+	} else {
+		snprintf(owner, PATH_MAX, "%s-%d", "v4l-output", ctx->id);
+		strncpy(buf->mem_onwer, owner, sizeof(buf->mem_onwer));
+		buf->mem_onwer[sizeof(buf->mem_onwer) - 1] = '\0';
+
+		if ((vb->memory == VB2_MEMORY_MMAP) && (vb->num_planes == 1)) {
+			/* Single contiguous plane carrying both Y and C. */
+			size = vb->planes[0].length;
+			phy_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+			buf->mem[0] = v4l_reqbufs_from_codec_mm(buf->mem_onwer,
+				phy_addr, size, vb->index);
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+				"OUT Y alloc, addr: %x, size: %u, idx: %u\n",
+				phy_addr, size, vb->index);
+		} else if ((vb->memory == VB2_MEMORY_MMAP) && (vb->num_planes == 2)) {
+			/* Two planes: mem[0] tracks Y, mem[1] tracks C. */
+			size = vb->planes[0].length;
+			phy_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+			buf->mem[0] = v4l_reqbufs_from_codec_mm(buf->mem_onwer,
+				phy_addr, size, vb->index);
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+				"OUT Y alloc, addr: %x, size: %u, idx: %u\n",
+				phy_addr, size, vb->index);
+
+			size = vb->planes[1].length;
+			phy_addr = vb2_dma_contig_plane_dma_addr(vb, 1);
+			buf->mem[1] = v4l_reqbufs_from_codec_mm(buf->mem_onwer,
+					phy_addr, size, vb->index);
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+				"OUT C alloc, addr: %x, size: %u, idx: %u\n",
+				phy_addr, size, vb->index);
+		}
+	}
+
+	__putname(owner);
+
+	return 0;
+}
+
+/*
+ * Release the codec_mm accounting taken in vb2ops_vdec_buf_init() for
+ * every buffer on @q.  Buffers without a codec_mm backing (DMABUF, or a
+ * failed registration) are skipped individually so one missing entry
+ * does not leak the rest of the queue.
+ */
+static void codec_mm_bufs_cnt_clean(struct vb2_queue *q)
+{
+	struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+	struct vb2_v4l2_buffer *vb2_v4l2 = NULL;
+	struct aml_video_dec_buf *buf = NULL;
+	int i;
+
+	for (i = 0; i < q->num_buffers; ++i) {
+		vb2_v4l2 = to_vb2_v4l2_buffer(q->bufs[i]);
+		buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb);
+		/* continue (not return): other buffers may still hold
+		 * codec_mm references that must be dropped. */
+		if (IS_ERR_OR_NULL(buf->mem[0]))
+			continue;
+
+		if (V4L2_TYPE_IS_OUTPUT(q->bufs[i]->type)) {
+			/* Log before freeing to avoid touching a released
+			 * codec_mm descriptor. */
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+				"IN clean, addr: %lx, size: %u, idx: %u\n",
+				buf->mem[0]->phy_addr, buf->mem[0]->buffer_size, i);
+			v4l_freebufs_back_to_codec_mm(buf->mem_onwer, buf->mem[0]);
+			buf->mem[0] = NULL;
+			continue;
+		}
+
+		if (q->memory == VB2_MEMORY_MMAP) {
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+				"OUT Y clean, addr: %lx, size: %u, idx: %u\n",
+				buf->mem[0]->phy_addr, buf->mem[0]->buffer_size, i);
+			v4l_freebufs_back_to_codec_mm(buf->mem_onwer, buf->mem[0]);
+			buf->mem[0] = NULL;
+
+			/* mem[1] only exists for 2-plane capture buffers;
+			 * guard against a NULL dereference on 1-plane. */
+			if (!IS_ERR_OR_NULL(buf->mem[1])) {
+				v4l_dbg(ctx, V4L_DEBUG_CODEC_BUFMGR,
+					"OUT C clean, addr: %lx, size: %u, idx: %u\n",
+					buf->mem[1]->phy_addr, buf->mem[1]->buffer_size, i);
+				v4l_freebufs_back_to_codec_mm(buf->mem_onwer, buf->mem[1]);
+				buf->mem[1] = NULL;
+			}
+		}
+	}
+}
+
+/*
+ * vb2 .start_streaming hook.  Clears the EOS latch and puts the m2m
+ * destination queue into "buffered" mode so capture buffers can be
+ * queued before any source data is available.  Always succeeds.
+ */
+static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+
+	ctx->has_receive_eos = false;
+
+	v4l2_m2m_set_dst_buffered(ctx->fh.m2m_ctx, true);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %d\n", __func__, q->type);
+
+	return 0;
+}
+
+/*
+ * vb2 .stop_streaming hook.  Drops the codec_mm accounting for the
+ * queue, then returns every pending/in-flight buffer to vb2 in the
+ * ERROR state.  For the capture side the decoder is also reset and the
+ * capture-pool bookkeeping cleared so a later STREAMON starts clean.
+ */
+static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
+{
+	struct aml_video_dec_buf *buf = NULL;
+	struct vb2_v4l2_buffer *vb2_v4l2 = NULL;
+	struct aml_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+	int i;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, type: %d, state: %x, frame_cnt: %d\n",
+		__func__, q->type, ctx->state, ctx->decoded_frame_cnt);
+
+	codec_mm_bufs_cnt_clean(q);
+
+	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+		/* Drain the m2m ready list, then fail any buffer still
+		 * marked active in vb2. */
+		while ((vb2_v4l2 = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
+			v4l2_m2m_buf_done(vb2_v4l2, VB2_BUF_STATE_ERROR);
+
+		for (i = 0; i < q->num_buffers; ++i) {
+			vb2_v4l2 = to_vb2_v4l2_buffer(q->bufs[i]);
+			if (vb2_v4l2->vb2_buf.state == VB2_BUF_STATE_ACTIVE)
+				v4l2_m2m_buf_done(vb2_v4l2, VB2_BUF_STATE_ERROR);
+		}
+	} else {
+		/* clean output cache and decoder status . */
+		if (ctx->state > AML_STATE_INIT)
+			aml_vdec_reset(ctx);
+
+		while ((vb2_v4l2 = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx)))
+			v4l2_m2m_buf_done(vb2_v4l2, VB2_BUF_STATE_ERROR);
+
+		for (i = 0; i < q->num_buffers; ++i) {
+			vb2_v4l2 = to_vb2_v4l2_buffer(q->bufs[i]);
+			buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb);
+			/* Reset per-buffer decode bookkeeping. */
+			buf->frame_buffer.status = FB_ST_NORMAL;
+			buf->que_in_m2m = false;
+			buf->vb.flags = 0;
+			ctx->cap_pool.seq[i] = 0;
+
+			if (vb2_v4l2->vb2_buf.state == VB2_BUF_STATE_ACTIVE)
+				v4l2_m2m_buf_done(vb2_v4l2, VB2_BUF_STATE_ERROR);
+
+			/*v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "idx: %d, state: %d\n",
+				q->bufs[i]->index, q->bufs[i]->state);*/
+		}
+
+		/* Reset the capture-pool statistics. */
+		ctx->buf_used_count = 0;
+		ctx->cap_pool.in = 0;
+		ctx->cap_pool.out = 0;
+		ctx->cap_pool.dec = 0;
+		ctx->cap_pool.vpp = 0;
+	}
+}
+
+/*
+ * m2m .device_run hook: kick the decode worker.  Work is only queued
+ * while the output thread is alive, to avoid racing with teardown.
+ */
+static void m2mops_vdec_device_run(void *priv)
+{
+	struct aml_vcodec_ctx *ctx = priv;
+	struct aml_vcodec_dev *dev = ctx->dev;
+
+	if (ctx->output_thread_ready)
+		queue_work(dev->decode_workqueue, &ctx->decode_work);
+}
+
+/*
+ * Wake the capture thread to pull decoded frames, but only while the
+ * context sits in the [INIT, FLUSHED] state window.
+ */
+void vdec_device_vf_run(struct aml_vcodec_ctx *ctx)
+{
+	if (ctx->state < AML_STATE_INIT ||
+		ctx->state > AML_STATE_FLUSHED)
+		return;
+
+	aml_thread_notify(ctx, AML_THREAD_CAPTURE);
+}
+
+/*
+ * m2m .job_ready hook: a job may only run while the context is within
+ * the [PROBE, FLUSHED] state window.  Returns 1 when ready, 0 otherwise.
+ */
+static int m2mops_vdec_job_ready(void *m2m_priv)
+{
+	struct aml_vcodec_ctx *ctx = m2m_priv;
+
+	return (ctx->state >= AML_STATE_PROBE &&
+		ctx->state <= AML_STATE_FLUSHED) ? 1 : 0;
+}
+
+/* m2m .job_abort hook: nothing to cancel here, just trace the call. */
+static void m2mops_vdec_job_abort(void *priv)
+{
+	struct aml_vcodec_ctx *ctx = priv;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "%s\n", __func__);
+}
+
+/*
+ * .g_volatile_ctrl handler.
+ *
+ * V4L2_CID_MIN_BUFFERS_FOR_CAPTURE reports the DPB size, which is only
+ * known once the sequence header has been parsed (state >= PROBE);
+ * before that the call fails with -EINVAL.
+ * V4L2_CID_MIN_BUFFERS_FOR_OUTPUT is a fixed value of 4.
+ */
+static int aml_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct aml_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+	int ret = 0;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT,
+		"%s, id: %d\n", __func__, ctrl->id);
+
+	switch (ctrl->id) {
+	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+		if (ctx->state >= AML_STATE_PROBE) {
+			ctrl->val = ctx->dpb_size;
+		} else {
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+				"Seqinfo not ready.\n");
+			ctrl->val = 0;
+			ret = -EINVAL;
+		}
+		break;
+	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+		ctrl->val = 4;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+/*
+ * .try_ctrl handler.  AML_V4L2_SET_DRMMODE switches the context into DRM
+ * (secure stream) mode; entering DRM mode also forces parameter sets to
+ * come from the decoder ucode.  All other controls are accepted as-is.
+ */
+static int aml_vdec_try_s_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct aml_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s\n", __func__);
+
+	if (ctrl->id == AML_V4L2_SET_DRMMODE) {
+		ctx->is_drm_mode = ctrl->val;
+		ctx->param_sets_from_ucode = true;
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO,
+			"set stream mode: %x\n", ctrl->val);
+	}
+
+	return 0;
+}
+
+/* Control ops shared by the standard and custom decoder controls. */
+static const struct v4l2_ctrl_ops aml_vcodec_dec_ctrl_ops = {
+	.g_volatile_ctrl = aml_vdec_g_v_ctrl,
+	.try_ctrl = aml_vdec_try_s_v_ctrl,
+};
+
+/* Custom write-only boolean control toggling DRM (secure stream) mode;
+ * handled by aml_vdec_try_s_v_ctrl(). */
+static const struct v4l2_ctrl_config ctrl_st_mode = {
+	.name	= "drm mode",
+	.id	= AML_V4L2_SET_DRMMODE,
+	.ops	= &aml_vcodec_dec_ctrl_ops,
+	.type	= V4L2_CTRL_TYPE_BOOLEAN,
+	.flags	= V4L2_CTRL_FLAG_WRITE_ONLY,
+	.min	= 0,
+	.max	= 1,
+	.step	= 1,
+	.def	= 0,
+};
+
+/*
+ * Register the decoder's V4L2 controls:
+ *  - V4L2_CID_MIN_BUFFERS_FOR_CAPTURE (volatile, reports dpb_size)
+ *  - V4L2_CID_MIN_BUFFERS_FOR_OUTPUT  (volatile)
+ *  - AML_V4L2_SET_DRMMODE             (custom, write-only)
+ *
+ * Returns 0 on success or the control-handler error code; on failure
+ * the handler is freed before returning.
+ */
+int aml_vcodec_dec_ctrls_setup(struct aml_vcodec_ctx *ctx)
+{
+	int ret;
+	struct v4l2_ctrl *ctrl;
+
+	v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 3);
+	ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl,
+				&aml_vcodec_dec_ctrl_ops,
+				V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+				0, 32, 1, 2);
+	/* v4l2_ctrl_new_std() returns NULL on failure, so the handler
+	 * error must be checked before dereferencing ctrl. */
+	if (ctx->ctrl_hdl.error) {
+		ret = ctx->ctrl_hdl.error;
+		goto err;
+	}
+	ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl,
+				&aml_vcodec_dec_ctrl_ops,
+				V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+				0, 32, 1, 8);
+	if (ctx->ctrl_hdl.error) {
+		ret = ctx->ctrl_hdl.error;
+		goto err;
+	}
+	ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	ctrl = v4l2_ctrl_new_custom(&ctx->ctrl_hdl, &ctrl_st_mode, NULL);
+	if (ctx->ctrl_hdl.error) {
+		ret = ctx->ctrl_hdl.error;
+		goto err;
+	}
+
+	v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+
+	return 0;
+err:
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+		"Adding control failed %d\n",
+		ctx->ctrl_hdl.error);
+	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+	return ret;
+}
+
+/*
+ * VIDIOC_G_PARM: for the output queue, fetch the decoder configuration
+ * from the codec layer and copy it into the raw streamparm payload.
+ * Other queue types are a no-op.
+ *
+ * Returns 0 on success, -EINVAL if the codec cannot report its config.
+ */
+static int vidioc_vdec_g_parm(struct file *file, void *fh,
+	struct v4l2_streamparm *a)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(fh);
+
+	if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		if (vdec_if_get_param(ctx, GET_PARAM_CONFIG_INFO,
+			&ctx->config.parm.dec)) {
+			v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+				"GET_PARAM_CONFIG_INFO err\n");
+			/* Return a proper errno; the previous bare -1
+			 * surfaced to userspace as a misleading EPERM. */
+			return -EINVAL;
+		}
+		memcpy(a->parm.raw_data, ctx->config.parm.data,
+			sizeof(a->parm.raw_data));
+	}
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s\n", __func__);
+
+	return 0;
+}
+
+/*
+ * VIDIOC_S_PARM: for the output queue, merge decode parameters supplied
+ * by userspace (packed in parm.raw_data) into the context config.  Only
+ * the sections flagged in parms_status are copied; the accumulated
+ * status mask is OR-ed so earlier sections are preserved.
+ *
+ * NOTE(review): raw_data is reinterpreted as struct aml_dec_params
+ * without a size check — assumes the struct fits in the 200-byte
+ * raw_data array; confirm against the struct definition.
+ */
+static int vidioc_vdec_s_parm(struct file *file, void *fh,
+	struct v4l2_streamparm *a)
+{
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(fh);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PROT, "%s\n", __func__);
+
+	if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		struct aml_dec_params *in =
+			(struct aml_dec_params *) a->parm.raw_data;
+		struct aml_dec_params *dec = &ctx->config.parm.dec;
+
+		ctx->config.type = V4L2_CONFIG_PARM_DECODE;
+
+		/* Merge only the sections the caller marked as valid. */
+		if (in->parms_status & V4L2_CONFIG_PARM_DECODE_CFGINFO)
+			dec->cfg = in->cfg;
+		if (in->parms_status & V4L2_CONFIG_PARM_DECODE_PSINFO)
+			dec->ps = in->ps;
+		if (in->parms_status & V4L2_CONFIG_PARM_DECODE_HDRINFO)
+			dec->hdr = in->hdr;
+		if (in->parms_status & V4L2_CONFIG_PARM_DECODE_CNTINFO)
+			dec->cnt = in->cnt;
+
+		dec->parms_status |= in->parms_status;
+	}
+
+	return 0;
+}
+
+/* m2m .lock hook: serialize jobs on the per-device mutex. */
+static void m2mops_vdec_lock(void *m2m_priv)
+{
+	struct aml_vcodec_ctx *ctx = m2m_priv;
+
+	mutex_lock(&ctx->dev->dev_mutex);
+}
+
+/* m2m .unlock hook: counterpart of m2mops_vdec_lock(). */
+static void m2mops_vdec_unlock(void *m2m_priv)
+{
+	struct aml_vcodec_ctx *ctx = m2m_priv;
+
+	mutex_unlock(&ctx->dev->dev_mutex);
+}
+
+/* mem2mem framework callbacks. */
+const struct v4l2_m2m_ops aml_vdec_m2m_ops = {
+	.device_run	= m2mops_vdec_device_run,
+	.job_ready	= m2mops_vdec_job_ready,
+	.job_abort	= m2mops_vdec_job_abort,
+	.lock		= m2mops_vdec_lock,
+	.unlock		= m2mops_vdec_unlock,
+};
+
+/* videobuf2 queue callbacks, shared by the output and capture queues. */
+static const struct vb2_ops aml_vdec_vb2_ops = {
+	.queue_setup	= vb2ops_vdec_queue_setup,
+	.buf_prepare	= vb2ops_vdec_buf_prepare,
+	.buf_queue	= vb2ops_vdec_buf_queue,
+	.wait_prepare	= vb2_ops_wait_prepare,
+	.wait_finish	= vb2_ops_wait_finish,
+	.buf_init	= vb2ops_vdec_buf_init,
+	.buf_finish	= vb2ops_vdec_buf_finish,
+	.start_streaming = vb2ops_vdec_start_streaming,
+	.stop_streaming	= vb2ops_vdec_stop_streaming,
+};
+
+/*
+ * V4L2 ioctl dispatch table.  Each single-planar entry is aliased to its
+ * multi-planar handler so the driver serves both APIs (see `multiplanar`
+ * module parameter).
+ */
+const struct v4l2_ioctl_ops aml_vdec_ioctl_ops = {
+	.vidioc_streamon		= vidioc_decoder_streamon,
+	.vidioc_streamoff		= vidioc_decoder_streamoff,
+	.vidioc_reqbufs			= vidioc_decoder_reqbufs,
+	.vidioc_querybuf		= vidioc_vdec_querybuf,
+	.vidioc_expbuf			= vidioc_vdec_expbuf,
+	//.vidioc_g_ctrl		= vidioc_vdec_g_ctrl,
+
+	.vidioc_qbuf			= vidioc_vdec_qbuf,
+	.vidioc_dqbuf			= vidioc_vdec_dqbuf,
+
+	.vidioc_try_fmt_vid_cap_mplane	= vidioc_try_fmt_vid_cap_mplane,
+	.vidioc_try_fmt_vid_cap		= vidioc_try_fmt_vid_cap_mplane,
+	.vidioc_try_fmt_vid_out_mplane	= vidioc_try_fmt_vid_out_mplane,
+	.vidioc_try_fmt_vid_out		= vidioc_try_fmt_vid_out_mplane,
+
+	.vidioc_s_fmt_vid_cap_mplane	= vidioc_vdec_s_fmt,
+	.vidioc_s_fmt_vid_cap		= vidioc_vdec_s_fmt,
+	.vidioc_s_fmt_vid_out_mplane	= vidioc_vdec_s_fmt,
+	.vidioc_s_fmt_vid_out		= vidioc_vdec_s_fmt,
+	.vidioc_g_fmt_vid_cap_mplane	= vidioc_vdec_g_fmt,
+	.vidioc_g_fmt_vid_cap		= vidioc_vdec_g_fmt,
+	.vidioc_g_fmt_vid_out_mplane	= vidioc_vdec_g_fmt,
+	.vidioc_g_fmt_vid_out		= vidioc_vdec_g_fmt,
+
+	.vidioc_create_bufs		= vidioc_vdec_create_bufs,
+
+	.vidioc_enum_fmt_vid_cap_mplane	= vidioc_vdec_enum_fmt_vid_cap_mplane,
+	.vidioc_enum_fmt_vid_cap	= vidioc_vdec_enum_fmt_vid_cap_mplane,
+	.vidioc_enum_fmt_vid_out_mplane	= vidioc_vdec_enum_fmt_vid_out_mplane,
+	.vidioc_enum_fmt_vid_out	= vidioc_vdec_enum_fmt_vid_out_mplane,
+	.vidioc_enum_framesizes		= vidioc_enum_framesizes,
+
+	.vidioc_querycap		= vidioc_vdec_querycap,
+	.vidioc_subscribe_event		= vidioc_vdec_subscribe_evt,
+	.vidioc_unsubscribe_event	= vidioc_vdec_event_unsubscribe,
+	.vidioc_g_selection             = vidioc_vdec_g_selection,
+	.vidioc_s_selection             = vidioc_vdec_s_selection,
+
+	.vidioc_decoder_cmd		= vidioc_decoder_cmd,
+	.vidioc_try_decoder_cmd		= vidioc_try_decoder_cmd,
+
+	.vidioc_g_parm			= vidioc_vdec_g_parm,
+	.vidioc_s_parm			= vidioc_vdec_s_parm,
+};
+
+/*
+ * m2m queue_init callback: configure and initialise the output (source
+ * bitstream) and capture (decoded frames) vb2 queues.  The capture queue
+ * type follows the `multiplanar` module parameter.
+ *
+ * Returns 0 on success; on capture-queue failure the already-initialised
+ * output queue is released before returning the error.
+ */
+int aml_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
+			   struct vb2_queue *dst_vq)
+{
+	struct aml_vcodec_ctx *ctx = priv;
+	int ret = 0;
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_EXINFO, "%s\n", __func__);
+
+	src_vq->type		= V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+	src_vq->io_modes	= VB2_DMABUF | VB2_MMAP;
+	src_vq->drv_priv	= ctx;
+	src_vq->buf_struct_size = sizeof(struct aml_video_dec_buf);
+	src_vq->ops		= &aml_vdec_vb2_ops;
+	src_vq->mem_ops		= &vb2_dma_contig_memops;
+	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	src_vq->lock		= &ctx->dev->dev_mutex;
+	ret = vb2_queue_init(src_vq);
+	if (ret) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Failed to initialize videobuf2 queue(output)\n");
+		return ret;
+	}
+
+	dst_vq->type		= multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+					V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	dst_vq->io_modes	= VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+	dst_vq->drv_priv	= ctx;
+	dst_vq->buf_struct_size = sizeof(struct aml_video_dec_buf);
+	dst_vq->ops		= &aml_vdec_vb2_ops;
+	dst_vq->mem_ops		= &vb2_dma_contig_memops;
+	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	dst_vq->lock		= &ctx->dev->dev_mutex;
+	ret = vb2_queue_init(dst_vq);
+	if (ret) {
+		/* Don't leave a half-initialised queue pair behind. */
+		vb2_queue_release(src_vq);
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Failed to initialize videobuf2 queue(capture)\n");
+	}
+
+	return ret;
+}
+
diff --git a/drivers/amvdec_ports/aml_vcodec_dec.h b/drivers/amvdec_ports/aml_vcodec_dec.h
new file mode 100644
index 0000000..3653ff0
--- /dev/null
+++ b/drivers/amvdec_ports/aml_vcodec_dec.h
@@ -0,0 +1,126 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#ifndef _AML_VCODEC_DEC_H_
+#define _AML_VCODEC_DEC_H_
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+#include <linux/amlogic/media/codec_mm/codec_mm.h>
+#include <linux/amlogic/media/video_sink/v4lvideo_ext.h>
+#include "aml_vcodec_util.h"
+
+#define VCODEC_CAPABILITY_4K_DISABLED	0x10
+#define VCODEC_DEC_4K_CODED_WIDTH	4096U
+#define VCODEC_DEC_4K_CODED_HEIGHT	2304U
+#define AML_VDEC_MAX_W			2048U
+#define AML_VDEC_MAX_H			1088U
+
+#define AML_VDEC_IRQ_STATUS_DEC_SUCCESS	0x10000
+#define V4L2_BUF_FLAG_LAST		0x00100000
+
+#define VDEC_GATHER_MEMORY_TYPE		0
+#define VDEC_SCATTER_MEMORY_TYPE	1
+
+/**
+ * struct vdec_v4l2_buffer - decoder frame buffer
+ * @mem_type	: VDEC_GATHER_MEMORY_TYPE or VDEC_SCATTER_MEMORY_TYPE
+ * @num_planes	: used number of the plane
+ * @m.mem	: array mem for used planes,
+ *		  mem[0]: Y, mem[1]: C/U, mem[2]: V
+ * @m.vf_fd	: the file handle of video frame
+ * @vf_handle	: video frame handle
+ * @status	: frame buffer status (vdec_fb_status)
+ * @buf_idx	: index of this buffer
+ */
+
+struct vdec_v4l2_buffer {
+	int	mem_type;
+	int	num_planes;
+	union {
+		struct	aml_vcodec_mem mem[4];
+		u32	vf_fd;
+	} m;
+	ulong	vf_handle;
+	u32	status;
+	u32	buf_idx;
+};
+
+
+/**
+ * struct aml_video_dec_buf - Private data related to each VB2 buffer.
+ * @vb:		VB2 buffer
+ * @list:	link list
+ * @frame_buffer:	Decode status, and buffer information of Capture buffer
+ * @privdata:	per-buffer private data (see v4lvideo_ext.h)
+ * @mem:	codec_mm accounting handles; mem[0] is the whole buffer or
+ *		the Y plane, mem[1] the C plane for 2-plane buffers
+ * @mem_onwer:	codec_mm owner tag (historical typo for "owner"; the name
+ *		is used throughout the driver, so it is kept)
+ * @used:	Capture buffer contain decoded frame data and keep in
+ *			codec data structure
+ * @ready_to_display:	Capture buffer not display yet
+ * @que_in_m2m:	Buffer has been queued into the m2m framework
+ * @queued_in_vb2:	Capture buffer is queue in vb2
+ * @queued_in_v4l2:	Capture buffer is in v4l2 driver, but not in vb2
+ *			queue yet
+ * @lastframe:		Intput buffer is last buffer - EOS
+ * @error:		An unrecoverable error occurs on this buffer.
+ *
+ * Note : These status information help us track and debug buffer state
+ */
+struct aml_video_dec_buf {
+	struct vb2_v4l2_buffer vb;
+	struct list_head list;
+
+	struct vdec_v4l2_buffer frame_buffer;
+	struct file_private_data privdata;
+	struct codec_mm_s *mem[2];
+	char mem_onwer[32];
+	bool used;
+	bool ready_to_display;
+	bool que_in_m2m;
+	bool queued_in_vb2;
+	bool queued_in_v4l2;
+	bool lastframe;
+	bool error;
+};
+
+extern const struct v4l2_ioctl_ops aml_vdec_ioctl_ops;
+extern const struct v4l2_m2m_ops aml_vdec_m2m_ops;
+
+/*
+ * aml_vdec_lock/aml_vdec_unlock are for ctx instance to
+ * get/release lock before/after access decoder hw.
+ * aml_vdec_lock get decoder hw lock and set curr_ctx
+ * to ctx instance that get lock
+ */
+void aml_vdec_unlock(struct aml_vcodec_ctx *ctx);
+void aml_vdec_lock(struct aml_vcodec_ctx *ctx);
+int aml_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
+			   struct vb2_queue *dst_vq);
+void aml_vcodec_dec_set_default_params(struct aml_vcodec_ctx *ctx);
+void aml_vcodec_dec_release(struct aml_vcodec_ctx *ctx);
+int aml_vcodec_dec_ctrls_setup(struct aml_vcodec_ctx *ctx);
+void vdec_device_vf_run(struct aml_vcodec_ctx *ctx);
+void try_to_capture(struct aml_vcodec_ctx *ctx);
+void aml_thread_notify(struct aml_vcodec_ctx *ctx,
+	enum aml_thread_type type);
+int aml_thread_start(struct aml_vcodec_ctx *ctx, aml_thread_func func,
+	enum aml_thread_type type, const char *thread_name);
+void aml_thread_stop(struct aml_vcodec_ctx *ctx);
+void wait_vcodec_ending(struct aml_vcodec_ctx *ctx);
+void vdec_frame_buffer_release(void *data);
+void aml_vdec_dispatch_event(struct aml_vcodec_ctx *ctx, u32 changes);
+void* v4l_get_vf_handle(int fd);
+
+#endif /* _AML_VCODEC_DEC_H_ */
diff --git a/drivers/amvdec_ports/aml_vcodec_dec_drv.c b/drivers/amvdec_ports/aml_vcodec_dec_drv.c
new file mode 100644
index 0000000..1ed0b97
--- /dev/null
+++ b/drivers/amvdec_ports/aml_vcodec_dec_drv.c
@@ -0,0 +1,635 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+
+#define DEBUG
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+#include <linux/kthread.h>
+
+#include "aml_vcodec_drv.h"
+#include "aml_vcodec_dec.h"
+#include "aml_vcodec_util.h"
+#include "aml_vcodec_vfm.h"
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+
+/* VDEC interrupt/HW register constants. */
+#define VDEC_HW_ACTIVE		0x10
+#define VDEC_IRQ_CFG		0x11
+#define VDEC_IRQ_CLR		0x10
+#define VDEC_IRQ_CFG_REG	0xa4
+
+/* Private ioctls shared with the v4lvideo render path (see
+ * v4l2_vcodec_ioctl below). */
+#define V4LVIDEO_IOC_MAGIC  'I'
+#define V4LVIDEO_IOCTL_ALLOC_FD				_IOW(V4LVIDEO_IOC_MAGIC, 0x02, int)
+#define V4LVIDEO_IOCTL_CHECK_FD				_IOW(V4LVIDEO_IOC_MAGIC, 0x03, int)
+#define V4LVIDEO_IOCTL_SET_CONFIG_PARAMS	_IOWR(V4LVIDEO_IOC_MAGIC, 0x04, struct v4l2_config_parm)
+#define V4LVIDEO_IOCTL_GET_CONFIG_PARAMS	_IOWR(V4LVIDEO_IOC_MAGIC, 0x05, struct v4l2_config_parm)
+
+/* Module parameters; registered at the bottom of this file. */
+bool param_sets_from_ucode = 1;
+bool enable_drm_mode;
+
+/*
+ * Device-node .open: allocate and wire up a decode context.
+ *
+ * Sets up the v4l2 fh, control handler, m2m context (which creates both
+ * vb2 queues via aml_vcodec_dec_queue_init), the dedicated EOS "flush"
+ * buffer, and the capture thread.  On any failure the partially built
+ * context is torn down in reverse order.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fops_vcodec_open(struct file *file)
+{
+	struct aml_vcodec_dev *dev = video_drvdata(file);
+	struct aml_vcodec_ctx *ctx = NULL;
+	struct aml_video_dec_buf *aml_buf = NULL;
+	int ret = 0;
+	struct vb2_queue *src_vq;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+	aml_buf = kzalloc(sizeof(*aml_buf), GFP_KERNEL);
+	if (!aml_buf) {
+		kfree(ctx);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&dev->dev_mutex);
+	/* aml_buf serves as the synthetic "last frame" (EOS) buffer. */
+	ctx->empty_flush_buf = aml_buf;
+	ctx->id = dev->id_counter++;
+	v4l2_fh_init(&ctx->fh, video_devdata(file));
+	file->private_data = &ctx->fh;
+	v4l2_fh_add(&ctx->fh);
+	INIT_LIST_HEAD(&ctx->list);
+	INIT_LIST_HEAD(&ctx->vdec_thread_list);
+	dev->filp = file;
+	ctx->dev = dev;
+	init_waitqueue_head(&ctx->queue);
+	mutex_init(&ctx->state_lock);
+	mutex_init(&ctx->lock);
+	spin_lock_init(&ctx->slock);
+	init_completion(&ctx->comp);
+
+	ctx->param_sets_from_ucode = param_sets_from_ucode ? 1 : 0;
+
+	/* Module-level override: force DRM mode for every new context. */
+	if (enable_drm_mode) {
+		ctx->is_drm_mode = true;
+		ctx->param_sets_from_ucode = true;
+	}
+
+	ctx->type = AML_INST_DECODER;
+	ret = aml_vcodec_dec_ctrls_setup(ctx);
+	if (ret) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Failed to setup vcodec controls\n");
+		goto err_ctrls_setup;
+	}
+	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_dec, ctx,
+		&aml_vcodec_dec_queue_init);
+	if (IS_ERR((__force void *)ctx->m2m_ctx)) {
+		ret = PTR_ERR((__force void *)ctx->m2m_ctx);
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Failed to v4l2_m2m_ctx_init() (%d)\n", ret);
+		goto err_m2m_ctx_init;
+	}
+	src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+	ctx->output_thread_ready = true;
+	/* Attach the flush buffer to the output queue and mark it EOS. */
+	ctx->empty_flush_buf->vb.vb2_buf.vb2_queue = src_vq;
+	ctx->empty_flush_buf->lastframe = true;
+	aml_vcodec_dec_set_default_params(ctx);
+
+	ret = aml_thread_start(ctx, try_to_capture, AML_THREAD_CAPTURE, "cap");
+	if (ret) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Failed to creat capture thread.\n");
+		goto err_creat_thread;
+	}
+
+	list_add(&ctx->list, &dev->ctx_list);
+
+	mutex_unlock(&dev->dev_mutex);
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "%s decoder %lx\n",
+		dev_name(&dev->plat_dev->dev), (ulong)ctx);
+
+	return ret;
+
+	/* Deinit when failure occurred */
+err_creat_thread:
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+err_m2m_ctx_init:
+	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+err_ctrls_setup:
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx->empty_flush_buf);
+	kfree(ctx);
+	mutex_unlock(&dev->dev_mutex);
+
+	return ret;
+}
+
+/*
+ * Device-node .release: tear down a decode context in the reverse order
+ * of fops_vcodec_open().  Always returns 0.
+ */
+static int fops_vcodec_release(struct file *file)
+{
+	struct aml_vcodec_dev *dev = video_drvdata(file);
+	struct aml_vcodec_ctx *ctx = fh_to_ctx(file->private_data);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "release decoder %lx\n", (ulong) ctx);
+	mutex_lock(&dev->dev_mutex);
+
+	/*
+	 * Call v4l2_m2m_ctx_release before aml_vcodec_dec_release. First, it
+	 * makes sure the worker thread is not running after vdec_if_deinit.
+	 * Second, the decoder will be flushed and all the buffers will be
+	 * returned in stop_streaming.
+	 */
+	aml_thread_stop(ctx);
+	wait_vcodec_ending(ctx);
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	aml_vcodec_dec_release(ctx);
+
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
+	list_del_init(&ctx->list);
+	kfree(ctx->empty_flush_buf);
+	kfree(ctx);
+	mutex_unlock(&dev->dev_mutex);
+	return 0;
+}
+
+/*
+ * .release for the anonymous buffer files created by v4l2_alloc_fd():
+ * hand the private frame data back to the decoder for recycling.
+ */
+static int v4l2video_file_release(struct inode *inode, struct file *file)
+{
+	v4l_dbg(0, V4L_DEBUG_CODEC_BUFMGR, "file: %lx, data: %lx\n",
+		(ulong) file, (ulong) file->private_data);
+
+	if (file->private_data)
+		vdec_frame_buffer_release(file->private_data);
+
+	return 0;
+}
+
+/* fops installed on the anonymous buffer files; also used as the
+ * identity marker in is_v4l2_buf_file(). */
+const struct file_operations v4l2_file_fops = {
+	.release = v4l2video_file_release,
+};
+
+/*
+ * Allocate an anonymous-inode file carrying a zeroed file_private_data
+ * and install it into the caller's fd table; the new fd is returned
+ * through @fd.
+ *
+ * Returns 0 on success, -ENODEV if the fd/file cannot be obtained or
+ * -ENOMEM if the private data allocation fails.
+ */
+int v4l2_alloc_fd(int *fd)
+{
+	struct file *file = NULL;
+	int file_fd = get_unused_fd_flags(O_CLOEXEC);
+
+	if (file_fd < 0) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"get unused fd fail\n");
+		return -ENODEV;
+	}
+
+	file = anon_inode_getfile("v4l2_meta_file", &v4l2_file_fops, NULL, 0);
+	if (IS_ERR(file)) {
+		put_unused_fd(file_fd);
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"anon_inode_getfile fail\n");
+		return -ENODEV;
+	}
+
+	file->private_data =
+		kzalloc(sizeof(struct file_private_data), GFP_KERNEL);
+	if (!file->private_data) {
+		/* Undo the fd reservation and drop the file, otherwise
+		 * both leak on this path. */
+		put_unused_fd(file_fd);
+		fput(file);
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"alloc priv data faild.\n");
+		return -ENOMEM;
+	}
+
+	v4l_dbg(0, V4L_DEBUG_CODEC_BUFMGR, "fd %d, file %lx, data: %lx\n",
+		file_fd, (ulong) file, (ulong) file->private_data);
+
+	fd_install(file_fd, file);
+	*fd = file_fd;
+
+	return 0;
+}
+
+extern const struct file_operations v4l2_file_fops;
+/* A file is one of ours iff it carries the fops installed by
+ * v4l2_alloc_fd(). */
+bool is_v4l2_buf_file(struct file *file)
+{
+	return file->f_op == &v4l2_file_fops;
+}
+
+/*
+ * Validate that @fd refers to one of the anonymous buffer files created
+ * by v4l2_alloc_fd().
+ *
+ * Returns 0 if so, -EBADF if the fd cannot be resolved, -1 if it refers
+ * to some other file.
+ */
+int v4l2_check_fd(int fd)
+{
+	struct file *file;
+
+	file = fget(fd);
+
+	if (!file) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"fget fd %d fail!\n", fd);
+		return -EBADF;
+	}
+
+	if (!is_v4l2_buf_file(file)) {
+		fput(file);
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"is_v4l2_buf_file fail!\n");
+		return -1;
+	}
+
+	fput(file);
+
+	v4l_dbg(0, V4L_DEBUG_CODEC_EXINFO,
+		"ioctl ok, comm %s, pid %d\n",
+		 current->comm, current->pid);
+
+	return 0;
+}
+
+/*
+ * Copy @size bytes from @data into the private data of the buffer file
+ * behind @fd.
+ *
+ * Returns 0 on success, -EBADF if @fd cannot be resolved or does not
+ * refer to one of the anonymous buffer files from v4l2_alloc_fd().
+ *
+ * NOTE(review): @size is not bounded against
+ * sizeof(struct file_private_data); callers must never pass a larger
+ * size — confirm at the call sites.
+ */
+int dmabuf_fd_install_data(int fd, void* data, u32 size)
+{
+	struct file *file;
+
+	file = fget(fd);
+
+	if (!file) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"fget fd %d fail!, comm %s, pid %d\n",
+			fd, current->comm, current->pid);
+		return -EBADF;
+	}
+
+	if (!is_v4l2_buf_file(file)) {
+		fput(file);
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"the buf file checked fail!\n");
+		return -EBADF;
+	}
+
+	memcpy(file->private_data, data, size);
+
+	fput(file);
+
+	return 0;
+}
+
+/*
+ * Resolve @fd to the vframe handle stored in the buffer file's private
+ * data.
+ *
+ * Returns a pointer to the embedded vf, or NULL if @fd cannot be
+ * resolved, is not one of our buffer files, or carries no private data.
+ * The file reference is dropped before returning; the pointer stays
+ * valid only while the underlying file does.
+ */
+void* v4l_get_vf_handle(int fd)
+{
+	struct file *file;
+	struct file_private_data *data = NULL;
+	void *vf_handle = 0;
+
+	file = fget(fd);
+
+	if (!file) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"fget fd %d fail!, comm %s, pid %d\n",
+			fd, current->comm, current->pid);
+		return NULL;
+	}
+
+	if (!is_v4l2_buf_file(file)) {
+		fput(file);
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"the buf file checked fail!\n");
+		return NULL;
+	}
+
+	data = (struct file_private_data*) file->private_data;
+	if (data) {
+		vf_handle = &data->vf;
+		v4l_dbg(0, V4L_DEBUG_CODEC_BUFMGR, "file: %lx, data: %lx\n",
+			(ulong) file, (ulong) data);
+	}
+
+	fput(file);
+
+	return vf_handle;
+}
+
+
+/*
+ * Unlocked ioctl entry point.  Handles the private V4LVIDEO_* commands
+ * and forwards everything else to the standard video_ioctl2()
+ * dispatcher.
+ */
+static long v4l2_vcodec_ioctl(struct file *file,
+			unsigned int cmd,
+			ulong arg)
+{
+	long ret = 0;
+	void __user *argp = (void __user *)arg;
+
+	switch (cmd) {
+	case V4LVIDEO_IOCTL_ALLOC_FD:
+	{
+		/* int, to match the v4l2_alloc_fd(int *) signature and the
+		 * _IOW(..., int) ioctl definition. */
+		int v4lvideo_fd = 0;
+
+		ret = v4l2_alloc_fd(&v4lvideo_fd);
+		if (ret != 0)
+			break;
+		/* Report a fault instead of silently ignoring it. */
+		if (put_user(v4lvideo_fd, (int __user *)argp))
+			return -EFAULT;
+		v4l_dbg(0, V4L_DEBUG_CODEC_EXINFO,
+			"V4LVIDEO_IOCTL_ALLOC_FD fd %d\n",
+			v4lvideo_fd);
+		break;
+	}
+	case V4LVIDEO_IOCTL_CHECK_FD:
+	{
+		int v4lvideo_fd = 0;
+
+		if (get_user(v4lvideo_fd, (int __user *)argp))
+			return -EFAULT;
+		ret = v4l2_check_fd(v4lvideo_fd);
+		if (ret != 0)
+			break;
+		v4l_dbg(0, V4L_DEBUG_CODEC_EXINFO,
+			"V4LVIDEO_IOCTL_CHECK_FD fd %d\n",
+			v4lvideo_fd);
+		break;
+	}
+	case V4LVIDEO_IOCTL_SET_CONFIG_PARAMS:
+	{
+		struct aml_vcodec_ctx *ctx = NULL;
+
+		/* Config params are only valid on the decoder node, not on
+		 * the anonymous buffer files from v4l2_alloc_fd(). */
+		if (is_v4l2_buf_file(file))
+			break;
+
+		ctx = fh_to_ctx(file->private_data);
+		if (copy_from_user((void *)&ctx->config,
+			(void *)argp, sizeof(ctx->config))) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+				"set config parm err\n");
+			return -EFAULT;
+		}
+		break;
+	}
+	case V4LVIDEO_IOCTL_GET_CONFIG_PARAMS:
+	{
+		struct aml_vcodec_ctx *ctx = NULL;
+
+		if (is_v4l2_buf_file(file))
+			break;
+
+		ctx = fh_to_ctx(file->private_data);
+		if (copy_to_user((void *)argp,
+			(void *)&ctx->config, sizeof(ctx->config))) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+				"get config parm err\n");
+			return -EFAULT;
+		}
+		break;
+	}
+	default:
+		return video_ioctl2(file, cmd, arg);
+	}
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit compat entry point: translate the user pointer, then reuse the
+ * native ioctl handler. */
+static long v4l2_compat_ioctl(struct file *file,
+	unsigned int cmd, ulong arg)
+{
+	return v4l2_vcodec_ioctl(file, cmd, (ulong)compat_ptr(arg));
+}
+#endif
+
+/* File operations of the decoder video node; buffer poll/mmap are
+ * delegated to the m2m framework helpers. */
+static const struct v4l2_file_operations aml_vcodec_fops = {
+	.owner		= THIS_MODULE,
+	.open		= fops_vcodec_open,
+	.release	= fops_vcodec_release,
+	.poll		= v4l2_m2m_fop_poll,
+	.unlocked_ioctl = v4l2_vcodec_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = v4l2_compat_ioctl,
+#endif
+	.mmap		= v4l2_m2m_fop_mmap,
+};
+
+/*
+ * Platform probe: allocate the device state, register the v4l2 device,
+ * the m2m framework instance, the decode workqueue and finally the
+ * /dev/videoN node (requested at minor 26).
+ *
+ * Returns 0 on success or a negative errno; everything set up so far is
+ * undone via the cascading error labels.
+ */
+static int aml_vcodec_probe(struct platform_device *pdev)
+{
+	struct aml_vcodec_dev *dev;
+	struct video_device *vfd_dec;
+	int ret = 0;
+
+	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&dev->ctx_list);
+	dev->plat_dev = pdev;
+
+	mutex_init(&dev->dec_mutex);
+	mutex_init(&dev->dev_mutex);
+	spin_lock_init(&dev->irqlock);
+
+	snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
+		"[/AML_V4L2_VDEC]");
+
+	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+	if (ret) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"v4l2_device_register err=%d\n", ret);
+		goto err_res;
+	}
+
+	init_waitqueue_head(&dev->queue);
+
+	vfd_dec = video_device_alloc();
+	if (!vfd_dec) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"Failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto err_dec_alloc;
+	}
+
+	vfd_dec->fops		= &aml_vcodec_fops;
+	vfd_dec->ioctl_ops	= &aml_vdec_ioctl_ops;
+	vfd_dec->release	= video_device_release;
+	vfd_dec->lock		= &dev->dev_mutex;
+	vfd_dec->v4l2_dev	= &dev->v4l2_dev;
+	vfd_dec->vfl_dir	= VFL_DIR_M2M;
+	vfd_dec->device_caps	= V4L2_CAP_VIDEO_M2M_MPLANE |
+				V4L2_CAP_STREAMING;
+
+	snprintf(vfd_dec->name, sizeof(vfd_dec->name), "%s",
+		AML_VCODEC_DEC_NAME);
+	video_set_drvdata(vfd_dec, dev);
+	dev->vfd_dec = vfd_dec;
+	platform_set_drvdata(pdev, dev);
+
+	dev->m2m_dev_dec = v4l2_m2m_init(&aml_vdec_m2m_ops);
+	if (IS_ERR((__force void *)dev->m2m_dev_dec)) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"Failed to init mem2mem dec device\n");
+		ret = PTR_ERR((__force void *)dev->m2m_dev_dec);
+		goto err_dec_mem_init;
+	}
+
+	dev->decode_workqueue =
+		alloc_ordered_workqueue(AML_VCODEC_DEC_NAME,
+			WQ_MEM_RECLAIM | WQ_FREEZABLE);
+	if (!dev->decode_workqueue) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"Failed to create decode workqueue\n");
+		ret = -EINVAL;
+		goto err_event_workq;
+	}
+
+	//dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
+
+	ret = video_register_device(vfd_dec, VFL_TYPE_GRABBER, 26);
+	if (ret) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"Failed to register video device\n");
+		goto err_dec_reg;
+	}
+
+	v4l_dbg(0, V4L_DEBUG_CODEC_PRINFO,
+		"decoder registered as /dev/video%d\n", vfd_dec->num);
+
+	return 0;
+
+err_dec_reg:
+	destroy_workqueue(dev->decode_workqueue);
+err_event_workq:
+	v4l2_m2m_release(dev->m2m_dev_dec);
+err_dec_mem_init:
+	/* On every path reaching here the device was never successfully
+	 * registered, so video_unregister_device() would be a no-op and
+	 * leak vfd_dec; release the allocation directly instead. */
+	video_device_release(vfd_dec);
+err_dec_alloc:
+	v4l2_device_unregister(&dev->v4l2_dev);
+err_res:
+
+	return ret;
+}
+
+/* Device-tree match table: binds to the "amlogic, vcodec-dec" node. */
+static const struct of_device_id aml_vcodec_match[] = {
+	{.compatible = "amlogic, vcodec-dec",},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, aml_vcodec_match);
+
+/*
+ * Platform remove: undo aml_vcodec_probe() in reverse order.  The
+ * workqueue is flushed first so no decode work touches freed state.
+ */
+static int aml_vcodec_dec_remove(struct platform_device *pdev)
+{
+	struct aml_vcodec_dev *dev = platform_get_drvdata(pdev);
+
+	flush_workqueue(dev->decode_workqueue);
+	destroy_workqueue(dev->decode_workqueue);
+
+	if (dev->m2m_dev_dec)
+		v4l2_m2m_release(dev->m2m_dev_dec);
+
+	if (dev->vfd_dec)
+		video_unregister_device(dev->vfd_dec);
+
+	v4l2_device_unregister(&dev->v4l2_dev);
+
+	return 0;
+}
+
+/*static void aml_vcodec_dev_release(struct device *dev)
+{
+}*/
+
+/* Platform driver glue; registered via module_platform_driver() below. */
+static struct platform_driver aml_vcodec_dec_driver = {
+	.probe	= aml_vcodec_probe,
+	.remove	= aml_vcodec_dec_remove,
+	.driver	= {
+		.name	= AML_VCODEC_DEC_NAME,
+		.of_match_table = aml_vcodec_match,
+	},
+};
+
+/*
+static struct platform_device aml_vcodec_dec_device = {
+	.name		= AML_VCODEC_DEC_NAME,
+	.dev.release	= aml_vcodec_dev_release,
+};*/
+
+module_platform_driver(aml_vcodec_dec_driver);
+
+/*
+static int __init amvdec_ports_init(void)
+{
+	int ret;
+
+	ret = platform_device_register(&aml_vcodec_dec_device);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&aml_vcodec_dec_driver);
+	if (ret)
+		platform_device_unregister(&aml_vcodec_dec_device);
+
+	return ret;
+}
+
+static void __exit amvdec_ports_exit(void)
+{
+	platform_driver_unregister(&aml_vcodec_dec_driver);
+	platform_device_unregister(&aml_vcodec_dec_device);
+}
+
+module_init(amvdec_ports_init);
+module_exit(amvdec_ports_exit);
+*/
+
+/* Global debug-level bitmask consumed by v4l_dbg(). */
+u32 debug_mode;
+EXPORT_SYMBOL(debug_mode);
+module_param(debug_mode, uint, 0644);
+
+/* When set, aml_set_vfm_path overrides the default vfm path. */
+bool aml_set_vfm_enable;
+EXPORT_SYMBOL(aml_set_vfm_enable);
+module_param(aml_set_vfm_enable, bool, 0644);
+
+int aml_set_vfm_path;
+EXPORT_SYMBOL(aml_set_vfm_path);
+module_param(aml_set_vfm_path, int, 0644);
+
+/* When set, aml_set_vdec_type overrides the decoder-core selection. */
+bool aml_set_vdec_type_enable;
+EXPORT_SYMBOL(aml_set_vdec_type_enable);
+module_param(aml_set_vdec_type, int, 0644);
+
+int aml_set_vdec_type;
+EXPORT_SYMBOL(aml_set_vdec_type);
+module_param(aml_set_vdec_type, int, 0644);
+
+/* Bitstream prefixing knobs for VP9/AV1 frame headers. */
+int vp9_need_prefix;
+EXPORT_SYMBOL(vp9_need_prefix);
+module_param(vp9_need_prefix, int, 0644);
+
+int av1_need_prefix;
+EXPORT_SYMBOL(av1_need_prefix);
+module_param(av1_need_prefix, int, 0644);
+
+/* Selects the multi-planar capture queue type (see queue_init). */
+bool multiplanar;
+EXPORT_SYMBOL(multiplanar);
+module_param(multiplanar, bool, 0644);
+
+/* Frame-dump debug switches. */
+bool dump_capture_frame;
+EXPORT_SYMBOL(dump_capture_frame);
+module_param(dump_capture_frame, bool, 0644);
+
+int dump_output_frame;
+EXPORT_SYMBOL(dump_output_frame);
+module_param(dump_output_frame, int, 0644);
+
+EXPORT_SYMBOL(param_sets_from_ucode);
+module_param(param_sets_from_ucode, bool, 0644);
+
+EXPORT_SYMBOL(enable_drm_mode);
+module_param(enable_drm_mode, bool, 0644);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("AML video codec V4L2 decoder driver");
+
diff --git a/drivers/amvdec_ports/aml_vcodec_drv.h b/drivers/amvdec_ports/aml_vcodec_drv.h
new file mode 100644
index 0000000..2e9e576
--- /dev/null
+++ b/drivers/amvdec_ports/aml_vcodec_drv.h
@@ -0,0 +1,567 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#ifndef _AML_VCODEC_DRV_H_
+#define _AML_VCODEC_DRV_H_
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <linux/amlogic/media/vfm/vframe.h>
+#include "aml_vcodec_util.h"
+
+#define AML_VCODEC_DRV_NAME	"aml_vcodec_drv"
+#define AML_VCODEC_DEC_NAME	"aml-vcodec-dec"
+#define AML_VCODEC_ENC_NAME	"aml-vcodec-enc"
+#define AML_PLATFORM_STR	"platform:amlogic"
+
+#define AML_VCODEC_MAX_PLANES	3
+#define AML_V4L2_BENCHMARK	0
+#define WAIT_INTR_TIMEOUT_MS	1000
+
+/* codec types of get/set parms. */
+#define V4L2_CONFIG_PARM_ENCODE		(0)
+#define V4L2_CONFIG_PARM_DECODE		(1)
+
+/* types of decode parms. */
+#define V4L2_CONFIG_PARM_DECODE_CFGINFO	(1 << 0)
+#define V4L2_CONFIG_PARM_DECODE_PSINFO	(1 << 1)
+#define V4L2_CONFIG_PARM_DECODE_HDRINFO	(1 << 2)
+#define V4L2_CONFIG_PARM_DECODE_CNTINFO	(1 << 3)
+
+/* amlogic event define. */
+/* #define V4L2_EVENT_SRC_CH_RESOLUTION	(1 << 0) */
+#define V4L2_EVENT_SRC_CH_HDRINFO	(1 << 1)
+#define V4L2_EVENT_SRC_CH_PSINFO	(1 << 2)
+#define V4L2_EVENT_SRC_CH_CNTINFO	(1 << 3)
+
+/* exception handing */
+#define V4L2_EVENT_REQUEST_RESET	(1 << 8)
+#define V4L2_EVENT_REQUEST_EXIT		(1 << 9)
+
+/* eos event */
+#define V4L2_EVENT_SEND_EOS		(1 << 16)
+
+/* v4l buffer pool */
+#define V4L_CAP_BUFF_MAX		(32)
+#define V4L_CAP_BUFF_INVALID		(0)
+#define V4L_CAP_BUFF_IN_M2M		(1)
+#define V4L_CAP_BUFF_IN_DEC		(2)
+
+/* v4l reset mode */
+#define V4L_RESET_MODE_NORMAL		(1 << 0) /* reset vdec_input and decoder. */
+#define V4L_RESET_MODE_LIGHT		(1 << 1) /* just only reset decoder. */
+
+/* m2m job queue's status */
+/* Instance is already queued on the job_queue */
+#define TRANS_QUEUED		(1 << 0)
+/* Instance is currently running in hardware */
+#define TRANS_RUNNING		(1 << 1)
+/* Instance is currently aborting */
+#define TRANS_ABORT		(1 << 2)
+
+/**
+ * enum aml_hw_reg_idx - AML hw register base index
+ */
+enum aml_hw_reg_idx {
+	VDEC_SYS,
+	VDEC_MISC,
+	VDEC_LD,
+	VDEC_TOP,
+	VDEC_CM,
+	VDEC_AD,
+	VDEC_AV,
+	VDEC_PP,
+	VDEC_HWD,
+	VDEC_HWQ,
+	VDEC_HWB,
+	VDEC_HWG,
+	NUM_MAX_VDEC_REG_BASE,
+	/* h264 encoder */
+	VENC_SYS = NUM_MAX_VDEC_REG_BASE,
+	/* vp8 encoder */
+	VENC_LT_SYS,
+	NUM_MAX_VCODEC_REG_BASE
+};
+
+/**
+ * enum aml_instance_type - The type of an AML Vcodec instance.
+ */
+enum aml_instance_type {
+	AML_INST_DECODER		= 0,
+	AML_INST_ENCODER		= 1,
+};
+
+/**
+ * enum aml_instance_state - The state of an AML Vcodec instance.
+ * @AML_STATE_IDLE	- default state when instance is created
+ * @AML_STATE_INIT	- vcodec instance is initialized
+ * @AML_STATE_PROBE	- vdec/venc had sps/pps header parsed/encoded
+ * @AML_STATE_ACTIVE	- vdec is ready for work.
+ * @AML_STATE_FLUSHING	- vdec is flushing. Only used by decoder
+ * @AML_STATE_FLUSHED	- decoder has transacted the last frame.
+ * @AML_STATE_RESET	- decoder has be reset after flush.
+ * @AML_STATE_ABORT	- vcodec should be aborted
+ */
+enum aml_instance_state {
+	AML_STATE_IDLE,
+	AML_STATE_INIT,
+	AML_STATE_PROBE,
+	AML_STATE_READY,
+	AML_STATE_ACTIVE,
+	AML_STATE_FLUSHING,
+	AML_STATE_FLUSHED,
+	AML_STATE_RESET,
+	AML_STATE_ABORT,
+};
+
+/**
+ * struct aml_encode_param - General encoding parameters type
+ */
+enum aml_encode_param {
+	AML_ENCODE_PARAM_NONE = 0,
+	AML_ENCODE_PARAM_BITRATE = (1 << 0),
+	AML_ENCODE_PARAM_FRAMERATE = (1 << 1),
+	AML_ENCODE_PARAM_INTRA_PERIOD = (1 << 2),
+	AML_ENCODE_PARAM_FORCE_INTRA = (1 << 3),
+	AML_ENCODE_PARAM_GOP_SIZE = (1 << 4),
+};
+
+enum aml_fmt_type {
+	AML_FMT_DEC = 0,
+	AML_FMT_ENC = 1,
+	AML_FMT_FRAME = 2,
+};
+
+/**
+ * struct aml_video_fmt - Structure used to store information about pixelformats
+ */
+struct aml_video_fmt {
+	u32	fourcc;
+	enum aml_fmt_type	type;
+	u32	num_planes;
+};
+
+/**
+ * struct aml_codec_framesizes - Structure used to store information about
+ *							framesizes
+ */
+struct aml_codec_framesizes {
+	u32	fourcc;
+	struct	v4l2_frmsize_stepwise	stepwise;
+};
+
+/**
+ * struct aml_q_type - Type of queue
+ */
+enum aml_q_type {
+	AML_Q_DATA_SRC = 0,
+	AML_Q_DATA_DST = 1,
+};
+
+
+/**
+ * struct aml_q_data - Structure used to store information about queue
+ */
+struct aml_q_data {
+	unsigned int	visible_width;
+	unsigned int	visible_height;
+	unsigned int	coded_width;
+	unsigned int	coded_height;
+	enum v4l2_field	field;
+	unsigned int	bytesperline[AML_VCODEC_MAX_PLANES];
+	unsigned int	sizeimage[AML_VCODEC_MAX_PLANES];
+	struct aml_video_fmt	*fmt;
+	bool resolution_changed;
+};
+
+/**
+ * struct aml_enc_params - General encoding parameters
+ * @bitrate: target bitrate in bits per second
+ * @num_b_frame: number of b frames between p-frame
+ * @rc_frame: frame based rate control
+ * @rc_mb: macroblock based rate control
+ * @seq_hdr_mode: H.264 sequence header is encoded separately or joined
+ *		  with the first frame
+ * @intra_period: I frame period
+ * @gop_size: group of picture size, it's used as the intra frame period
+ * @framerate_num: frame rate numerator. ex: framerate_num=30 and
+ *		   framerate_denom=1 means FPS is 30
+ * @framerate_denom: frame rate denominator. ex: framerate_num=30 and
+ *		     framerate_denom=1 means FPS is 30
+ * @h264_max_qp: Max value for H.264 quantization parameter
+ * @h264_profile: V4L2 defined H.264 profile
+ * @h264_level: V4L2 defined H.264 level
+ * @force_intra: force/insert intra frame
+ */
+struct aml_enc_params {
+	unsigned int	bitrate;
+	unsigned int	num_b_frame;
+	unsigned int	rc_frame;
+	unsigned int	rc_mb;
+	unsigned int	seq_hdr_mode;
+	unsigned int	intra_period;
+	unsigned int	gop_size;
+	unsigned int	framerate_num;
+	unsigned int	framerate_denom;
+	unsigned int	h264_max_qp;
+	unsigned int	h264_profile;
+	unsigned int	h264_level;
+	unsigned int	force_intra;
+};
+
+/**
+ * struct aml_vcodec_pm - Power management data structure
+ */
+struct aml_vcodec_pm {
+	struct clk	*vdec_bus_clk_src;
+	struct clk	*vencpll;
+
+	struct clk	*vcodecpll;
+	struct clk	*univpll_d2;
+	struct clk	*clk_cci400_sel;
+	struct clk	*vdecpll;
+	struct clk	*vdec_sel;
+	struct clk	*vencpll_d2;
+	struct clk	*venc_sel;
+	struct clk	*univpll1_d2;
+	struct clk	*venc_lt_sel;
+	struct device	*larbvdec;
+	struct device	*larbvenc;
+	struct device	*larbvenclt;
+	struct device	*dev;
+	struct aml_vcodec_dev	*amldev;
+};
+
+/**
+ * struct vdec_pic_info  - picture size information
+ * @visible_width: picture width
+ * @visible_height: picture height
+ * @coded_width: picture buffer width (64 aligned up from pic_w)
+ * @coded_height: picture buffer height (64 aligned up from pic_h)
+ * @y_bs_sz: Y bitstream size
+ * @c_bs_sz: CbCr bitstream size
+ * @y_len_sz: additional size required to store decompress information for y
+ *		plane
+ * @c_len_sz: additional size required to store decompress information for cbcr
+ *		plane
+ * E.g. suppose picture size is 176x144,
+ *      buffer size will be aligned to 176x160.
+ * @field: frame/field information.
+ */
+struct vdec_pic_info {
+	unsigned int visible_width;
+	unsigned int visible_height;
+	unsigned int coded_width;
+	unsigned int coded_height;
+	unsigned int y_bs_sz;
+	unsigned int c_bs_sz;
+	unsigned int y_len_sz;
+	unsigned int c_len_sz;
+	int profile_idc;
+	int ref_frame_count;
+	enum v4l2_field field;
+};
+
+struct aml_vdec_cfg_infos {
+	u32 double_write_mode;
+	u32 init_width;
+	u32 init_height;
+	u32 ref_buf_margin;
+	u32 canvas_mem_mode;
+	u32 canvas_mem_endian;
+	u32 low_latency_mode;
+};
+
+struct aml_vdec_hdr_infos {
+	/*
+	 * bit 29   : present_flag
+	 * bit 28-26: video_format "component", "PAL", "NTSC", "SECAM", "MAC", "unspecified"
+	 * bit 25   : range "limited", "full_range"
+	 * bit 24   : color_description_present_flag
+	 * bit 23-16: color_primaries "unknown", "bt709", "undef", "bt601",
+	 *            "bt470m", "bt470bg", "smpte170m", "smpte240m", "film", "bt2020"
+	 * bit 15-8 : transfer_characteristic "unknown", "bt709", "undef", "bt601",
+	 *            "bt470m", "bt470bg", "smpte170m", "smpte240m",
+	 *            "linear", "log100", "log316", "iec61966-2-4",
+	 *            "bt1361e", "iec61966-2-1", "bt2020-10", "bt2020-12",
+	 *            "smpte-st-2084", "smpte-st-428"
+	 * bit 7-0  : matrix_coefficient "GBR", "bt709", "undef", "bt601",
+	 *            "fcc", "bt470bg", "smpte170m", "smpte240m",
+	 *            "YCgCo", "bt2020nc", "bt2020c"
+	 */
+	u32 signal_type;
+	struct vframe_master_display_colour_s color_parms;
+};
+
+struct aml_vdec_ps_infos {
+	u32 visible_width;
+	u32 visible_height;
+	u32 coded_width;
+	u32 coded_height;
+	u32 profile;
+	u32 mb_width;
+	u32 mb_height;
+	u32 dpb_size;
+	u32 ref_frames;
+	u32 reorder_frames;
+	u32 field;
+};
+
+struct aml_vdec_cnt_infos {
+	u32 bit_rate;
+	u32 frame_count;
+	u32 error_frame_count;
+	u32 drop_frame_count;
+	u32 total_data;
+};
+
+struct aml_dec_params {
+	u32 parms_status;
+	struct aml_vdec_cfg_infos	cfg;
+	struct aml_vdec_ps_infos	ps;
+	struct aml_vdec_hdr_infos	hdr;
+	struct aml_vdec_cnt_infos	cnt;
+};
+
+struct v4l2_config_parm {
+	u32 type;
+	u32 length;
+	union {
+		struct aml_dec_params dec;
+		struct aml_enc_params enc;
+		u8 data[200];
+	} parm;
+	u8 buf[4096];
+};
+
+struct v4l_buff_pool {
+	/*
+	 * bit 31-16: buffer state
+	 * bit 15- 0: buffer index
+	 */
+	u32 seq[V4L_CAP_BUFF_MAX];
+	u32 in, out;
+	u32 dec, vpp;
+};
+
+enum aml_thread_type {
+	AML_THREAD_OUTPUT,
+	AML_THREAD_CAPTURE,
+};
+
+typedef void (*aml_thread_func)(struct aml_vcodec_ctx *ctx);
+
+struct aml_vdec_thread {
+	struct list_head node;
+	spinlock_t lock;
+	struct semaphore sem;
+	struct task_struct *task;
+	enum aml_thread_type type;
+	void *priv;
+	int stop;
+
+	aml_thread_func func;
+};
+
+/**
+ * struct aml_vcodec_ctx - Context (instance) private data.
+ *
+ * @id: index of the context that this structure describes.
+ * @type: type of the instance - decoder or encoder.
+ * @dev: pointer to the aml_vcodec_dev of the device.
+ * @m2m_ctx: pointer to the v4l2_m2m_ctx of the context.
+ * @ada_ctx: pointer to the aml_vdec_adapt of the context.
+ * @dec_if: hooked decoder driver interface.
+ * @drv_handle: driver handle for specific decode instance
+ * @fh: struct v4l2_fh.
+ * @ctrl_hdl: handler for v4l2 framework.
+ * @slock: protect v4l2 codec context.
+ * @empty_flush_buf: a fake size-0 capture buffer that indicates flush.
+ * @list: link to ctx_list of aml_vcodec_dev.
+ * @q_data: store information of input and output queue of the context.
+ * @queue: waitqueue that can be used to wait for this context to finish.
+ * @lock: protect the vdec thread.
+ * @state_lock: protect the codec status.
+ * @state: state of the context.
+ * @decode_work: decoder work be used to output buffer.
+ * @output_thread_ready: indicate the output thread ready.
+ * @cap_pool: capture buffers are remark in the pool.
+ * @vdec_thread_list: vdec thread be used to capture.
+ * @dpb_size: store dpb count after header parsing
+ * @param_change: indicate encode parameter type
+ * @param_sets_from_ucode: if true indicate ps from ucode.
+ * @v4l_codec_dpb_ready: queue buffer number greater than dpb.
+ * @v4l_resolution_change: indicate resolution change happened.
+ * @comp: comp be used for sync picture information with decoder.
+ * @config: used to set or get parms for application.
+ * @picinfo: store picture info after header parsing.
+ * @last_decoded_picinfo: pic information obtained from the latest decode.
+ * @colorspace: enum v4l2_colorspace; supplemental to pixelformat.
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding.
+ * @quantization: enum v4l2_quantization, colorspace quantization.
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function.
+ * @cap_pix_fmt: the picture format used to switch nv21 or nv12.
+ * @has_receive_eos: if receive last frame of capture that be set.
+ * @is_drm_mode: decoding work on drm mode if that set.
+ * @is_stream_mode: vdec input used to stream mode, default frame mode.
+ * @is_stream_off: the value used to handle reset active.
+ * @is_out_stream_off: streamoff called for output port.
+ * @receive_cmd_stop: if receive the cmd flush decoder.
+ * @reset_flag: reset mode includes lightly and normal mode.
+ * @decoded_frame_cnt: the capture buffer deque number to be count.
+ * @buf_used_count: means that decode allocate how many buffs from v4l.
+ */
+struct aml_vcodec_ctx {
+	int				id;
+	enum aml_instance_type		type;
+	struct aml_vcodec_dev		*dev;
+	struct v4l2_m2m_ctx		*m2m_ctx;
+	struct aml_vdec_adapt		*ada_ctx;
+	const struct vdec_common_if	*dec_if;
+	ulong				drv_handle;
+	struct v4l2_fh			fh;
+	struct v4l2_ctrl_handler	ctrl_hdl;
+	spinlock_t			slock;
+	struct aml_video_dec_buf	*empty_flush_buf;
+	struct list_head		list;
+
+	struct aml_q_data		q_data[2];
+	wait_queue_head_t		queue;
+	struct mutex			lock, state_lock;
+	enum aml_instance_state		state;
+	struct work_struct		decode_work;
+	bool				output_thread_ready;
+	struct v4l_buff_pool		cap_pool;
+	struct list_head		vdec_thread_list;
+
+	int				dpb_size;
+	bool				param_sets_from_ucode;
+	bool				v4l_codec_dpb_ready;
+	bool				v4l_resolution_change;
+	struct completion		comp;
+	struct v4l2_config_parm		config;
+	struct vdec_pic_info		picinfo;
+	struct vdec_pic_info		last_decoded_picinfo;
+	enum v4l2_colorspace		colorspace;
+	enum v4l2_ycbcr_encoding	ycbcr_enc;
+	enum v4l2_quantization		quantization;
+	enum v4l2_xfer_func		xfer_func;
+	u32				cap_pix_fmt;
+
+	bool				has_receive_eos;
+	bool				is_drm_mode;
+	bool				output_dma_mode;
+	bool				is_stream_off;
+	bool				is_out_stream_off;
+	bool				receive_cmd_stop;
+	int				reset_flag;
+	int				decoded_frame_cnt;
+	int				buf_used_count;
+};
+
+/**
+ * struct aml_vcodec_dev - driver data
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @vfd_dec: Video device for decoder
+ * @vfd_enc: Video device for encoder.
+ *
+ * @m2m_dev_dec: m2m device for decoder
+ * @m2m_dev_enc: m2m device for encoder.
+ * @plat_dev: platform device
+ * @vpu_plat_dev: aml vpu platform device
+ * @alloc_ctx: VB2 allocator context
+ *	       (for allocations without kernel mapping).
+ * @ctx_list: list of struct aml_vcodec_ctx
+ * @irqlock: protect data access by irq handler and work thread
+ * @curr_ctx: The context that is waiting for codec hardware
+ *
+ * @reg_base: Mapped address of AML Vcodec registers.
+ *
+ * @id_counter: used to identify current opened instance
+ *
+ * @encode_workqueue: encode work queue
+ *
+ * @int_cond: used to identify interrupt condition happen
+ * @int_type: used to identify what kind of interrupt condition happen
+ * @dev_mutex: video_device lock
+ * @queue: waitqueue for waiting for completion of device commands
+ *
+ * @dec_irq: decoder irq resource
+ * @enc_irq: h264 encoder irq resource
+ * @enc_lt_irq: vp8 encoder irq resource
+ *
+ * @dec_mutex: decoder hardware lock
+ * @enc_mutex: encoder hardware lock.
+ *
+ * @pm: power management control
+ * @dec_capability: used to identify decode capability, ex: 4k
+ * @enc_capability: used to identify encode capability
+ */
+struct aml_vcodec_dev {
+	struct v4l2_device v4l2_dev;
+	struct video_device *vfd_dec;
+	struct video_device *vfd_enc;
+	struct file *filp;
+
+	struct v4l2_m2m_dev *m2m_dev_dec;
+	struct v4l2_m2m_dev *m2m_dev_enc;
+	struct platform_device *plat_dev;
+	struct platform_device *vpu_plat_dev;//??
+	struct vb2_alloc_ctx *alloc_ctx;//??
+	struct list_head ctx_list;
+	spinlock_t irqlock;
+	struct aml_vcodec_ctx *curr_ctx;
+	void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
+
+	unsigned long id_counter;
+
+	struct workqueue_struct *decode_workqueue;
+	struct workqueue_struct *encode_workqueue;
+	int int_cond;
+	int int_type;
+	struct mutex dev_mutex;
+	wait_queue_head_t queue;
+
+	int dec_irq;
+	int enc_irq;
+	int enc_lt_irq;
+
+	struct mutex dec_mutex;
+	struct mutex enc_mutex;
+
+	struct aml_vcodec_pm pm;
+	unsigned int dec_capability;
+	unsigned int enc_capability;
+};
+
+static inline struct aml_vcodec_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+	return container_of(fh, struct aml_vcodec_ctx, fh);
+}
+
+static inline struct aml_vcodec_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
+{
+	return container_of(ctrl->handler, struct aml_vcodec_ctx, ctrl_hdl);
+}
+
+#endif /* _AML_VCODEC_DRV_H_ */
diff --git a/drivers/amvdec_ports/aml_vcodec_util.c b/drivers/amvdec_ports/aml_vcodec_util.c
new file mode 100644
index 0000000..509a42a
--- /dev/null
+++ b/drivers/amvdec_ports/aml_vcodec_util.c
@@ -0,0 +1,112 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/module.h>
+
+#include "aml_vcodec_drv.h"
+#include "aml_vcodec_util.h"
+
+void __iomem *aml_vcodec_get_reg_addr(struct aml_vcodec_ctx *data,
+					unsigned int reg_idx)
+{
+	struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)data;
+
+	if (!data || reg_idx >= NUM_MAX_VCODEC_REG_BASE) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"Invalid arguments, reg_idx=%d\n", reg_idx);
+		return NULL;
+	}
+	return ctx->dev->reg_base[reg_idx];
+}
+EXPORT_SYMBOL(aml_vcodec_get_reg_addr);
+
+int aml_vcodec_mem_alloc(struct aml_vcodec_ctx *data,
+			struct aml_vcodec_mem *mem)
+{
+	unsigned long size = mem->size;
+	struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)data;
+	struct device *dev = &ctx->dev->plat_dev->dev;
+
+	mem->vaddr = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+	//mem->vaddr = codec_mm_dma_alloc_coherent(dev_name(dev), size,
+	//		&mem->dma_addr, GFP_KERNEL, 0);
+	if (!mem->vaddr) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"%s dma_alloc size=%ld failed!\n", dev_name(dev),
+			     size);
+		return -ENOMEM;
+	}
+
+	memset(mem->vaddr, 0, size);
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "va: %p\n", mem->vaddr);
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "dma: 0x%lx\n", (ulong) mem->dma_addr);
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "size: 0x%lx\n", size);
+
+	return 0;
+}
+EXPORT_SYMBOL(aml_vcodec_mem_alloc);
+
+void aml_vcodec_mem_free(struct aml_vcodec_ctx *data,
+			struct aml_vcodec_mem *mem)
+{
+	unsigned long size = mem->size;
+	struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)data;
+	struct device *dev = &ctx->dev->plat_dev->dev;
+
+	if (!mem->vaddr) {
+		v4l_dbg(ctx, V4L_DEBUG_CODEC_ERROR,
+			"%s dma_free size=%ld failed!\n", dev_name(dev),
+			     size);
+		return;
+	}
+
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "va: %p\n", mem->vaddr);
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "dma: 0x%lx\n", (ulong) mem->dma_addr);
+	v4l_dbg(ctx, V4L_DEBUG_CODEC_PRINFO, "size: 0x%lx\n", size);
+
+	dma_free_coherent(dev, size, mem->vaddr, mem->dma_addr);
+	mem->vaddr = NULL;
+	mem->dma_addr = 0;
+	mem->size = 0;
+}
+EXPORT_SYMBOL(aml_vcodec_mem_free);
+
+void aml_vcodec_set_curr_ctx(struct aml_vcodec_dev *dev,
+	struct aml_vcodec_ctx *ctx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+	dev->curr_ctx = ctx;
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+EXPORT_SYMBOL(aml_vcodec_set_curr_ctx);
+
+struct aml_vcodec_ctx *aml_vcodec_get_curr_ctx(struct aml_vcodec_dev *dev)
+{
+	unsigned long flags;
+	struct aml_vcodec_ctx *ctx;
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+	ctx = dev->curr_ctx;
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	return ctx;
+}
+EXPORT_SYMBOL(aml_vcodec_get_curr_ctx);
diff --git a/drivers/amvdec_ports/aml_vcodec_util.h b/drivers/amvdec_ports/aml_vcodec_util.h
new file mode 100644
index 0000000..312ee40
--- /dev/null
+++ b/drivers/amvdec_ports/aml_vcodec_util.h
@@ -0,0 +1,105 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#ifndef _AML_VCODEC_UTIL_H_
+#define _AML_VCODEC_UTIL_H_
+
+#include <linux/types.h>
+#include <linux/dma-direction.h>
+#include <linux/amlogic/media/codec_mm/codec_mm.h>
+
+typedef unsigned long long	u64;
+typedef signed long long	s64;
+typedef unsigned int		u32;
+typedef unsigned short int	u16;
+typedef short int		s16;
+typedef unsigned char		u8;
+
+#define CODEC_MODE(a, b, c, d)\
+	(((u8)(a) << 24) | ((u8)(b) << 16) | ((u8)(c) << 8) | (u8)(d))
+
+#define BUFF_IDX(h, i)\
+	(((ulong)(h) << 8) | (u8)(i))
+
+struct aml_vcodec_mem {
+	int	index;
+	ulong	addr;
+	u32	size;
+	void	*vaddr;
+	u32	bytes_used;
+	u32	offset;
+	u64	timestamp;
+	dma_addr_t dma_addr;
+	u32	model;
+};
+
+struct aml_vcodec_ctx;
+struct aml_vcodec_dev;
+
+extern u32 debug_mode;
+
+#ifdef v4l_dbg
+#undef v4l_dbg
+#endif
+
+/* v4l debug define. */
+#define V4L_DEBUG_CODEC_ERROR	(0)
+#define V4L_DEBUG_CODEC_PRINFO	(1 << 0)
+#define V4L_DEBUG_CODEC_STATE	(1 << 1)
+#define V4L_DEBUG_CODEC_BUFMGR	(1 << 2)
+#define V4L_DEBUG_CODEC_INPUT	(1 << 3)
+#define V4L_DEBUG_CODEC_OUTPUT	(1 << 4)
+#define V4L_DEBUG_CODEC_COUNT	(1 << 5)
+#define V4L_DEBUG_CODEC_PARSER	(1 << 6)
+#define V4L_DEBUG_CODEC_PROT	(1 << 7)
+#define V4L_DEBUG_CODEC_EXINFO	(1 << 8)
+
+#define __v4l_dbg(h, id, fmt, args...)					\
+	do {								\
+		if (h)							\
+			pr_info("[%d]: " fmt, id, ##args);		\
+		else							\
+			pr_info(fmt, ##args);				\
+	} while (0)
+
+#define v4l_dbg(h, flags, fmt, args...)						\
+	do {									\
+		struct aml_vcodec_ctx *__ctx = (struct aml_vcodec_ctx *) h;	\
+		if ((flags == V4L_DEBUG_CODEC_ERROR) ||				\
+			(flags == V4L_DEBUG_CODEC_PRINFO) ||			\
+			(debug_mode & flags)) {				\
+			if (flags == V4L_DEBUG_CODEC_ERROR) {			\
+				__v4l_dbg(h, __ctx->id, "[ERR]: " fmt, ##args);	\
+			} else	{						\
+				__v4l_dbg(h, __ctx->id, fmt, ##args);		\
+			}							\
+		}								\
+	} while (0)
+
+void __iomem *aml_vcodec_get_reg_addr(struct aml_vcodec_ctx *data,
+				unsigned int reg_idx);
+int aml_vcodec_mem_alloc(struct aml_vcodec_ctx *data,
+				struct aml_vcodec_mem *mem);
+void aml_vcodec_mem_free(struct aml_vcodec_ctx *data,
+				struct aml_vcodec_mem *mem);
+void aml_vcodec_set_curr_ctx(struct aml_vcodec_dev *dev,
+	struct aml_vcodec_ctx *ctx);
+struct aml_vcodec_ctx *aml_vcodec_get_curr_ctx(struct aml_vcodec_dev *dev);
+
+#endif /* _AML_VCODEC_UTIL_H_ */
diff --git a/drivers/amvdec_ports/aml_vcodec_vfm.c b/drivers/amvdec_ports/aml_vcodec_vfm.c
new file mode 100644
index 0000000..62896ea
--- /dev/null
+++ b/drivers/amvdec_ports/aml_vcodec_vfm.c
@@ -0,0 +1,245 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include "aml_vcodec_vfm.h"
+#include "aml_vcodec_vfq.h"
+#include "aml_vcodec_util.h"
+#include "aml_vcodec_adapt.h"
+#include <media/v4l2-mem2mem.h>
+
+#define KERNEL_ATRACE_TAG KERNEL_ATRACE_TAG_VIDEO_COMPOSER
+#include <trace/events/meson_atrace.h>
+
+#define RECEIVER_NAME	"v4l2-video"
+#define PROVIDER_NAME	"v4l2-video"
+
+static struct vframe_s *vdec_vf_peek(void *op_arg)
+{
+	struct vcodec_vfm_s *vfm = (struct vcodec_vfm_s *)op_arg;
+
+	return vfq_peek(&vfm->vf_que);
+}
+
+static struct vframe_s *vdec_vf_get(void *op_arg)
+{
+	struct vcodec_vfm_s *vfm = (struct vcodec_vfm_s *)op_arg;
+
+	return vfq_pop(&vfm->vf_que);
+}
+
+static void vdec_vf_put(struct vframe_s *vf, void *op_arg)
+{
+	struct vcodec_vfm_s *vfm = (struct vcodec_vfm_s *)op_arg;
+
+	/* A frame coming back from amvideo means its data */
+	/* has already been processed and displayed, so it */
+	/* is recycled here rather than being returned to */
+	/* the decoder directly. */
+
+	//vf_put(vf, vfm->recv_name);
+	//vf_notify_provider(vfm->recv_name, VFRAME_EVENT_RECEIVER_PUT, NULL);
+
+	if (vfq_level(&vfm->vf_que_recycle) > POOL_SIZE - 1) {
+		v4l_dbg(vfm->ctx, V4L_DEBUG_CODEC_ERROR, "vfq full.\n");
+		return;
+	}
+
+	atomic_set(&vf->use_cnt, 1);
+
+	vfq_push(&vfm->vf_que_recycle, vf);
+
+	/* schedule capture work. */
+	vdec_device_vf_run(vfm->ctx);
+}
+
+static int vdec_event_cb(int type, void *data, void *private_data)
+{
+
+	if (type & VFRAME_EVENT_RECEIVER_PUT) {
+	} else if (type & VFRAME_EVENT_RECEIVER_GET) {
+	} else if (type & VFRAME_EVENT_RECEIVER_FRAME_WAIT) {
+	}
+	return 0;
+}
+
+static int vdec_vf_states(struct vframe_states *states, void *op_arg)
+{
+	struct vcodec_vfm_s *vfm = (struct vcodec_vfm_s *)op_arg;
+
+	states->vf_pool_size	= POOL_SIZE;
+	states->buf_recycle_num	= 0;
+	states->buf_free_num	= POOL_SIZE - vfq_level(&vfm->vf_que);
+	states->buf_avail_num	= vfq_level(&vfm->vf_que);
+
+	return 0;
+}
+
+void video_vf_put(char *receiver, struct vdec_v4l2_buffer *fb, int id)
+{
+	struct vframe_provider_s *vfp = vf_get_provider(receiver);
+	struct vframe_s *vf = (struct vframe_s *)fb->vf_handle;
+
+	ATRACE_COUNTER("v4l2_to", vf->index_disp);
+
+	v4l_dbg(0, V4L_DEBUG_CODEC_OUTPUT,
+		"[%d]: TO   (%s) vf: %p, idx: %d, "
+		"Y:(%lx, %u) C/U:(%lx, %u) V:(%lx, %u)\n",
+		id, vfp->name, vf, vf->index,
+		fb->m.mem[0].addr, fb->m.mem[0].size,
+		fb->m.mem[1].addr, fb->m.mem[1].size,
+		fb->m.mem[2].addr, fb->m.mem[2].size);
+
+	if (vfp && vf && atomic_dec_and_test(&vf->use_cnt))
+		vf_put(vf, receiver);
+}
+
+static const struct vframe_operations_s vf_provider = {
+	.peek		= vdec_vf_peek,
+	.get		= vdec_vf_get,
+	.put		= vdec_vf_put,
+	.event_cb	= vdec_event_cb,
+	.vf_states	= vdec_vf_states,
+};
+
+static int video_receiver_event_fun(int type, void *data, void *private_data)
+{
+	int ret = 0;
+	struct vframe_states states;
+	struct vcodec_vfm_s *vfm = (struct vcodec_vfm_s *)private_data;
+
+	switch (type) {
+	case VFRAME_EVENT_PROVIDER_UNREG: {
+		if (vf_get_receiver(vfm->prov_name)) {
+			v4l_dbg(vfm->ctx, V4L_DEBUG_CODEC_EXINFO,
+				"unreg %s provider.\n",
+				vfm->prov_name);
+			vf_unreg_provider(&vfm->vf_prov);
+		}
+
+		break;
+	}
+
+	case VFRAME_EVENT_PROVIDER_START: {
+		if (vf_get_receiver(vfm->prov_name)) {
+			v4l_dbg(vfm->ctx, V4L_DEBUG_CODEC_EXINFO,
+				"reg %s provider.\n",
+				vfm->prov_name);
+			vf_provider_init(&vfm->vf_prov, vfm->prov_name,
+				&vf_provider, vfm);
+			vf_reg_provider(&vfm->vf_prov);
+			vf_notify_receiver(vfm->prov_name,
+				VFRAME_EVENT_PROVIDER_START, NULL);
+		}
+
+		vfq_init(&vfm->vf_que, POOL_SIZE + 1, &vfm->pool[0]);
+		vfq_init(&vfm->vf_que_recycle, POOL_SIZE + 1, &vfm->pool_recycle[0]);
+
+		break;
+	}
+
+	case VFRAME_EVENT_PROVIDER_QUREY_STATE: {
+		vdec_vf_states(&states, vfm);
+		if (states.buf_avail_num > 0)
+			ret = RECEIVER_ACTIVE;
+		break;
+	}
+
+	case VFRAME_EVENT_PROVIDER_VFRAME_READY: {
+		if (vfq_level(&vfm->vf_que) > POOL_SIZE - 1)
+			ret = -1;
+
+		if (!vf_peek(vfm->recv_name))
+			ret = -1;
+
+		vfm->vf = vf_get(vfm->recv_name);
+		if (!vfm->vf)
+			ret = -1;
+
+		if (ret < 0) {
+			v4l_dbg(vfm->ctx, V4L_DEBUG_CODEC_ERROR, "receiver vf err.\n");
+			break;
+		}
+
+		vfq_push(&vfm->vf_que, vfm->vf);
+
+		if (vfm->ada_ctx->vfm_path == FRAME_BASE_PATH_V4L_VIDEO) {
+			vf_notify_receiver(vfm->prov_name,
+				VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL);
+			break;
+		}
+
+		/* schedule capture work. */
+		vdec_device_vf_run(vfm->ctx);
+
+		break;
+	}
+
+	default:
+		v4l_dbg(vfm->ctx, V4L_DEBUG_CODEC_EXINFO,
+			"the vf event is %d", type);
+	}
+
+	return ret;
+}
+
+static const struct vframe_receiver_op_s vf_receiver = {
+	.event_cb	= video_receiver_event_fun
+};
+
+struct vframe_s *peek_video_frame(struct vcodec_vfm_s *vfm)
+{
+	if (vfm->ada_ctx->vfm_path == FRAME_BASE_PATH_V4L_VIDEO)
+		return vfq_peek(&vfm->vf_que_recycle);
+	else
+		return vfq_peek(&vfm->vf_que);
+}
+
+struct vframe_s *get_video_frame(struct vcodec_vfm_s *vfm)
+{
+	if (vfm->ada_ctx->vfm_path == FRAME_BASE_PATH_V4L_VIDEO)
+		return vfq_pop(&vfm->vf_que_recycle);
+	else
+		return vfq_pop(&vfm->vf_que);
+}
+
+int vcodec_vfm_init(struct vcodec_vfm_s *vfm)
+{
+	int ret;
+
+	snprintf(vfm->recv_name, VF_NAME_SIZE, "%s-%d",
+		RECEIVER_NAME, vfm->ctx->id);
+	snprintf(vfm->prov_name, VF_NAME_SIZE, "%s-%d",
+		PROVIDER_NAME, vfm->ctx->id);
+
+	vfm->ada_ctx->recv_name = vfm->recv_name;
+
+	vf_receiver_init(&vfm->vf_recv, vfm->recv_name, &vf_receiver, vfm);
+	ret = vf_reg_receiver(&vfm->vf_recv);
+
+	vfm->vfm_initialized = ret ? false : true;
+
+	return ret;
+}
+
+void vcodec_vfm_release(struct vcodec_vfm_s *vfm)
+{
+	if (vfm->vfm_initialized)
+		vf_unreg_receiver(&vfm->vf_recv);
+}
+
diff --git a/drivers/amvdec_ports/aml_vcodec_vfm.h b/drivers/amvdec_ports/aml_vcodec_vfm.h
new file mode 100644
index 0000000..141e9a7
--- /dev/null
+++ b/drivers/amvdec_ports/aml_vcodec_vfm.h
@@ -0,0 +1,60 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#ifndef __AML_VCODEC_VFM_H_
+#define __AML_VCODEC_VFM_H_
+
+#include "aml_vcodec_vfq.h"
+#include "aml_vcodec_drv.h"
+#include "aml_vcodec_dec.h"
+#include <linux/amlogic/media/vfm/vframe_provider.h>
+#include <linux/amlogic/media/vfm/vframe_receiver.h>
+
+#define VF_NAME_SIZE	(32)
+#define POOL_SIZE	(32)
+
+/*
+ * Per-decoder-instance VFM state: the decoded-frame queues plus the
+ * provider/receiver endpoints registered with the vframe framework.
+ */
+struct vcodec_vfm_s {
+	struct aml_vcodec_ctx *ctx;	/* owning v4l2 codec context */
+	struct aml_vdec_adapt *ada_ctx;	/* decoder adapter; holds vfm_path */
+	struct vfq_s vf_que;	/* main decoded-frame queue */
+	struct vfq_s vf_que_recycle;	/* queue used on the V4L video path */
+	struct vframe_s *vf;	/* last frame obtained via vf_get() */
+	struct vframe_s *pool[POOL_SIZE + 1];	/* slots backing vf_que */
+	struct vframe_s *pool_recycle[POOL_SIZE + 1];	/* slots backing vf_que_recycle */
+	char recv_name[VF_NAME_SIZE];	/* "<RECEIVER_NAME>-<ctx id>" */
+	char prov_name[VF_NAME_SIZE];	/* "<PROVIDER_NAME>-<ctx id>" */
+	struct vframe_provider_s vf_prov;	/* provider endpoint */
+	struct vframe_receiver_s vf_recv;	/* receiver endpoint */
+	bool vfm_initialized;	/* set when vf_reg_receiver() returned 0 */
+};
+
+/* Register the receiver; returns the vf_reg_receiver() result (0 = ok). */
+int vcodec_vfm_init(struct vcodec_vfm_s *vfm);
+
+/* Unregister the receiver if init succeeded. */
+void vcodec_vfm_release(struct vcodec_vfm_s *vfm);
+
+/* Peek (do not dequeue) the next frame; NULL when the queue is empty. */
+struct vframe_s *peek_video_frame(struct vcodec_vfm_s *vfm);
+
+/* Pop the next frame; NULL when the queue is empty. */
+struct vframe_s *get_video_frame(struct vcodec_vfm_s *vfm);
+
+int get_fb_from_queue(struct aml_vcodec_ctx *ctx, struct vdec_v4l2_buffer **out_fb);
+int put_fb_to_queue(struct aml_vcodec_ctx *ctx, struct vdec_v4l2_buffer *in_fb);
+
+void video_vf_put(char *receiver, struct vdec_v4l2_buffer *fb, int id);
+
+#endif /* __AML_VCODEC_VFM_H_ */
diff --git a/drivers/amvdec_ports/aml_vcodec_vfq.h b/drivers/amvdec_ports/aml_vcodec_vfq.h
new file mode 100644
index 0000000..e19c53c
--- /dev/null
+++ b/drivers/amvdec_ports/aml_vcodec_vfq.h
@@ -0,0 +1,112 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#ifndef __AML_VCODEC_VFQ_H_
+#define __AML_VCODEC_VFQ_H_
+
+#include <linux/types.h>
+#include <asm/barrier.h>
+
+/*
+ * Minimal ring queue of vframe pointers over a caller-supplied slot
+ * array. rp == wp means empty; push does no full-check, callers bound
+ * the occupancy themselves (see the POOL_SIZE checks at call sites).
+ */
+struct vfq_s {
+	int rp;		/* read (pop) index */
+	int wp;		/* write (push) index */
+	int size;	/* number of slots in @pool */
+	int pre_rp;	/* rp snapshot for vfq_lookup_start/end */
+	int pre_wp;	/* wp snapshot for vfq_lookup_start/end */
+	struct vframe_s **pool;	/* caller-provided slot array */
+};
+
+/* Snapshot the queue indices so entries can be walked (via pop/peek)
+ * and the traversal undone later with vfq_lookup_end().
+ */
+static inline void vfq_lookup_start(struct vfq_s *q)
+{
+	q->pre_rp = q->rp;
+	q->pre_wp = q->wp;
+}
+/* Restore the indices saved by vfq_lookup_start(), discarding any
+ * pops/pushes performed in between.
+ */
+static inline void vfq_lookup_end(struct vfq_s *q)
+{
+	q->rp = q->pre_rp;
+	q->wp = q->pre_wp;
+}
+
+/* Attach a caller-provided @pool of @size slots and reset the queue
+ * to empty. The pool memory is owned by the caller.
+ */
+static inline void vfq_init(struct vfq_s *q, u32 size, struct vframe_s **pool)
+{
+	q->rp = q->wp = 0;
+	q->size = size;
+	q->pool = pool;
+}
+
+/* True when the read and write indices coincide, i.e. nothing queued. */
+static inline bool vfq_empty(struct vfq_s *q)
+{
+	return q->rp == q->wp;
+}
+
+/* Enqueue @vf. No full-check is performed here; callers bound the
+ * occupancy themselves before pushing.
+ */
+static inline void vfq_push(struct vfq_s *q, struct vframe_s *vf)
+{
+	int wp = q->wp;
+
+	/* order the wp read against the slot store below */
+	smp_mb();
+
+	q->pool[wp] = vf;
+
+	/* publish the slot before advancing wp, so a concurrent reader
+	 * never sees the new wp with a stale pool entry
+	 */
+	smp_wmb();
+
+	q->wp = (wp == (q->size - 1)) ? 0 : (wp + 1);
+}
+
+/* Dequeue the oldest frame, or NULL when the queue is empty. */
+static inline struct vframe_s *vfq_pop(struct vfq_s *q)
+{
+	struct vframe_s *vf;
+	int rp;
+
+	if (vfq_empty(q))
+		return NULL;
+
+	rp = q->rp;
+
+	/* read the slot only after the emptiness check above */
+	smp_rmb();
+
+	vf = q->pool[rp];
+
+	/* consume the slot before advancing rp */
+	smp_mb();
+
+	q->rp = (rp == (q->size - 1)) ? 0 : (rp + 1);
+
+	return vf;
+}
+
+/* Return the oldest frame without dequeuing it; NULL when empty. */
+static inline struct vframe_s *vfq_peek(struct vfq_s *q)
+{
+	return (vfq_empty(q)) ? NULL : q->pool[q->rp];
+}
+
+/* Number of frames currently queued: wp - rp, wrapped at @size. */
+static inline int vfq_level(struct vfq_s *q)
+{
+	int level = q->wp - q->rp;
+
+	if (level < 0)
+		level += q->size;
+
+	return level;
+}
+
+#endif /* __AML_VCODEC_VFQ_H_ */
+
diff --git a/drivers/amvdec_ports/decoder/aml_h264_parser.c b/drivers/amvdec_ports/decoder/aml_h264_parser.c
new file mode 100644
index 0000000..c9da299
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/aml_h264_parser.c
@@ -0,0 +1,732 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "aml_h264_parser.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+#include "../utils/golomb.h"
+#include "../utils/common.h"
+#include "utils.h"
+
+#define MAX_DELAYED_PIC_COUNT	(16)	/* upper bound on reorder/DPB frames */
+#define MAX_LOG2_MAX_FRAME_NUM	(12 + 4)	/* spec range of log2_max_frame_num */
+#define MIN_LOG2_MAX_FRAME_NUM	(4)
+#define MAX_SPS_COUNT		(32)	/* seq_parameter_set_id must be < 32 */
+#define EXTENDED_SAR		(255)	/* aspect_ratio_idc: SAR sent explicitly */
+
+/* Sample aspect ratios indexed by aspect_ratio_idc (H.264 Table E-1);
+ * idc EXTENDED_SAR (255) is instead read from the bitstream.
+ */
+static const struct rational h264_pixel_aspect[17] = {
+	{   0,  1 },
+	{   1,  1 },
+	{  12, 11 },
+	{  10, 11 },
+	{  16, 11 },
+	{  40, 33 },
+	{  24, 11 },
+	{  20, 11 },
+	{  32, 11 },
+	{  80, 33 },
+	{  18, 11 },
+	{  15, 11 },
+	{  64, 33 },
+	{ 160, 99 },
+	{   4,  3 },
+	{   3,  2 },
+	{   2,  1 },
+};
+
+/* maximum number of MBs in the DPB for a given level */
+/* (H.264 Table A-1 MaxDpbMbs, keyed by level_idc; fixed stray
+ * space-before-tab whitespace flagged by checkpatch)
+ */
+static const int level_max_dpb_mbs[][2] = {
+	{ 10, 396	},
+	{ 11, 900	},
+	{ 12, 2376	},
+	{ 13, 2376	},
+	{ 20, 2376	},
+	{ 21, 4752	},
+	{ 22, 8100	},
+	{ 30, 8100	},
+	{ 31, 18000	},
+	{ 32, 20480	},
+	{ 40, 32768	},
+	{ 41, 32768	},
+	{ 42, 34816	},
+	{ 50, 110400	},
+	{ 51, 184320	},
+	{ 52, 184320	},
+};
+
+/* Default 4x4 scaling lists, [0] intra / [1] inter (H.264 Table 7-3) */
+static const u8 default_scaling4[2][16] = {
+	{  6, 13, 20, 28, 13, 20, 28, 32,
+	  20, 28, 32, 37, 28, 32, 37, 42},
+	{ 10, 14, 20, 24, 14, 20, 24, 27,
+	  20, 24, 27, 30, 24, 27, 30, 34 }
+};
+
+/* Default 8x8 scaling lists, [0] intra / [1] inter (H.264 Table 7-4) */
+static const u8 default_scaling8[2][64] = {
+	{  6, 10, 13, 16, 18, 23, 25, 27,
+	  10, 11, 16, 18, 23, 25, 27, 29,
+	  13, 16, 18, 23, 25, 27, 29, 31,
+	  16, 18, 23, 25, 27, 29, 31, 33,
+	  18, 23, 25, 27, 29, 31, 33, 36,
+	  23, 25, 27, 29, 31, 33, 36, 38,
+	  25, 27, 29, 31, 33, 36, 38, 40,
+	  27, 29, 31, 33, 36, 38, 40, 42 },
+	{  9, 13, 15, 17, 19, 21, 22, 24,
+	  13, 13, 17, 19, 21, 22, 24, 25,
+	  15, 17, 19, 21, 22, 24, 25, 27,
+	  17, 19, 21, 22, 24, 25, 27, 28,
+	  19, 21, 22, 24, 25, 27, 28, 30,
+	  21, 22, 24, 25, 27, 28, 30, 32,
+	  22, 24, 25, 27, 28, 30, 32, 33,
+	  24, 25, 27, 28, 30, 32, 33, 35 }
+};
+
+extern const u8 ff_zigzag_scan[16 + 1];
+extern const u8 ff_zigzag_direct[64];
+
+/* Parse one scaling_list() (H.264 7.3.2.1.1.1).
+ * @factors: output list, written through the zigzag scan order
+ * @size: 16 (4x4) or 64 (8x8); also selects the scan table
+ * @jvt_list: spec default list, used when the stream selects it
+ * @fallback_list: list copied when the matrix is absent in the stream
+ * Always returns 0; the delta_scale range check is deliberately
+ * disabled to match the JM19 reference (see the commented-out check).
+ */
+static int decode_scaling_list(struct get_bits_context *gb,
+	u8 *factors, int size,
+	const u8 *jvt_list,
+	const u8 *fallback_list)
+{
+	int i, last = 8, next = 8;
+	const u8 *scan = size == 16 ? ff_zigzag_scan : ff_zigzag_direct;
+
+	if (!get_bits1(gb)) /* matrix not written, we use the predicted one */
+		memcpy(factors, fallback_list, size * sizeof(u8));
+	else
+		for (i = 0; i < size; i++) {
+			if (next) {
+				int v = get_se_golomb(gb);
+				/*if (v < -128 || v > 127) { //JM19 has not check.
+					pr_err( "delta scale %d is invalid\n", v);
+					return -1;
+				}*/
+				next = (last + v) & 0xff;
+			}
+			if (!i && !next) { /* matrix not written, we use the preset one */
+				memcpy(factors, jvt_list, size * sizeof(u8));
+				break;
+			}
+			last = factors[scan[i]] = next ? next : last;
+		}
+	return 0;
+}
+
+/* returns non zero if the provided SPS scaling matrix has been filled */
+/* Parse the scaling-matrix block shared by SPS and PPS.
+ * For PPS parsing (@is_sps == 0) the fallback lists come from the SPS
+ * matrices when the SPS carried any, otherwise from the spec defaults.
+ * Returns <0 on parse error, otherwise 0, or is_sps when matrices were
+ * present (so the caller can latch scaling_matrix_present).
+ */
+static int decode_scaling_matrices(struct get_bits_context *gb,
+	const struct h264_SPS_t *sps,
+	const struct h264_PPS_t *pps, int is_sps,
+	u8(*scaling_matrix4)[16],
+	u8(*scaling_matrix8)[64])
+{
+	int ret = 0;
+	int fallback_sps = !is_sps && sps->scaling_matrix_present;
+	const u8 *fallback[4] = {
+		fallback_sps ? sps->scaling_matrix4[0] : default_scaling4[0],
+		fallback_sps ? sps->scaling_matrix4[3] : default_scaling4[1],
+		fallback_sps ? sps->scaling_matrix8[0] : default_scaling8[0],
+		fallback_sps ? sps->scaling_matrix8[3] : default_scaling8[1]
+	};
+
+	if (get_bits1(gb)) {
+		ret |= decode_scaling_list(gb, scaling_matrix4[0], 16, default_scaling4[0], fallback[0]);        // Intra, Y
+		ret |= decode_scaling_list(gb, scaling_matrix4[1], 16, default_scaling4[0], scaling_matrix4[0]); // Intra, Cr
+		ret |= decode_scaling_list(gb, scaling_matrix4[2], 16, default_scaling4[0], scaling_matrix4[1]); // Intra, Cb
+		ret |= decode_scaling_list(gb, scaling_matrix4[3], 16, default_scaling4[1], fallback[1]);        // Inter, Y
+		ret |= decode_scaling_list(gb, scaling_matrix4[4], 16, default_scaling4[1], scaling_matrix4[3]); // Inter, Cr
+		ret |= decode_scaling_list(gb, scaling_matrix4[5], 16, default_scaling4[1], scaling_matrix4[4]); // Inter, Cb
+		if (is_sps || pps->transform_8x8_mode) {
+			ret |= decode_scaling_list(gb, scaling_matrix8[0], 64, default_scaling8[0], fallback[2]); // Intra, Y
+			ret |= decode_scaling_list(gb, scaling_matrix8[3], 64, default_scaling8[1], fallback[3]); // Inter, Y
+			if (sps->chroma_format_idc == 3) {
+				ret |= decode_scaling_list(gb, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr
+				ret |= decode_scaling_list(gb, scaling_matrix8[4], 64, default_scaling8[1], scaling_matrix8[3]); // Inter, Cr
+				ret |= decode_scaling_list(gb, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb
+				ret |= decode_scaling_list(gb, scaling_matrix8[5], 64, default_scaling8[1], scaling_matrix8[4]); // Inter, Cb
+			}
+		}
+		if (!ret)
+			ret = is_sps;
+	}
+
+	return ret;
+}
+
+/* Parse hrd_parameters() (H.264 Annex E.1.2): cpb count, then the
+ * per-CPB rate/size fields (read and discarded), then the delay field
+ * lengths which are stored into @sps.
+ * Returns 0 on success, -1 when cpb_cnt exceeds 32.
+ */
+static int decode_hrd_parameters(struct get_bits_context *gb,
+	struct h264_SPS_t *sps)
+{
+	int cpb_count, i;
+
+	cpb_count = get_ue_golomb_31(gb) + 1;
+	if (cpb_count > 32U) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"cpb_count %d invalid\n", cpb_count);
+		return -1;
+	}
+
+	get_bits(gb, 4); /* bit_rate_scale */
+	get_bits(gb, 4); /* cpb_size_scale */
+	for (i = 0; i < cpb_count; i++) {
+		get_ue_golomb_long(gb); /* bit_rate_value_minus1 */
+		get_ue_golomb_long(gb); /* cpb_size_value_minus1 */
+		get_bits1(gb);		/* cbr_flag */
+	}
+
+	sps->initial_cpb_removal_delay_length = get_bits(gb, 5) + 1;
+	sps->cpb_removal_delay_length	  = get_bits(gb, 5) + 1;
+	sps->dpb_output_delay_length	  = get_bits(gb, 5) + 1;
+	sps->time_offset_length		  = get_bits(gb, 5);
+	sps->cpb_cnt			  = cpb_count;
+
+	return 0;
+}
+
+/* Parse vui_parameters() (H.264 Annex E.1.1) into @sps.
+ * Fields the decoder does not use are read and discarded to keep the
+ * bit reader in sync. Returns 0 on success (a truncated VUI is
+ * tolerated), -1 on invalid aspect-ratio, HRD or reorder data.
+ */
+static int decode_vui_parameters(struct get_bits_context *gb,  struct h264_SPS_t *sps)
+{
+	int aspect_ratio_info_present_flag;
+	u32 aspect_ratio_idc;
+
+	aspect_ratio_info_present_flag = get_bits1(gb);
+
+	if (aspect_ratio_info_present_flag) {
+		aspect_ratio_idc = get_bits(gb, 8);
+		if (aspect_ratio_idc == EXTENDED_SAR) {
+			/* SAR carried explicitly as 16-bit num/den */
+			sps->sar.num = get_bits(gb, 16);
+			sps->sar.den = get_bits(gb, 16);
+		} else if (aspect_ratio_idc < ARRAY_SIZE(h264_pixel_aspect)) {
+			sps->sar = h264_pixel_aspect[aspect_ratio_idc];
+		} else {
+			return -1;
+		}
+	} else {
+		sps->sar.num =
+		sps->sar.den = 0;
+	}
+
+	if (get_bits1(gb))      /* overscan_info_present_flag */
+		get_bits1(gb);  /* overscan_appropriate_flag */
+
+	sps->video_signal_type_present_flag = get_bits1(gb);
+	if (sps->video_signal_type_present_flag) {
+		get_bits(gb, 3);                 /* video_format */
+		sps->full_range = get_bits1(gb); /* video_full_range_flag */
+
+		sps->colour_description_present_flag = get_bits1(gb);
+		if (sps->colour_description_present_flag) {
+			sps->color_primaries = get_bits(gb, 8); /* colour_primaries */
+			sps->color_trc       = get_bits(gb, 8); /* transfer_characteristics */
+			sps->colorspace      = get_bits(gb, 8); /* matrix_coefficients */
+
+			// Set invalid values to "unspecified"
+			if (!av_color_primaries_name(sps->color_primaries))
+				sps->color_primaries = AVCOL_PRI_UNSPECIFIED;
+			if (!av_color_transfer_name(sps->color_trc))
+				sps->color_trc = AVCOL_TRC_UNSPECIFIED;
+			if (!av_color_space_name(sps->colorspace))
+				sps->colorspace = AVCOL_SPC_UNSPECIFIED;
+		}
+	}
+
+	/* chroma_location_info_present_flag */
+	if (get_bits1(gb)) {
+		/* chroma_sample_location_type_top_field */
+		//avctx->chroma_sample_location = get_ue_golomb(gb) + 1;
+		get_ue_golomb(gb);  /* chroma_sample_location_type_bottom_field */
+	}
+
+	/* tolerate a VUI truncated just before the timing info */
+	if (show_bits1(gb) && get_bits_left(gb) < 10) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Truncated VUI\n");
+		return 0;
+	}
+
+	sps->timing_info_present_flag = get_bits1(gb);
+	if (sps->timing_info_present_flag) {
+		unsigned num_units_in_tick = get_bits_long(gb, 32);
+		unsigned time_scale        = get_bits_long(gb, 32);
+		if (!num_units_in_tick || !time_scale) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+				"time_scale/num_units_in_tick invalid or unsupported (%u/%u)\n",
+				time_scale, num_units_in_tick);
+			sps->timing_info_present_flag = 0;
+		} else {
+			sps->num_units_in_tick = num_units_in_tick;
+			sps->time_scale = time_scale;
+		}
+		sps->fixed_frame_rate_flag = get_bits1(gb);
+	}
+
+	sps->nal_hrd_parameters_present_flag = get_bits1(gb);
+	if (sps->nal_hrd_parameters_present_flag)
+		if (decode_hrd_parameters(gb, sps) < 0)
+			return -1;
+	sps->vcl_hrd_parameters_present_flag = get_bits1(gb);
+	if (sps->vcl_hrd_parameters_present_flag)
+		if (decode_hrd_parameters(gb, sps) < 0)
+			return -1;
+	if (sps->nal_hrd_parameters_present_flag ||
+		sps->vcl_hrd_parameters_present_flag)
+		get_bits1(gb);     /* low_delay_hrd_flag */
+	sps->pic_struct_present_flag = get_bits1(gb);
+	if (!get_bits_left(gb))
+		return 0;
+	sps->bitstream_restriction_flag = get_bits1(gb);
+	if (sps->bitstream_restriction_flag) {
+		get_bits1(gb);     /* motion_vectors_over_pic_boundaries_flag */
+		get_ue_golomb(gb); /* max_bytes_per_pic_denom */
+		get_ue_golomb(gb); /* max_bits_per_mb_denom */
+		get_ue_golomb(gb); /* log2_max_mv_length_horizontal */
+		get_ue_golomb(gb); /* log2_max_mv_length_vertical */
+		sps->num_reorder_frames = get_ue_golomb(gb);
+		sps->max_dec_frame_buffering = get_ue_golomb(gb); /*max_dec_frame_buffering*/
+
+		/* overread: drop the values just parsed */
+		if (get_bits_left(gb) < 0) {
+			sps->num_reorder_frames         = 0;
+			sps->bitstream_restriction_flag = 0;
+		}
+
+		if (sps->num_reorder_frames > 16U
+			/* max_dec_frame_buffering || max_dec_frame_buffering > 16 */) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+				"Clipping illegal num_reorder_frames %d\n",
+				sps->num_reorder_frames);
+			/* NOTE(review): the value is clamped to 16 yet -1 is
+			 * still returned - confirm whether failing here
+			 * (rather than continuing with 16) is intended.
+			 */
+				sps->num_reorder_frames = 16;
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Parse a seq_parameter_set RBSP (H.264 7.3.2.1.1) into @sps.
+ * After the syntax elements are read, num_reorder_frames is derived
+ * from the level's MaxDpbMbs limit and the stream's DPB hints (see the
+ * notes near the bottom). Returns 0 on success, -1 on any syntax or
+ * range error.
+ */
+static int aml_h264_parser_sps(struct get_bits_context *gb, struct h264_SPS_t *sps)
+{
+	int ret;
+	u32 sps_id;
+	int profile_idc, level_idc, constraint_set_flags = 0;
+	int i, log2_max_frame_num_minus4;
+
+	profile_idc		= get_bits(gb, 8);
+	constraint_set_flags	|= get_bits1(gb) << 0;	// constraint_set0_flag
+	constraint_set_flags	|= get_bits1(gb) << 1;	// constraint_set1_flag
+	constraint_set_flags	|= get_bits1(gb) << 2;	// constraint_set2_flag
+	constraint_set_flags	|= get_bits1(gb) << 3;	// constraint_set3_flag
+	constraint_set_flags	|= get_bits1(gb) << 4;	// constraint_set4_flag
+	constraint_set_flags	|= get_bits1(gb) << 5;	// constraint_set5_flag
+	skip_bits(gb, 2); 				// reserved_zero_2bits
+	level_idc	= get_bits(gb, 8);
+	sps_id		= get_ue_golomb_31(gb);
+
+	if (sps_id >= MAX_SPS_COUNT) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"sps_id %u out of range\n", sps_id);
+		goto fail;
+	}
+
+	sps->sps_id			= sps_id;
+	sps->time_offset_length		= 24;
+	sps->profile_idc		= profile_idc;
+	sps->constraint_set_flags	= constraint_set_flags;
+	sps->level_idc			= level_idc;
+	sps->full_range			= -1;
+
+	/* defaults: flat (16) scaling matrices, colour unspecified */
+	memset(sps->scaling_matrix4, 16, sizeof(sps->scaling_matrix4));
+	memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8));
+	sps->scaling_matrix_present = 0;
+	sps->colorspace = 2; //AVCOL_SPC_UNSPECIFIED
+
+	/* the high-profile family carries extra chroma/bit-depth fields */
+	if (sps->profile_idc == 100 ||  // High profile
+		sps->profile_idc == 110 ||  // High10 profile
+		sps->profile_idc == 122 ||  // High422 profile
+		sps->profile_idc == 244 ||  // High444 Predictive profile
+		sps->profile_idc ==  44 ||  // Cavlc444 profile
+		sps->profile_idc ==  83 ||  // Scalable Constrained High profile (SVC)
+		sps->profile_idc ==  86 ||  // Scalable High Intra profile (SVC)
+		sps->profile_idc == 118 ||  // Stereo High profile (MVC)
+		sps->profile_idc == 128 ||  // Multiview High profile (MVC)
+		sps->profile_idc == 138 ||  // Multiview Depth High profile (MVCD)
+		sps->profile_idc == 144) {  // old High444 profile
+		sps->chroma_format_idc = get_ue_golomb_31(gb);
+
+		if (sps->chroma_format_idc > 3U) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+				"chroma_format_idc %u\n", sps->chroma_format_idc);
+			goto fail;
+		} else if (sps->chroma_format_idc == 3) {
+			sps->residual_color_transform_flag = get_bits1(gb);
+			if (sps->residual_color_transform_flag) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+					"separate color planes are not supported\n");
+				goto fail;
+			}
+		}
+
+		sps->bit_depth_luma	= get_ue_golomb(gb) + 8;
+		sps->bit_depth_chroma	= get_ue_golomb(gb) + 8;
+		if (sps->bit_depth_chroma != sps->bit_depth_luma) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+				"Different chroma and luma bit depth\n");
+			goto fail;
+		}
+
+		if (sps->bit_depth_luma	< 8 || sps->bit_depth_luma > 14 ||
+			sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+				"illegal bit depth value (%d, %d)\n",
+				sps->bit_depth_luma, sps->bit_depth_chroma);
+			goto fail;
+		}
+
+		sps->transform_bypass = get_bits1(gb);
+		ret = decode_scaling_matrices(gb, sps, NULL, 1,
+			sps->scaling_matrix4, sps->scaling_matrix8);
+		if (ret < 0)
+			goto fail;
+		sps->scaling_matrix_present |= ret;
+	} else {
+		/* baseline/main: fixed 4:2:0, 8-bit */
+		sps->chroma_format_idc	= 1;
+		sps->bit_depth_luma	= 8;
+		sps->bit_depth_chroma	= 8;
+	}
+
+	log2_max_frame_num_minus4 = get_ue_golomb(gb);
+	if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 ||
+		log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"log2_max_frame_num_minus4 out of range (0-12): %d\n",
+			log2_max_frame_num_minus4);
+		goto fail;
+	}
+	sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4;
+
+	sps->poc_type = get_ue_golomb_31(gb);
+	if (sps->poc_type == 0) { // FIXME #define
+		u32 t = get_ue_golomb(gb);
+		if (t > 12) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+				"log2_max_poc_lsb (%d) is out of range\n", t);
+			goto fail;
+		}
+		sps->log2_max_poc_lsb = t + 4;
+	} else if (sps->poc_type == 1) { // FIXME #define
+		sps->delta_pic_order_always_zero_flag	= get_bits1(gb);
+		sps->offset_for_non_ref_pic		= get_se_golomb_long(gb);
+		sps->offset_for_top_to_bottom_field	= get_se_golomb_long(gb);
+
+		sps->poc_cycle_length = get_ue_golomb(gb);
+		if ((u32)sps->poc_cycle_length >= ARRAY_SIZE(sps->offset_for_ref_frame)) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+				"poc_cycle_length overflow %d\n", sps->poc_cycle_length);
+			goto fail;
+		}
+
+		for (i = 0; i < sps->poc_cycle_length; i++)
+			sps->offset_for_ref_frame[i] = get_se_golomb_long(gb);
+	} else if (sps->poc_type != 2) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"illegal POC type %d\n", sps->poc_type);
+		goto fail;
+	}
+
+	sps->ref_frame_count = get_ue_golomb_31(gb);
+	if (sps->ref_frame_count > MAX_DELAYED_PIC_COUNT) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"too many reference frames %d\n", sps->ref_frame_count);
+		goto fail;
+	}
+	sps->gaps_in_frame_num_allowed_flag = get_bits1(gb);
+	sps->mb_width	= get_ue_golomb(gb) + 1;
+	sps->mb_height	= get_ue_golomb(gb) + 1;
+
+	sps->frame_mbs_only_flag = get_bits1(gb);
+
+	if (sps->mb_height >= INT_MAX / 2U) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "height overflow\n");
+		goto fail;
+	}
+	/* field coding doubles the MB height */
+	sps->mb_height *= 2 - sps->frame_mbs_only_flag;
+
+	if (!sps->frame_mbs_only_flag)
+		sps->mb_aff = get_bits1(gb);
+	else
+		sps->mb_aff = 0;
+
+	if ((u32)sps->mb_width  >= INT_MAX / 16 ||
+		(u32)sps->mb_height >= INT_MAX / 16) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"mb_width/height overflow\n");
+		goto fail;
+	}
+
+	sps->direct_8x8_inference_flag = get_bits1(gb);
+
+	sps->crop = get_bits1(gb);
+	if (sps->crop) {
+		u32 crop_left	= get_ue_golomb(gb);
+		u32 crop_right	= get_ue_golomb(gb);
+		u32 crop_top	= get_ue_golomb(gb);
+		u32 crop_bottom	= get_ue_golomb(gb);
+		int width	= 16 * sps->mb_width;
+		int height	= 16 * sps->mb_height;
+		/* crop units scale with chroma subsampling / field coding */
+		int vsub	= (sps->chroma_format_idc == 1) ? 1 : 0;
+		int hsub	= (sps->chroma_format_idc == 1 || sps->chroma_format_idc == 2) ? 1 : 0;
+		int step_x	= 1 << hsub;
+		int step_y	= (2 - sps->frame_mbs_only_flag) << vsub;
+
+		if (crop_left > (u32)INT_MAX / 4 / step_x ||
+			crop_right > (u32)INT_MAX / 4 / step_x ||
+			crop_top > (u32)INT_MAX / 4 / step_y ||
+			crop_bottom > (u32)INT_MAX / 4 / step_y ||
+			(crop_left + crop_right ) * step_x >= width ||
+			(crop_top + crop_bottom) * step_y >= height) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+				"crop values invalid %u %u %u %u / %d %d\n",
+				crop_left, crop_right, crop_top, crop_bottom, width, height);
+			goto fail;
+		}
+
+		sps->crop_left	= crop_left * step_x;
+		sps->crop_right	= crop_right * step_x;
+		sps->crop_top	= crop_top * step_y;
+		sps->crop_bottom = crop_bottom * step_y;
+	} else {
+		sps->crop_left	=
+		sps->crop_right	=
+		sps->crop_top	=
+		sps->crop_bottom =
+		sps->crop	= 0;
+	}
+
+	sps->vui_parameters_present_flag = get_bits1(gb);
+	if (sps->vui_parameters_present_flag) {
+		int ret = decode_vui_parameters(gb,  sps);
+		if (ret < 0)
+			goto fail;
+	}
+
+	if (get_bits_left(gb) < 0) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"Overread %s by %d bits\n",
+			sps->vui_parameters_present_flag ? "VUI" : "SPS", -get_bits_left(gb));
+		/*goto out;*/
+	}
+
+#if 0
+	/* if the maximum delay is not stored in the SPS, derive it based on the level */
+	if (!sps->bitstream_restriction_flag && sps->ref_frame_count) {
+		sps->num_reorder_frames = MAX_DELAYED_PIC_COUNT - 1;
+		for (i = 0; i < ARRAY_SIZE(level_max_dpb_mbs); i++) {
+			if (level_max_dpb_mbs[i][0] == sps->level_idc) {
+				sps->num_reorder_frames =
+					MIN(level_max_dpb_mbs[i][1] / (sps->mb_width * sps->mb_height),
+						sps->num_reorder_frames);
+				break;
+			}
+		}
+	}
+#endif
+
+	/* Derive the worst-case reorder depth from the level limit, then
+	 * widen it when the stream advertises a larger DPB.
+	 * NOTE(review): max_dec_frame_buffering is only assigned when the
+	 * VUI carried bitstream_restriction_flag - otherwise this reads
+	 * whatever @sps held on entry; confirm callers pass a zeroed sps.
+	 */
+	sps->num_reorder_frames = MAX_DELAYED_PIC_COUNT - 1;
+	for (i = 0; i < ARRAY_SIZE(level_max_dpb_mbs); i++) {
+		if (level_max_dpb_mbs[i][0] == sps->level_idc) {
+			sps->num_reorder_frames =
+				MIN(level_max_dpb_mbs[i][1] / (sps->mb_width * sps->mb_height),
+					sps->num_reorder_frames);
+			sps->num_reorder_frames += 1;
+			if (sps->max_dec_frame_buffering > sps->num_reorder_frames)
+				sps->num_reorder_frames = sps->max_dec_frame_buffering;
+			break;
+		}
+	}
+
+	if ((sps->bitstream_restriction_flag) &&
+		(sps->max_dec_frame_buffering <
+		sps->num_reorder_frames)) {
+		sps->num_reorder_frames = sps->max_dec_frame_buffering;
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+			"set reorder_pic_num to %d\n",
+			sps->num_reorder_frames);
+	}
+
+	if (!sps->sar.den)
+		sps->sar.den = 1;
+/*out:*/
+	if (1) {
+		static const char csp[4][5] = { "Gray", "420", "422", "444" };
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+			"sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%u/%u/%u/%u %s %s %d/%d b%d reo:%d\n",
+			sps_id, sps->profile_idc, sps->level_idc,
+			sps->poc_type,
+			sps->ref_frame_count,
+			sps->mb_width, sps->mb_height,
+			sps->frame_mbs_only_flag ? "FRM" : (sps->mb_aff ? "MB-AFF" : "PIC-AFF"),
+			sps->direct_8x8_inference_flag ? "8B8" : "",
+			sps->crop_left, sps->crop_right,
+			sps->crop_top, sps->crop_bottom,
+			sps->vui_parameters_present_flag ? "VUI" : "",
+			csp[sps->chroma_format_idc],
+			sps->timing_info_present_flag ? sps->num_units_in_tick : 0,
+			sps->timing_info_present_flag ? sps->time_scale : 0,
+			sps->bit_depth_luma,
+			sps->bitstream_restriction_flag ? sps->num_reorder_frames : -1);
+	}
+
+	return 0;
+
+fail:
+	return -1;
+}
+
+/* Debug names for nal_unit_type 0..31; indexed directly by the 5-bit
+ * type field, so all 32 entries must stay populated.
+ */
+static const char *h264_nal_type_name[32] = {
+	"Unspecified 0", //H264_NAL_UNSPECIFIED
+	"Coded slice of a non-IDR picture", // H264_NAL_SLICE
+	"Coded slice data partition A", // H264_NAL_DPA
+	"Coded slice data partition B", // H264_NAL_DPB
+	"Coded slice data partition C", // H264_NAL_DPC
+	"IDR", // H264_NAL_IDR_SLICE
+	"SEI", // H264_NAL_SEI
+	"SPS", // H264_NAL_SPS
+	"PPS", // H264_NAL_PPS
+	"AUD", // H264_NAL_AUD
+	"End of sequence", // H264_NAL_END_SEQUENCE
+	"End of stream", // H264_NAL_END_STREAM
+	"Filler data", // H264_NAL_FILLER_DATA
+	"SPS extension", // H264_NAL_SPS_EXT
+	"Prefix", // H264_NAL_PREFIX
+	"Subset SPS", // H264_NAL_SUB_SPS
+	"Depth parameter set", // H264_NAL_DPS
+	"Reserved 17", // H264_NAL_RESERVED17
+	"Reserved 18", // H264_NAL_RESERVED18
+	"Auxiliary coded picture without partitioning", // H264_NAL_AUXILIARY_SLICE
+	"Slice extension", // H264_NAL_EXTEN_SLICE
+	"Slice extension for a depth view or a 3D-AVC texture view", // H264_NAL_DEPTH_EXTEN_SLICE
+	"Reserved 22", // H264_NAL_RESERVED22
+	"Reserved 23", // H264_NAL_RESERVED23
+	"Unspecified 24", // H264_NAL_UNSPECIFIED24
+	"Unspecified 25", // H264_NAL_UNSPECIFIED25
+	"Unspecified 26", // H264_NAL_UNSPECIFIED26
+	"Unspecified 27", // H264_NAL_UNSPECIFIED27
+	"Unspecified 28", // H264_NAL_UNSPECIFIED28
+	"Unspecified 29", // H264_NAL_UNSPECIFIED29
+	"Unspecified 30", // H264_NAL_UNSPECIFIED30
+	"Unspecified 31", // H264_NAL_UNSPECIFIED31
+};
+
+/* Map a nal_unit_type to its debug name. No bounds check: callers pass
+ * the result of get_bits(gb, 5), which is always 0..31.
+ */
+static const char *h264_nal_unit_name(int nal_type)
+{
+	return h264_nal_type_name[nal_type];
+}
+
+/* Locate the first NAL in @data, strip emulation-prevention bytes
+ * (nal_unit_extract_rbsp) and parse it when it is a parameter set.
+ * Only SPS is handled; PPS parsing is stubbed out below. Sets
+ * ps->sps_parsed on success. Unsupported NAL types are logged and
+ * skipped with ret left at 0.
+ * Returns 0 on success/skip, -1 or -ENOMEM on failure.
+ */
+static int decode_extradata_ps(u8 *data, int size, struct h264_param_sets *ps)
+{
+	int ret = 0;
+	struct get_bits_context gb;
+	u32 src_len, rbsp_size = 0;
+	u8 *rbsp_buf = NULL;
+	int ref_idc, nalu_pos;
+	u32 nal_type;
+	u8 *p = data;
+	u32 len = size;
+
+	nalu_pos = find_start_code(p, len);
+	if (nalu_pos < 0)
+		return -1;
+
+	src_len = calc_nal_len(p + nalu_pos, size - nalu_pos);
+	rbsp_buf = nal_unit_extract_rbsp(p + nalu_pos, src_len, &rbsp_size);
+	if (rbsp_buf == NULL)
+		return -ENOMEM;
+
+	ret = init_get_bits8(&gb, rbsp_buf, rbsp_size);
+	if (ret < 0)
+		goto out;
+
+	/* forbidden_zero_bit must be 0 */
+	if (get_bits1(&gb) != 0) {
+		ret = -1;
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"invalid h264 data,return!\n");
+		goto out;
+	}
+
+	ref_idc	 = get_bits(&gb, 2);
+	nal_type = get_bits(&gb, 5);
+
+	v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+		"nal_unit_type: %d(%s), nal_ref_idc: %d\n",
+		nal_type, h264_nal_unit_name(nal_type), ref_idc);
+
+	switch (nal_type) {
+	case H264_NAL_SPS:
+		ret = aml_h264_parser_sps(&gb, &ps->sps);
+		if (ret < 0)
+			goto out;
+		ps->sps_parsed = true;
+		break;
+	/*case H264_NAL_PPS:
+		ret = ff_h264_decode_picture_parameter_set(&gb, &ps->pps, rbsp_size);
+		if (ret < 0)
+			goto fail;
+		ps->pps_parsed = true;
+		break;*/
+	default:
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"Unsupport parser nal type (%s).\n",
+			h264_nal_unit_name(nal_type));
+		break;
+	}
+
+out:
+	vfree(rbsp_buf);
+
+	return ret;
+}
+
+/* Walk @buf looking for start codes and parse each parameter set into
+ * @ps until an SPS has been parsed or the buffer is exhausted.
+ * NOTE(review): this assumes find_start_code() returns the offset just
+ * past a start code (used both as parse position and skip amount) -
+ * confirm against its definition.
+ * Returns 0 on success, the decode_extradata_ps() error otherwise.
+ */
+int h264_decode_extradata_ps(u8 *buf, int size, struct h264_param_sets *ps)
+{
+	int ret = 0, i = 0, j = 0;
+	u8 *p = buf;
+	int len = size;
+
+	for (i = 4; i < size; i++) {
+		j = find_start_code(p, len);
+		if (j > 0) {
+			len = size - (p - buf);
+			ret = decode_extradata_ps(p, len, ps);
+			if (ret) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+					"parse extra data failed. err: %d\n", ret);
+				return ret;
+			}
+
+			/* one SPS is all the caller needs */
+			if (ps->sps_parsed)
+				break;
+
+			p += j;
+		}
+		p++;
+	}
+
+	return ret;
+}
+
+
diff --git a/drivers/amvdec_ports/decoder/aml_h264_parser.h b/drivers/amvdec_ports/decoder/aml_h264_parser.h
new file mode 100644
index 0000000..def00dd
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/aml_h264_parser.h
@@ -0,0 +1,210 @@
+/*
+ * drivers/amvdec_ports/decoder/aml_h264_parser.h
+ *
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef AML_H264_PARSER_H
+#define AML_H264_PARSER_H
+
+#include "../aml_vcodec_drv.h"
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+#include "../utils/pixfmt.h"
+#endif
+
+#define QP_MAX_NUM (51 + 6 * 6)           // The maximum supported qp
+
+/* NAL unit types (Rec. ITU-T H.264, Table 7-1) */
+enum {
+	H264_NAL_SLICE           = 1,
+	H264_NAL_DPA             = 2,
+	H264_NAL_DPB             = 3,
+	H264_NAL_DPC             = 4,
+	H264_NAL_IDR_SLICE       = 5,
+	H264_NAL_SEI             = 6,
+	H264_NAL_SPS             = 7,
+	H264_NAL_PPS             = 8,
+	H264_NAL_AUD             = 9,
+	H264_NAL_END_SEQUENCE    = 10,
+	H264_NAL_END_STREAM      = 11,
+	H264_NAL_FILLER_DATA     = 12,
+	H264_NAL_SPS_EXT         = 13,
+	H264_NAL_AUXILIARY_SLICE = 19,
+};
+
+/* Spec-imposed size limits; clause numbers refer to Rec. ITU-T H.264. */
+enum {
+	// 7.4.2.1.1: seq_parameter_set_id is in [0, 31].
+	H264_MAX_SPS_COUNT = 32,
+	// 7.4.2.2: pic_parameter_set_id is in [0, 255].
+	H264_MAX_PPS_COUNT = 256,
+
+	// A.3: MaxDpbFrames is bounded above by 16.
+	H264_MAX_DPB_FRAMES = 16,
+	// 7.4.2.1.1: max_num_ref_frames is in [0, MaxDpbFrames], and
+	// each reference frame can have two fields.
+	H264_MAX_REFS       = 2 * H264_MAX_DPB_FRAMES,
+
+	// 7.4.3.1: modification_of_pic_nums_idc is not equal to 3 at most
+	// num_ref_idx_lN_active_minus1 + 1 times (that is, once for each
+	// possible reference), then equal to 3 once.
+	H264_MAX_RPLM_COUNT = H264_MAX_REFS + 1,
+
+	// 7.4.3.3: in the worst case, we begin with a full short-term
+	// reference picture list.  Each picture in turn is moved to the
+	// long-term list (type 3) and then discarded from there (type 2).
+	// Then, we set the length of the long-term list (type 4), mark
+	// the current picture as long-term (type 6) and terminate the
+	// process (type 0).
+	H264_MAX_MMCO_COUNT = H264_MAX_REFS * 2 + 3,
+
+	// A.2.1, A.2.3: profiles supporting FMO constrain
+	// num_slice_groups_minus1 to be in [0, 7].
+	H264_MAX_SLICE_GROUPS = 8,
+
+	// E.2.2: cpb_cnt_minus1 is in [0, 31].
+	H264_MAX_CPB_CNT = 32,
+
+	// A.3: in table A-1 the highest level allows a MaxFS of 139264.
+	H264_MAX_MB_PIC_SIZE = 139264,
+	// A.3.1, A.3.2: PicWidthInMbs and PicHeightInMbs are constrained
+	// to be not greater than sqrt(MaxFS * 8).  Hence height/width are
+	// bounded above by sqrt(139264 * 8) = 1055.5 macroblocks.
+	H264_MAX_MB_WIDTH    = 1055,
+	H264_MAX_MB_HEIGHT   = 1055,
+	H264_MAX_WIDTH       = H264_MAX_MB_WIDTH  * 16,
+	H264_MAX_HEIGHT     = H264_MAX_MB_HEIGHT * 16,
+};
+
+/**
+ * Rational number (pair of numerator and denominator).
+ * Used for the SPS sample aspect ratio (sar) below.
+ */
+struct rational{
+	int num; ///< Numerator
+	int den; ///< Denominator
+};
+
+/**
+ * Sequence parameter set (decoded form of an H.264 SPS NAL).
+ * Trailing comments name the bitstream syntax element a field is
+ * derived from; fields without one carry the element's own name.
+ */
+struct h264_SPS_t {
+	u32 sps_id;                        ///< seq_parameter_set_id
+	int profile_idc;
+	int level_idc;
+	int chroma_format_idc;
+	int transform_bypass;              ///< qpprime_y_zero_transform_bypass_flag
+	int log2_max_frame_num;            ///< log2_max_frame_num_minus4 + 4
+	int poc_type;                      ///< pic_order_cnt_type
+	int log2_max_poc_lsb;              ///< log2_max_pic_order_cnt_lsb_minus4
+	int delta_pic_order_always_zero_flag;
+	int offset_for_non_ref_pic;
+	int offset_for_top_to_bottom_field;
+	int poc_cycle_length;              ///< num_ref_frames_in_pic_order_cnt_cycle
+	int ref_frame_count;               ///< num_ref_frames
+	int gaps_in_frame_num_allowed_flag;
+	int mb_width;                      ///< pic_width_in_mbs_minus1 + 1
+	///< (pic_height_in_map_units_minus1 + 1) * (2 - frame_mbs_only_flag)
+	int mb_height;
+	int frame_mbs_only_flag;
+	int mb_aff;                        ///< mb_adaptive_frame_field_flag
+	int direct_8x8_inference_flag;
+	int crop;                          ///< frame_cropping_flag
+
+	/* those 4 are already in luma samples */
+	u32 crop_left;            ///< frame_cropping_rect_left_offset
+	u32 crop_right;           ///< frame_cropping_rect_right_offset
+	u32 crop_top;             ///< frame_cropping_rect_top_offset
+	u32 crop_bottom;          ///< frame_cropping_rect_bottom_offset
+	int vui_parameters_present_flag;
+	struct rational sar;
+	int video_signal_type_present_flag;
+	int full_range;
+	int colour_description_present_flag;
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+	enum AVColorPrimaries color_primaries;
+	enum AVColorTransferCharacteristic color_trc;
+	enum AVColorSpace colorspace;
+#endif
+	int timing_info_present_flag;
+	u32 num_units_in_tick;
+	u32 time_scale;
+	int fixed_frame_rate_flag;
+	int32_t offset_for_ref_frame[256];
+	int bitstream_restriction_flag;
+	int num_reorder_frames;
+	int max_dec_frame_buffering;
+	int scaling_matrix_present;
+	u8 scaling_matrix4[6][16];
+	u8 scaling_matrix8[6][64];
+	int nal_hrd_parameters_present_flag;
+	int vcl_hrd_parameters_present_flag;
+	int pic_struct_present_flag;
+	int time_offset_length;
+	int cpb_cnt;                          ///< See H.264 E.1.2
+	int initial_cpb_removal_delay_length; ///< initial_cpb_removal_delay_length_minus1 + 1
+	int cpb_removal_delay_length;         ///< cpb_removal_delay_length_minus1 + 1
+	int dpb_output_delay_length;          ///< dpb_output_delay_length_minus1 + 1
+	int bit_depth_luma;                   ///< bit_depth_luma_minus8 + 8
+	int bit_depth_chroma;                 ///< bit_depth_chroma_minus8 + 8
+	int residual_color_transform_flag;    ///< residual_colour_transform_flag
+	int constraint_set_flags;             ///< constraint_set[0-3]_flag
+} ;
+
+/**
+ * Picture parameter set (decoded form of an H.264 PPS NAL).
+ */
+struct h264_PPS_t {
+	u32 sps_id;                 ///< presumably the referenced seq_parameter_set_id -- confirm in parser
+	int cabac;                  ///< entropy_coding_mode_flag
+	int pic_order_present;      ///< pic_order_present_flag
+	int slice_group_count;      ///< num_slice_groups_minus1 + 1
+	int mb_slice_group_map_type;
+	u32 ref_count[2];  ///< num_ref_idx_l0/1_active_minus1 + 1
+	int weighted_pred;          ///< weighted_pred_flag
+	int weighted_bipred_idc;
+	int init_qp;                ///< pic_init_qp_minus26 + 26
+	int init_qs;                ///< pic_init_qs_minus26 + 26
+	int chroma_qp_index_offset[2];
+	int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
+	int constrained_intra_pred;     ///< constrained_intra_pred_flag
+	int redundant_pic_cnt_present;  ///< redundant_pic_cnt_present_flag
+	int transform_8x8_mode;         ///< transform_8x8_mode_flag
+	u8 scaling_matrix4[6][16];
+	u8 scaling_matrix8[6][64];
+	u8 chroma_qp_table[2][87+1];  ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
+	int chroma_qp_diff;
+	u8 data[4096];                ///< raw PPS payload copy
+	int data_size;
+
+	/* dequant tables indexed by [list][qp][coeff]; the _coeff pointers
+	 * presumably alias into the _buffer storage -- confirm in decoder. */
+	u32 dequant4_buffer[6][87 + 1][16];
+	u32 dequant8_buffer[6][87 + 1][64];
+	u32(*dequant4_coeff[6])[16];
+	u32(*dequant8_coeff[6])[64];
+} ;
+
+/* Aggregated output of the extradata parser. */
+struct h264_param_sets {
+	bool sps_parsed;	/* true once .sps holds a decoded SPS */
+	bool pps_parsed;	/* true once .pps holds a decoded PPS (currently never set; PPS path is commented out in the .c) */
+	struct h264_SPS_t sps;
+	struct h264_PPS_t pps;
+};
+
+
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+/* Scan Annex-B extradata for parameter sets; implemented in aml_h264_parser.c. */
+int h264_decode_extradata_ps(u8 *data, int size, struct h264_param_sets *ps);
+#else
+/*
+ * Software parser disabled: provide a 'static inline' stub.  A plain
+ * 'inline' definition in a header emits no out-of-line body (and C99 vs
+ * gnu89 inline semantics differ), which can cause undefined-reference or
+ * multiple-definition link errors when the header is included from
+ * several translation units.  'static inline' is the kernel convention.
+ */
+static inline int h264_decode_extradata_ps(u8 *data, int size, struct h264_param_sets *ps) { return -1; }
+#endif
+
+#endif /* AML_H264_PARSER_H */
+
diff --git a/drivers/amvdec_ports/decoder/aml_hevc_parser.c b/drivers/amvdec_ports/decoder/aml_hevc_parser.c
new file mode 100644
index 0000000..4ea76b9
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/aml_hevc_parser.c
@@ -0,0 +1,1301 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "aml_hevc_parser.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+#include "../utils/golomb.h"
+#include "../utils/common.h"
+#include "utils.h"
+
+/* x coordinates of the diagonal scan order for 4x4 coefficient blocks. */
+const u8 ff_hevc_diag_scan4x4_x[16] = {
+	0, 0, 1, 0,
+	1, 2, 0, 1,
+	2, 3, 1, 2,
+	3, 2, 3, 3,
+};
+
+/* y coordinates of the diagonal scan order for 4x4 coefficient blocks. */
+const u8 ff_hevc_diag_scan4x4_y[16] = {
+	0, 1, 0, 2,
+	1, 0, 3, 2,
+	1, 0, 3, 2,
+	1, 3, 2, 3,
+};
+
+/* x coordinates of the diagonal scan order for 8x8 coefficient blocks. */
+const u8 ff_hevc_diag_scan8x8_x[64] = {
+	0, 0, 1, 0,
+	1, 2, 0, 1,
+	2, 3, 0, 1,
+	2, 3, 4, 0,
+	1, 2, 3, 4,
+	5, 0, 1, 2,
+	3, 4, 5, 6,
+	0, 1, 2, 3,
+	4, 5, 6, 7,
+	1, 2, 3, 4,
+	5, 6, 7, 2,
+	3, 4, 5, 6,
+	7, 3, 4, 5,
+	6, 7, 4, 5,
+	6, 7, 5, 6,
+	7, 6, 7, 7,
+};
+
+/* y coordinates of the diagonal scan order for 8x8 coefficient blocks. */
+const u8 ff_hevc_diag_scan8x8_y[64] = {
+	0, 1, 0, 2,
+	1, 0, 3, 2,
+	1, 0, 4, 3,
+	2, 1, 0, 5,
+	4, 3, 2, 1,
+	0, 6, 5, 4,
+	3, 2, 1, 0,
+	7, 6, 5, 4,
+	3, 2, 1, 0,
+	7, 6, 5, 4,
+	3, 2, 1, 7,
+	6, 5, 4, 3,
+	2, 7, 6, 5,
+	4, 3, 7, 6,
+	5, 4, 7, 6,
+	5, 7, 6, 7,
+};
+
+/* Default 8x8 intra quantization scaling matrix (spec default list). */
+static const u8 default_scaling_list_intra[] = {
+	16, 16, 16, 16, 17, 18, 21, 24,
+	16, 16, 16, 16, 17, 19, 22, 25,
+	16, 16, 17, 18, 20, 22, 25, 29,
+	16, 16, 18, 21, 24, 27, 31, 36,
+	17, 17, 20, 24, 30, 35, 41, 47,
+	18, 19, 22, 27, 35, 44, 54, 65,
+	21, 22, 25, 31, 41, 54, 70, 88,
+	24, 25, 29, 36, 47, 65, 88, 115
+};
+
+/* Default 8x8 inter quantization scaling matrix (spec default list). */
+static const u8 default_scaling_list_inter[] = {
+	16, 16, 16, 16, 17, 18, 20, 24,
+	16, 16, 16, 17, 18, 20, 24, 25,
+	16, 16, 17, 18, 20, 24, 25, 28,
+	16, 17, 18, 20, 24, 25, 28, 33,
+	17, 18, 20, 24, 25, 28, 33, 41,
+	18, 20, 24, 25, 28, 33, 41, 54,
+	20, 24, 25, 28, 33, 41, 54, 71,
+	24, 25, 28, 33, 41, 54, 71, 91
+};
+
+/* Sample aspect ratios indexed by VUI aspect_ratio_idc
+ * (idc 255 = explicit 16-bit num/den, handled in decode_vui()). */
+static const struct AVRational vui_sar[] = {
+	{  0,   1 },
+	{  1,   1 },
+	{ 12,  11 },
+	{ 10,  11 },
+	{ 16,  11 },
+	{ 40,  33 },
+	{ 24,  11 },
+	{ 20,  11 },
+	{ 32,  11 },
+	{ 80,  33 },
+	{ 18,  11 },
+	{ 15,  11 },
+	{ 64,  33 },
+	{ 160, 99 },
+	{  4,   3 },
+	{  3,   2 },
+	{  2,   1 },
+};
+
+/* Horizontal chroma subsampling factor, indexed by chroma_format_idc. */
+static const u8 hevc_sub_width_c[] = {
+	1, 2, 2, 1
+};
+
+/* Vertical chroma subsampling factor, indexed by chroma_format_idc. */
+static const u8 hevc_sub_height_c[] = {
+	1, 2, 1, 1
+};
+
+/*
+ * Parse the common part of an HEVC profile_tier_level() into @ptl.
+ * Returns 0 on success, -1 when fewer bits remain than the fixed-size
+ * layout needs (2+1+5 profile bits, 32 compatibility flags, 4 source
+ * flags, 44 reserved bits).
+ */
+static int decode_profile_tier_level(struct get_bits_context *gb, struct PTLCommon *ptl)
+{
+	int i;
+
+	if (get_bits_left(gb) < 2+1+5 + 32 + 4 + 16 + 16 + 12)
+		return -1;
+
+	ptl->profile_space = get_bits(gb, 2);
+	ptl->tier_flag     = get_bits1(gb);
+	ptl->profile_idc   = get_bits(gb, 5);
+	if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN)
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Main profile bitstream\n");
+	else if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN_10)
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Main 10 profile bitstream\n");
+	else if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN_STILL_PICTURE)
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Main Still Picture profile bitstream\n");
+	else if (ptl->profile_idc == FF_PROFILE_HEVC_REXT)
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Range Extension profile bitstream\n");
+	else
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Unknown HEVC profile: %d\n", ptl->profile_idc);
+
+	for (i = 0; i < 32; i++) {
+		ptl->profile_compatibility_flag[i] = get_bits1(gb);
+
+		/* profile_idc 0: infer the profile from the compatibility flags. */
+		if (ptl->profile_idc == 0 && i > 0 && ptl->profile_compatibility_flag[i])
+			ptl->profile_idc = i;
+	}
+	ptl->progressive_source_flag    = get_bits1(gb);
+	ptl->interlaced_source_flag     = get_bits1(gb);
+	ptl->non_packed_constraint_flag = get_bits1(gb);
+	ptl->frame_only_constraint_flag = get_bits1(gb);
+
+	skip_bits(gb, 16); // XXX_reserved_zero_44bits[0..15]
+	skip_bits(gb, 16); // XXX_reserved_zero_44bits[16..31]
+	skip_bits(gb, 12); // XXX_reserved_zero_44bits[32..43]
+
+	return 0;
+}
+
+/*
+ * Parse a full profile_tier_level(): the general PTL, then presence
+ * flags and optional PTL/level info for each of the
+ * max_num_sub_layers - 1 sub-layers.
+ * Returns 0 on success, -1 on truncated input.
+ */
+static int parse_ptl(struct get_bits_context *gb, struct PTL *ptl, int max_num_sub_layers)
+{
+	int i;
+	if (decode_profile_tier_level(gb, &ptl->general_ptl) < 0 ||
+		get_bits_left(gb) < 8 + (8*2 * (max_num_sub_layers - 1 > 0))) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "PTL information too short\n");
+		return -1;
+	}
+
+	ptl->general_ptl.level_idc = get_bits(gb, 8);
+
+	for (i = 0; i < max_num_sub_layers - 1; i++) {
+		ptl->sub_layer_profile_present_flag[i] = get_bits1(gb);
+		ptl->sub_layer_level_present_flag[i]   = get_bits1(gb);
+	}
+
+	/* Alignment: pad with reserved_zero_2bits up to 8 sub-layer slots. */
+	if (max_num_sub_layers - 1> 0)
+		for (i = max_num_sub_layers - 1; i < 8; i++)
+			skip_bits(gb, 2); // reserved_zero_2bits[i]
+	for (i = 0; i < max_num_sub_layers - 1; i++) {
+		if (ptl->sub_layer_profile_present_flag[i] &&
+			decode_profile_tier_level(gb, &ptl->sub_layer_ptl[i]) < 0) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "PTL information for sublayer %i too short\n", i);
+			return -1;
+		}
+		if (ptl->sub_layer_level_present_flag[i]) {
+			if (get_bits_left(gb) < 8) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Not enough data for sublayer %i level_idc\n", i);
+				return -1;
+			} else
+				ptl->sub_layer_ptl[i].level_idc = get_bits(gb, 8);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Skip one sub_layer_hrd_parameters() block: @nb_cpb CPB entries, each
+ * with bit-rate/CPB-size values (plus the DU variants when
+ * @subpic_params_present is set) and a cbr_flag.  Values are consumed
+ * but discarded.
+ */
+static void decode_sublayer_hrd(struct get_bits_context *gb,
+	u32 nb_cpb, int subpic_params_present)
+{
+	u32 idx;
+
+	for (idx = 0; idx < nb_cpb; idx++) {
+		get_ue_golomb_long(gb); /* bit_rate_value_minus1 */
+		get_ue_golomb_long(gb); /* cpb_size_value_minus1 */
+
+		if (subpic_params_present) {
+			get_ue_golomb_long(gb); /* cpb_size_du_value_minus1 */
+			get_ue_golomb_long(gb); /* bit_rate_du_value_minus1 */
+		}
+
+		skip_bits1(gb); /* cbr_flag */
+	}
+}
+
+/*
+ * Parse hrd_parameters().  @common_inf_present selects whether the
+ * common-information block precedes the per-sub-layer loops;
+ * @max_sublayers bounds the loop.  All values are consumed and
+ * discarded except for validation of cpb_cnt_minus1.
+ * Returns 0 on success, -1 when nb_cpb is out of [1, 32].
+ */
+static int decode_hrd(struct get_bits_context *gb,
+	int common_inf_present, int max_sublayers)
+{
+	int nal_params_present = 0, vcl_params_present = 0;
+	int subpic_params_present = 0;
+	int i;
+
+	if (common_inf_present) {
+		nal_params_present = get_bits1(gb);
+		vcl_params_present = get_bits1(gb);
+
+		if (nal_params_present || vcl_params_present) {
+			subpic_params_present = get_bits1(gb);
+
+			if (subpic_params_present) {
+				skip_bits(gb, 8); // tick_divisor_minus2
+				skip_bits(gb, 5); // du_cpb_removal_delay_increment_length_minus1
+				skip_bits(gb, 1); // sub_pic_cpb_params_in_pic_timing_sei_flag
+				skip_bits(gb, 5); // dpb_output_delay_du_length_minus1
+			}
+
+			skip_bits(gb, 4); // bit_rate_scale
+			skip_bits(gb, 4); // cpb_size_scale
+
+			if (subpic_params_present)
+				skip_bits(gb, 4);  // cpb_size_du_scale
+
+			skip_bits(gb, 5); // initial_cpb_removal_delay_length_minus1
+			skip_bits(gb, 5); // au_cpb_removal_delay_length_minus1
+			skip_bits(gb, 5); // dpb_output_delay_length_minus1
+		}
+	}
+
+	for (i = 0; i < max_sublayers; i++) {
+		int low_delay = 0;
+		u32 nb_cpb = 1;
+		int fixed_rate = get_bits1(gb); /* fixed_pic_rate_general_flag */
+
+		if (!fixed_rate)
+			fixed_rate = get_bits1(gb); /* fixed_pic_rate_within_cvs_flag */
+
+		if (fixed_rate)
+			get_ue_golomb_long(gb);  // elemental_duration_in_tc_minus1
+		else
+			low_delay = get_bits1(gb);
+
+		if (!low_delay) {
+			nb_cpb = get_ue_golomb_long(gb) + 1;
+			if (nb_cpb < 1 || nb_cpb > 32) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "nb_cpb %d invalid\n", nb_cpb);
+				return -1;
+			}
+		}
+
+		if (nal_params_present)
+			decode_sublayer_hrd(gb, nb_cpb, subpic_params_present);
+		if (vcl_params_present)
+			decode_sublayer_hrd(gb, nb_cpb, subpic_params_present);
+	}
+	return 0;
+}
+
+/*
+ * Parse a video parameter set NAL payload into @vps.
+ * Validates reserved markers, sub-layer counts, DPB sizes and layer-set
+ * bounds along the way; any violation is logged and aborts the parse.
+ * Returns 0 on success, -1 on error.
+ */
+int ff_hevc_parse_vps(struct get_bits_context *gb, struct h265_VPS_t *vps)
+{
+	int i,j;
+	int vps_id = 0;
+
+	v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Decoding VPS\n");
+
+	vps_id = get_bits(gb, 4);
+	if (vps_id >= HEVC_MAX_VPS_COUNT) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "VPS id out of range: %d\n", vps_id);
+		goto err;
+	}
+
+	if (get_bits(gb, 2) != 3) { // vps_reserved_three_2bits
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vps_reserved_three_2bits is not three\n");
+		goto err;
+	}
+
+	vps->vps_max_layers	= get_bits(gb, 6) + 1;
+	vps->vps_max_sub_layers	= get_bits(gb, 3) + 1;
+	vps->vps_temporal_id_nesting_flag = get_bits1(gb);
+
+	if (get_bits(gb, 16) != 0xffff) { // vps_reserved_ffff_16bits
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vps_reserved_ffff_16bits is not 0xffff\n");
+		goto err;
+	}
+
+	if (vps->vps_max_sub_layers > HEVC_MAX_SUB_LAYERS) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vps_max_sub_layers out of range: %d\n",
+			vps->vps_max_sub_layers);
+		goto err;
+	}
+
+	if (parse_ptl(gb, &vps->ptl, vps->vps_max_sub_layers) < 0)
+		goto err;
+
+	vps->vps_sub_layer_ordering_info_present_flag = get_bits1(gb);
+
+	/* If ordering info is absent, only the last sub-layer entry is coded. */
+	i = vps->vps_sub_layer_ordering_info_present_flag ? 0 : vps->vps_max_sub_layers - 1;
+	for (; i < vps->vps_max_sub_layers; i++) {
+		vps->vps_max_dec_pic_buffering[i]	= get_ue_golomb_long(gb) + 1;
+		vps->vps_num_reorder_pics[i]		= get_ue_golomb_long(gb);
+		vps->vps_max_latency_increase[i]	= get_ue_golomb_long(gb) - 1;
+
+		if (vps->vps_max_dec_pic_buffering[i] > HEVC_MAX_DPB_SIZE || !vps->vps_max_dec_pic_buffering[i]) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vps_max_dec_pic_buffering_minus1 out of range: %d\n",
+				vps->vps_max_dec_pic_buffering[i] - 1);
+			goto err;
+		}
+		if (vps->vps_num_reorder_pics[i] > vps->vps_max_dec_pic_buffering[i] - 1) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vps_max_num_reorder_pics out of range: %d\n",
+				vps->vps_num_reorder_pics[i]);
+			goto err;
+		}
+	}
+
+	vps->vps_max_layer_id   = get_bits(gb, 6);
+	vps->vps_num_layer_sets = get_ue_golomb_long(gb) + 1;
+	/* 1LL arithmetic avoids 32-bit overflow in the flag-count bound check. */
+	if (vps->vps_num_layer_sets < 1 || vps->vps_num_layer_sets > 1024 ||
+		(vps->vps_num_layer_sets - 1LL) * (vps->vps_max_layer_id + 1LL) > get_bits_left(gb)) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "too many layer_id_included_flags\n");
+		goto err;
+	}
+
+	for (i = 1; i < vps->vps_num_layer_sets; i++)
+		for (j = 0; j <= vps->vps_max_layer_id; j++)
+			skip_bits(gb, 1);  // layer_id_included_flag[i][j]
+
+	vps->vps_timing_info_present_flag = get_bits1(gb);
+	if (vps->vps_timing_info_present_flag) {
+		vps->vps_num_units_in_tick	= get_bits_long(gb, 32);
+		vps->vps_time_scale		= get_bits_long(gb, 32);
+		vps->vps_poc_proportional_to_timing_flag = get_bits1(gb);
+		if (vps->vps_poc_proportional_to_timing_flag)
+			vps->vps_num_ticks_poc_diff_one = get_ue_golomb_long(gb) + 1;
+		vps->vps_num_hrd_parameters = get_ue_golomb_long(gb);
+		if (vps->vps_num_hrd_parameters > (u32)vps->vps_num_layer_sets) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vps_num_hrd_parameters %d is invalid\n", vps->vps_num_hrd_parameters);
+			goto err;
+		}
+		for (i = 0; i < vps->vps_num_hrd_parameters; i++) {
+			int common_inf_present = 1;
+
+			get_ue_golomb_long(gb); // hrd_layer_set_idx
+			if (i)
+				common_inf_present = get_bits1(gb);
+			decode_hrd(gb, common_inf_present, vps->vps_max_sub_layers);
+		}
+	}
+	get_bits1(gb); /* vps_extension_flag */
+
+	if (get_bits_left(gb) < 0) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Overread VPS by %d bits\n", -get_bits_left(gb));
+		goto err;
+	}
+
+	return 0;
+err:
+	return -1;
+}
+
+/*
+ * Derive sps->pix_fmt and sps->pixel_shift from bit_depth and
+ * chroma_format_idc.  Only 8/9/10/12-bit depths are supported; anything
+ * else is rejected with -1.  A chroma_format_idc outside [0, 3] leaves
+ * sps->pix_fmt untouched, matching the original if-chain behaviour.
+ */
+static int map_pixel_format(struct h265_SPS_t *sps)
+{
+	/* Rows: bit depth 8/9/10/12; columns: chroma_format_idc 0..3. */
+	static const int fmt_tab[4][4] = {
+		{ AV_PIX_FMT_GRAY8,  AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV444P   },
+		{ AV_PIX_FMT_GRAY9,  AV_PIX_FMT_YUV420P9,  AV_PIX_FMT_YUV422P9,  AV_PIX_FMT_YUV444P9  },
+		{ AV_PIX_FMT_GRAY10, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10 },
+		{ AV_PIX_FMT_GRAY12, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12 },
+	};
+	int row;
+
+	switch (sps->bit_depth) {
+	case 8:
+		row = 0;
+		break;
+	case 9:
+		row = 1;
+		break;
+	case 10:
+		row = 2;
+		break;
+	case 12:
+		row = 3;
+		break;
+	default:
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "The following bit-depths are currently specified: 8, 9, 10 and 12 bits, "
+			"chroma_format_idc is %d, depth is %d\n",
+			sps->chroma_format_idc, sps->bit_depth);
+		return -1;
+	}
+
+	if (sps->chroma_format_idc >= 0 && sps->chroma_format_idc <= 3)
+		sps->pix_fmt = fmt_tab[row][sps->chroma_format_idc];
+
+	/* Samples wider than 8 bits occupy two bytes each. */
+	sps->pixel_shift = sps->bit_depth > 8;
+
+	return 0;
+}
+
+/*
+ * Initialize @sl with the spec-default scaling lists: flat 16s for every
+ * 4x4 list and for the 16x16/32x32 DC terms; for the 8x8/16x16/32x32
+ * sizes, matrices 0-2 take the default intra table and 3-5 the default
+ * inter table.
+ */
+static void set_default_scaling_list_data(struct ScalingList *sl)
+{
+	int size_id, matrix_id;
+
+	for (matrix_id = 0; matrix_id < 6; matrix_id++) {
+		// 4x4 default is 16
+		memset(sl->sl[0][matrix_id], 16, 16);
+		sl->sl_dc[0][matrix_id] = 16; // default for 16x16
+		sl->sl_dc[1][matrix_id] = 16; // default for 32x32
+	}
+
+	for (size_id = 1; size_id < 4; size_id++)
+		for (matrix_id = 0; matrix_id < 6; matrix_id++)
+			memcpy(sl->sl[size_id][matrix_id],
+				matrix_id < 3 ? default_scaling_list_intra
+					      : default_scaling_list_inter,
+				64);
+}
+
+/*
+ * Parse scaling_list_data() into @sl.  For each (size_id, matrix_id)
+ * either copy a previously decoded list (pred mode 0, non-zero delta)
+ * or read explicit delta-coded coefficients in diagonal-scan order.
+ * For 4:4:4 content the 32x32 chroma lists are replicated from the
+ * 16x16 ones at the end.  Returns 0 on success, -1 on an invalid
+ * copy delta.
+ */
+static int scaling_list_data(struct get_bits_context *gb,
+	struct ScalingList *sl, struct h265_SPS_t *sps)
+{
+	u8 scaling_list_pred_mode_flag;
+	int scaling_list_dc_coef[2][6];
+	int size_id, matrix_id, pos;
+	int i;
+
+	/* 32x32 (size_id 3) only codes matrices 0 and 3; hence the stride. */
+	for (size_id = 0; size_id < 4; size_id++)
+		for (matrix_id = 0; matrix_id < 6; matrix_id += ((size_id == 3) ? 3 : 1)) {
+			scaling_list_pred_mode_flag = get_bits1(gb);
+			if (!scaling_list_pred_mode_flag) {
+				u32 delta = get_ue_golomb_long(gb);
+				/* Only need to handle non-zero delta. Zero means default,
+				* which should already be in the arrays. */
+				if (delta) {
+					// Copy from previous array.
+					delta *= (size_id == 3) ? 3 : 1;
+					if (matrix_id < delta) {
+						v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid delta in scaling list data: %d.\n", delta);
+						return -1;
+					}
+
+					memcpy(sl->sl[size_id][matrix_id],
+						sl->sl[size_id][matrix_id - delta],
+						size_id > 0 ? 64 : 16);
+					if (size_id > 1)
+						sl->sl_dc[size_id - 2][matrix_id] = sl->sl_dc[size_id - 2][matrix_id - delta];
+				}
+			} else {
+				int next_coef, coef_num;
+				int scaling_list_delta_coef;
+
+				next_coef = 8;
+				coef_num = FFMIN(64, 1 << (4 + (size_id << 1)));
+				if (size_id > 1) {
+					scaling_list_dc_coef[size_id - 2][matrix_id] = get_se_golomb(gb) + 8;
+					next_coef = scaling_list_dc_coef[size_id - 2][matrix_id];
+					sl->sl_dc[size_id - 2][matrix_id] = next_coef;
+				}
+				for (i = 0; i < coef_num; i++) {
+					if (size_id == 0)
+						pos = 4 * ff_hevc_diag_scan4x4_y[i] +
+							ff_hevc_diag_scan4x4_x[i];
+					else
+						pos = 8 * ff_hevc_diag_scan8x8_y[i] +
+							ff_hevc_diag_scan8x8_x[i];
+
+					/* Coefficients are delta-coded mod 256 from the previous one. */
+					scaling_list_delta_coef = get_se_golomb(gb);
+					next_coef = (next_coef + 256U + scaling_list_delta_coef) % 256;
+					sl->sl[size_id][matrix_id][pos] = next_coef;
+				}
+			}
+		}
+
+	if (sps->chroma_format_idc == 3) {
+		for (i = 0; i < 64; i++) {
+			sl->sl[3][1][i] = sl->sl[2][1][i];
+			sl->sl[3][2][i] = sl->sl[2][2][i];
+			sl->sl[3][4][i] = sl->sl[2][4][i];
+			sl->sl[3][5][i] = sl->sl[2][5][i];
+		}
+		sl->sl_dc[1][1] = sl->sl_dc[0][1];
+		sl->sl_dc[1][2] = sl->sl_dc[0][2];
+		sl->sl_dc[1][4] = sl->sl_dc[0][4];
+		sl->sl_dc[1][5] = sl->sl_dc[0][5];
+	}
+
+	return 0;
+}
+
+/*
+ * Parse a short_term_ref_pic_set() into @rps, either predicted from a
+ * previously decoded RPS (inter_ref_pic_set_prediction) or coded
+ * explicitly as negative/positive delta-POC lists.  Predicted sets are
+ * sorted into increasing delta-POC order afterwards.
+ * @is_slice_header selects the slice-header variant (explicit delta_idx
+ * into sps->st_rps[]).  Returns 0 on success, -1 on invalid syntax.
+ */
+int ff_hevc_decode_short_term_rps(struct get_bits_context *gb,
+	struct ShortTermRPS *rps, const struct h265_SPS_t *sps, int is_slice_header)
+{
+	u8 rps_predict = 0;
+	int delta_poc;
+	int k0 = 0;
+	int k1 = 0;
+	int k  = 0;
+	int i;
+
+	if (rps != sps->st_rps && sps->nb_st_rps)
+		rps_predict = get_bits1(gb);
+
+	if (rps_predict) {
+		const struct ShortTermRPS *rps_ridx;
+		int delta_rps;
+		u32 abs_delta_rps;
+		u8 use_delta_flag = 0;
+		u8 delta_rps_sign;
+
+		if (is_slice_header) {
+			u32 delta_idx = get_ue_golomb_long(gb) + 1;
+			if (delta_idx > sps->nb_st_rps) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value of delta_idx in slice header RPS: %d > %d.\n",
+					delta_idx, sps->nb_st_rps);
+				return -1;
+			}
+			rps_ridx = &sps->st_rps[sps->nb_st_rps - delta_idx];
+			rps->rps_idx_num_delta_pocs = rps_ridx->num_delta_pocs;
+		} else
+			rps_ridx = &sps->st_rps[rps - sps->st_rps - 1];
+
+		delta_rps_sign = get_bits1(gb);
+		abs_delta_rps  = get_ue_golomb_long(gb) + 1;
+		if (abs_delta_rps < 1 || abs_delta_rps > 32768) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value of abs_delta_rps: %d\n",
+				abs_delta_rps);
+			return -1;
+		}
+		delta_rps = (1 - (delta_rps_sign << 1)) * abs_delta_rps;
+		for (i = 0; i <= rps_ridx->num_delta_pocs; i++) {
+			int used = rps->used[k] = get_bits1(gb);
+
+			if (!used)
+				use_delta_flag = get_bits1(gb);
+
+			if (used || use_delta_flag) {
+				if (i < rps_ridx->num_delta_pocs)
+					delta_poc = delta_rps + rps_ridx->delta_poc[i];
+				else
+					delta_poc = delta_rps;
+				rps->delta_poc[k] = delta_poc;
+				if (delta_poc < 0)
+					k0++;
+				else
+					k1++;
+				k++;
+			}
+		}
+
+		/* NOTE(review): rps->used[]/delta_poc[] are written with index k
+		 * in the loop above before this bound is checked; if
+		 * rps_ridx->num_delta_pocs can reach the array size, the writes
+		 * overflow first -- worth validating before the loop.
+		 */
+		if (k >= ARRAY_SIZE(rps->used)) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid num_delta_pocs: %d\n", k);
+			return -1;
+		}
+
+		rps->num_delta_pocs	= k;
+		rps->num_negative_pics	= k0;
+		// sort in increasing order (smallest first)
+		if (rps->num_delta_pocs != 0) {
+			int used, tmp;
+			/* insertion sort keyed on delta_poc, carrying used[] along */
+			for (i = 1; i < rps->num_delta_pocs; i++) {
+				delta_poc	= rps->delta_poc[i];
+				used		= rps->used[i];
+				for (k = i - 1; k >= 0; k--) {
+				tmp = rps->delta_poc[k];
+					if (delta_poc < tmp) {
+						rps->delta_poc[k + 1]	= tmp;
+						rps->used[k + 1]	= rps->used[k];
+						rps->delta_poc[k]	= delta_poc;
+						rps->used[k]		= used;
+					}
+				}
+			}
+		}
+		if ((rps->num_negative_pics >> 1) != 0) {
+			int used;
+			k = rps->num_negative_pics - 1;
+			// flip the negative values to largest first
+			for (i = 0; i < rps->num_negative_pics >> 1; i++) {
+				delta_poc	= rps->delta_poc[i];
+				used		= rps->used[i];
+				rps->delta_poc[i] = rps->delta_poc[k];
+				rps->used[i]	= rps->used[k];
+				rps->delta_poc[k] = delta_poc;
+				rps->used[k]	= used;
+				k--;
+			}
+		}
+	} else {
+		u32 prev, nb_positive_pics;
+		rps->num_negative_pics	= get_ue_golomb_long(gb);
+		nb_positive_pics	= get_ue_golomb_long(gb);
+
+		if (rps->num_negative_pics >= HEVC_MAX_REFS ||
+			nb_positive_pics >= HEVC_MAX_REFS) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Too many refs in a short term RPS.\n");
+			return -1;
+		}
+
+		rps->num_delta_pocs = rps->num_negative_pics + nb_positive_pics;
+		if (rps->num_delta_pocs) {
+			/* negative list: cumulative, decreasing POC deltas */
+			prev = 0;
+			for (i = 0; i < rps->num_negative_pics; i++) {
+				delta_poc = get_ue_golomb_long(gb) + 1;
+				if (delta_poc < 1 || delta_poc > 32768) {
+					v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value of delta_poc: %d\n",
+						delta_poc);
+					return -1;
+				}
+				prev -= delta_poc;
+				rps->delta_poc[i] = prev;
+				rps->used[i] = get_bits1(gb);
+			}
+			/* positive list: cumulative, increasing POC deltas */
+			prev = 0;
+			for (i = 0; i < nb_positive_pics; i++) {
+				delta_poc = get_ue_golomb_long(gb) + 1;
+				if (delta_poc < 1 || delta_poc > 32768) {
+					v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value of delta_poc: %d\n",
+						delta_poc);
+					return -1;
+				}
+				prev += delta_poc;
+				rps->delta_poc[rps->num_negative_pics + i] = prev;
+				rps->used[rps->num_negative_pics + i] = get_bits1(gb);
+			}
+		}
+	}
+	return 0;
+}
+
+static void decode_vui(struct get_bits_context *gb, struct h265_SPS_t *sps)
+{
+	struct VUI backup_vui, *vui = &sps->vui;
+	struct get_bits_context backup;
+	int sar_present, alt = 0;
+
+	v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Decoding VUI\n");
+
+	sar_present = get_bits1(gb);
+	if (sar_present) {
+		u8 sar_idx = get_bits(gb, 8);
+		if (sar_idx < ARRAY_SIZE(vui_sar))
+			vui->sar = vui_sar[sar_idx];
+		else if (sar_idx == 255) {
+			vui->sar.num = get_bits(gb, 16);
+			vui->sar.den = get_bits(gb, 16);
+		} else
+			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+				"Unknown SAR index: %u.\n", sar_idx);
+	}
+
+	vui->overscan_info_present_flag = get_bits1(gb);
+	if (vui->overscan_info_present_flag)
+		vui->overscan_appropriate_flag = get_bits1(gb);
+
+	vui->video_signal_type_present_flag = get_bits1(gb);
+	if (vui->video_signal_type_present_flag) {
+		vui->video_format		= get_bits(gb, 3);
+		vui->video_full_range_flag	= get_bits1(gb);
+		vui->colour_description_present_flag = get_bits1(gb);
+		if (vui->video_full_range_flag && sps->pix_fmt == AV_PIX_FMT_YUV420P)
+			sps->pix_fmt = AV_PIX_FMT_YUVJ420P;
+		if (vui->colour_description_present_flag) {
+			vui->colour_primaries		= get_bits(gb, 8);
+			vui->transfer_characteristic	= get_bits(gb, 8);
+			vui->matrix_coeffs		= get_bits(gb, 8);
+
+			// Set invalid values to "unspecified"
+			if (!av_color_primaries_name(vui->colour_primaries))
+				vui->colour_primaries = AVCOL_PRI_UNSPECIFIED;
+			if (!av_color_transfer_name(vui->transfer_characteristic))
+				vui->transfer_characteristic = AVCOL_TRC_UNSPECIFIED;
+			if (!av_color_space_name(vui->matrix_coeffs))
+				vui->matrix_coeffs = AVCOL_SPC_UNSPECIFIED;
+			if (vui->matrix_coeffs == AVCOL_SPC_RGB) {
+				switch (sps->pix_fmt) {
+				case AV_PIX_FMT_YUV444P:
+					sps->pix_fmt = AV_PIX_FMT_GBRP;
+					break;
+				case AV_PIX_FMT_YUV444P10:
+					sps->pix_fmt = AV_PIX_FMT_GBRP10;
+					break;
+				case AV_PIX_FMT_YUV444P12:
+					sps->pix_fmt = AV_PIX_FMT_GBRP12;
+					break;
+				}
+			}
+		}
+	}
+
+	vui->chroma_loc_info_present_flag = get_bits1(gb);
+	if (vui->chroma_loc_info_present_flag) {
+		vui->chroma_sample_loc_type_top_field    = get_ue_golomb_long(gb);
+		vui->chroma_sample_loc_type_bottom_field = get_ue_golomb_long(gb);
+	}
+
+	vui->neutra_chroma_indication_flag	= get_bits1(gb);
+	vui->field_seq_flag			= get_bits1(gb);
+	vui->frame_field_info_present_flag	= get_bits1(gb);
+
+	// Backup context in case an alternate header is detected
+	memcpy(&backup, gb, sizeof(backup));
+	memcpy(&backup_vui, vui, sizeof(backup_vui));
+	if (get_bits_left(gb) >= 68 && show_bits_long(gb, 21) == 0x100000) {
+		vui->default_display_window_flag = 0;
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Invalid default display window\n");
+	} else
+		vui->default_display_window_flag = get_bits1(gb);
+
+	if (vui->default_display_window_flag) {
+		int vert_mult  = hevc_sub_height_c[sps->chroma_format_idc];
+		int horiz_mult = hevc_sub_width_c[sps->chroma_format_idc];
+		vui->def_disp_win.left_offset	= get_ue_golomb_long(gb) * horiz_mult;
+		vui->def_disp_win.right_offset	= get_ue_golomb_long(gb) * horiz_mult;
+		vui->def_disp_win.top_offset	= get_ue_golomb_long(gb) *  vert_mult;
+		vui->def_disp_win.bottom_offset	= get_ue_golomb_long(gb) *  vert_mult;
+	}
+
+timing_info:
+	vui->vui_timing_info_present_flag = get_bits1(gb);
+
+	if (vui->vui_timing_info_present_flag) {
+		if (get_bits_left(gb) < 66 && !alt) {
+			// The alternate syntax seem to have timing info located
+			// at where def_disp_win is normally located
+			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Strange VUI timing information, retrying...\n");
+			memcpy(vui, &backup_vui, sizeof(backup_vui));
+			memcpy(gb, &backup, sizeof(backup));
+			alt = 1;
+			goto timing_info;
+		}
+		vui->vui_num_units_in_tick	= get_bits_long(gb, 32);
+		vui->vui_time_scale		= get_bits_long(gb, 32);
+		if (alt) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Retry got %u/%u fps\n",
+			vui->vui_time_scale, vui->vui_num_units_in_tick);
+		}
+		vui->vui_poc_proportional_to_timing_flag = get_bits1(gb);
+		if (vui->vui_poc_proportional_to_timing_flag)
+			vui->vui_num_ticks_poc_diff_one_minus1 = get_ue_golomb_long(gb);
+		vui->vui_hrd_parameters_present_flag = get_bits1(gb);
+		if (vui->vui_hrd_parameters_present_flag)
+			decode_hrd(gb, 1, sps->max_sub_layers);
+	}
+
+	vui->bitstream_restriction_flag = get_bits1(gb);
+	if (vui->bitstream_restriction_flag) {
+		if (get_bits_left(gb) < 8 && !alt) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Strange VUI bitstream restriction information, retrying"
+				" from timing information...\n");
+			memcpy(vui, &backup_vui, sizeof(backup_vui));
+			memcpy(gb, &backup, sizeof(backup));
+			alt = 1;
+			goto timing_info;
+		}
+		vui->tiles_fixed_structure_flag		= get_bits1(gb);
+		vui->motion_vectors_over_pic_boundaries_flag = get_bits1(gb);
+		vui->restricted_ref_pic_lists_flag	= get_bits1(gb);
+		vui->min_spatial_segmentation_idc	= get_ue_golomb_long(gb);
+		vui->max_bytes_per_pic_denom		= get_ue_golomb_long(gb);
+		vui->max_bits_per_min_cu_denom		= get_ue_golomb_long(gb);
+		vui->log2_max_mv_length_horizontal	= get_ue_golomb_long(gb);
+		vui->log2_max_mv_length_vertical	= get_ue_golomb_long(gb);
+	}
+
+	if (get_bits_left(gb) < 1 && !alt) {
+		// XXX: Alternate syntax when sps_range_extension_flag != 0?
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Overread in VUI, retrying from timing information...\n");
+		memcpy(vui, &backup_vui, sizeof(backup_vui));
+		memcpy(gb, &backup, sizeof(backup));
+		alt = 1;
+		goto timing_info;
+	}
+}
+
+/*
+ * Parse an HEVC sequence parameter set (SPS) RBSP (ITU-T H.265 7.3.2.2)
+ * from the bit reader @gb into @sps.
+ *
+ * Returns 0 on success, a negative value on any syntax or range violation.
+ * NOTE(review): appears to assume *sps arrives zero-initialized by the
+ * caller — e.g. separate_colour_plane_flag is read below even when the
+ * bitstream never sets it; TODO confirm at the call sites.
+ */
+int ff_hevc_parse_sps(struct get_bits_context *gb, struct h265_SPS_t *sps)
+{
+	int i, ret = 0;
+	int log2_diff_max_min_transform_block_size;
+	int bit_depth_chroma, start, vui_present, sublayer_ordering_info;
+	struct HEVCWindow *ow;
+
+	sps->vps_id = get_bits(gb, 4);
+	if (sps->vps_id >= HEVC_MAX_VPS_COUNT) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "VPS id out of range: %d\n", sps->vps_id);
+		return -1;
+	}
+
+	sps->max_sub_layers = get_bits(gb, 3) + 1;
+		if (sps->max_sub_layers > HEVC_MAX_SUB_LAYERS) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "sps_max_sub_layers out of range: %d\n",
+				sps->max_sub_layers);
+		return -1;
+	}
+
+	sps->temporal_id_nesting_flag = get_bits(gb, 1);
+
+	if ((ret = parse_ptl(gb, &sps->ptl, sps->max_sub_layers)) < 0)
+		return ret;
+
+	sps->sps_id = get_ue_golomb_long(gb);
+	if (sps->sps_id >= HEVC_MAX_SPS_COUNT) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "SPS id out of range: %d\n", sps->sps_id);
+		return -1;
+	}
+
+	sps->chroma_format_idc = get_ue_golomb_long(gb);
+	if (sps->chroma_format_idc > 3U) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "chroma_format_idc %d is invalid\n", sps->chroma_format_idc);
+		return -1;
+	}
+
+	/* Only 4:4:4 streams carry separate_colour_plane_flag; when set, each
+	 * colour plane is coded as monochrome, so chroma_format_idc becomes 0. */
+	if (sps->chroma_format_idc == 3)
+		sps->separate_colour_plane_flag = get_bits1(gb);
+
+	if (sps->separate_colour_plane_flag)
+		sps->chroma_format_idc = 0;
+
+	sps->width	= get_ue_golomb_long(gb);
+	sps->height	= get_ue_golomb_long(gb);
+	/* 8192x8192 is this decoder's cap, tighter than the spec level bound. */
+	if (sps->width > 8192 || sps->height > 8192) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "width or height oversize.\n");
+		return -1;
+	}
+
+	if (get_bits1(gb)) { // pic_conformance_flag
+		int vert_mult  = hevc_sub_height_c[sps->chroma_format_idc];
+		int horiz_mult = hevc_sub_width_c[sps->chroma_format_idc];
+		/* Conformance window offsets are coded in chroma units; scale to
+		 * luma samples here. */
+		sps->pic_conf_win.left_offset	= get_ue_golomb_long(gb) * horiz_mult;
+		sps->pic_conf_win.right_offset	= get_ue_golomb_long(gb) * horiz_mult;
+		sps->pic_conf_win.top_offset	= get_ue_golomb_long(gb) *  vert_mult;
+		sps->pic_conf_win.bottom_offset = get_ue_golomb_long(gb) *  vert_mult;
+		sps->output_window = sps->pic_conf_win;
+	}
+
+	sps->bit_depth   = get_ue_golomb_long(gb) + 8;
+	bit_depth_chroma = get_ue_golomb_long(gb) + 8;
+	if (sps->chroma_format_idc && bit_depth_chroma != sps->bit_depth) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Luma bit depth (%d) is different from chroma bit depth (%d), this is unsupported.\n",
+			sps->bit_depth, bit_depth_chroma);
+		return -1;
+	}
+	sps->bit_depth_chroma = bit_depth_chroma;
+
+	ret = map_pixel_format(sps);
+	if (ret < 0)
+		return ret;
+
+	sps->log2_max_poc_lsb = get_ue_golomb_long(gb) + 4;
+		if (sps->log2_max_poc_lsb > 16) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "log2_max_pic_order_cnt_lsb_minus4 out range: %d\n",
+				sps->log2_max_poc_lsb - 4);
+		return -1;
+	}
+
+	/* Either per-sub-layer ordering info follows, or only the highest
+	 * sub-layer is coded and then replicated to the lower ones below. */
+	sublayer_ordering_info = get_bits1(gb);
+	start = sublayer_ordering_info ? 0 : sps->max_sub_layers - 1;
+	for (i = start; i < sps->max_sub_layers; i++) {
+		sps->temporal_layer[i].max_dec_pic_buffering = get_ue_golomb_long(gb) + 1;
+		sps->temporal_layer[i].num_reorder_pics      = get_ue_golomb_long(gb);
+		sps->temporal_layer[i].max_latency_increase  = get_ue_golomb_long(gb) - 1;
+		if (sps->temporal_layer[i].max_dec_pic_buffering > (u32)HEVC_MAX_DPB_SIZE) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "sps_max_dec_pic_buffering_minus1 out of range: %d\n",
+				sps->temporal_layer[i].max_dec_pic_buffering - 1U);
+			return -1;
+		}
+		if (sps->temporal_layer[i].num_reorder_pics > sps->temporal_layer[i].max_dec_pic_buffering - 1) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "sps_max_num_reorder_pics out of range: %d\n",
+				sps->temporal_layer[i].num_reorder_pics);
+			if (sps->temporal_layer[i].num_reorder_pics > HEVC_MAX_DPB_SIZE - 1) {
+				return -1;
+			}
+			/* Tolerate mildly out-of-range streams by growing the DPB
+			 * instead of rejecting them outright. */
+			sps->temporal_layer[i].max_dec_pic_buffering = sps->temporal_layer[i].num_reorder_pics + 1;
+		}
+	}
+
+	if (!sublayer_ordering_info) {
+		for (i = 0; i < start; i++) {
+			sps->temporal_layer[i].max_dec_pic_buffering = sps->temporal_layer[start].max_dec_pic_buffering;
+			sps->temporal_layer[i].num_reorder_pics	 = sps->temporal_layer[start].num_reorder_pics;
+			sps->temporal_layer[i].max_latency_increase  = sps->temporal_layer[start].max_latency_increase;
+		}
+	}
+
+	sps->log2_min_cb_size		= get_ue_golomb_long(gb) + 3;
+	sps->log2_diff_max_min_coding_block_size = get_ue_golomb_long(gb);
+	sps->log2_min_tb_size		= get_ue_golomb_long(gb) + 2;
+	log2_diff_max_min_transform_block_size = get_ue_golomb_long(gb);
+	sps->log2_max_trafo_size	= log2_diff_max_min_transform_block_size + sps->log2_min_tb_size;
+
+	if (sps->log2_min_cb_size < 3 || sps->log2_min_cb_size > 30) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value %d for log2_min_cb_size", sps->log2_min_cb_size);
+		return -1;
+	}
+
+	if (sps->log2_diff_max_min_coding_block_size > 30) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value %d for log2_diff_max_min_coding_block_size", sps->log2_diff_max_min_coding_block_size);
+		return -1;
+	}
+
+	if (sps->log2_min_tb_size >= sps->log2_min_cb_size || sps->log2_min_tb_size < 2) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value for log2_min_tb_size");
+		return -1;
+	}
+
+	if (log2_diff_max_min_transform_block_size < 0 || log2_diff_max_min_transform_block_size > 30) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid value %d for log2_diff_max_min_transform_block_size", log2_diff_max_min_transform_block_size);
+		return -1;
+	}
+
+	sps->max_transform_hierarchy_depth_inter = get_ue_golomb_long(gb);
+	sps->max_transform_hierarchy_depth_intra = get_ue_golomb_long(gb);
+
+	sps->scaling_list_enable_flag = get_bits1(gb);
+	if (sps->scaling_list_enable_flag) {
+		set_default_scaling_list_data(&sps->scaling_list);
+
+		if (get_bits1(gb)) { /* sps_scaling_list_data_present_flag */
+			ret = scaling_list_data(gb, &sps->scaling_list, sps);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	sps->amp_enabled_flag	= get_bits1(gb);
+	sps->sao_enabled	= get_bits1(gb);
+
+	sps->pcm_enabled_flag	= get_bits1(gb);
+	if (sps->pcm_enabled_flag) {
+		sps->pcm.bit_depth = get_bits(gb, 4) + 1;
+		sps->pcm.bit_depth_chroma = get_bits(gb, 4) + 1;
+		sps->pcm.log2_min_pcm_cb_size = get_ue_golomb_long(gb) + 3;
+		sps->pcm.log2_max_pcm_cb_size = sps->pcm.log2_min_pcm_cb_size +
+			get_ue_golomb_long(gb);
+		if (FFMAX(sps->pcm.bit_depth, sps->pcm.bit_depth_chroma) > sps->bit_depth) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "PCM bit depth (%d, %d) is greater than normal bit depth (%d)\n",
+				sps->pcm.bit_depth, sps->pcm.bit_depth_chroma, sps->bit_depth);
+			return -1;
+		}
+
+		sps->pcm.loop_filter_disable_flag = get_bits1(gb);
+	}
+
+	sps->nb_st_rps = get_ue_golomb_long(gb);
+	if (sps->nb_st_rps > HEVC_MAX_SHORT_TERM_REF_PIC_SETS) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Too many short term RPS: %d.\n", sps->nb_st_rps);
+		return -1;
+	}
+	for (i = 0; i < sps->nb_st_rps; i++) {
+		if ((ret = ff_hevc_decode_short_term_rps(gb, &sps->st_rps[i], sps, 0)) < 0)
+			return ret;
+	}
+
+	sps->long_term_ref_pics_present_flag = get_bits1(gb);
+	if (sps->long_term_ref_pics_present_flag) {
+		sps->num_long_term_ref_pics_sps = get_ue_golomb_long(gb);
+		if (sps->num_long_term_ref_pics_sps > HEVC_MAX_LONG_TERM_REF_PICS) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Too many long term ref pics: %d.\n",
+				sps->num_long_term_ref_pics_sps);
+			return -1;
+		}
+		for (i = 0; i < sps->num_long_term_ref_pics_sps; i++) {
+			sps->lt_ref_pic_poc_lsb_sps[i] = get_bits(gb, sps->log2_max_poc_lsb);
+			sps->used_by_curr_pic_lt_sps_flag[i] = get_bits1(gb);
+		}
+	}
+
+	sps->sps_temporal_mvp_enabled_flag = get_bits1(gb);
+	sps->sps_strong_intra_smoothing_enable_flag = get_bits1(gb);
+	/* Default sample aspect ratio (0/1 = unspecified) before optional VUI. */
+	sps->vui.sar = (struct AVRational){0, 1};
+	vui_present = get_bits1(gb);
+	if (vui_present)
+		decode_vui(gb, sps);
+
+	if (get_bits1(gb)) { // sps_extension_flag
+		sps->sps_range_extension_flag = get_bits1(gb);
+		skip_bits(gb, 7); //sps_extension_7bits = get_bits(gb, 7);
+		if (sps->sps_range_extension_flag) {
+			sps->transform_skip_rotation_enabled_flag = get_bits1(gb);
+			sps->transform_skip_context_enabled_flag  = get_bits1(gb);
+			sps->implicit_rdpcm_enabled_flag = get_bits1(gb);
+			sps->explicit_rdpcm_enabled_flag = get_bits1(gb);
+			sps->extended_precision_processing_flag = get_bits1(gb);
+			if (sps->extended_precision_processing_flag)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "extended_precision_processing_flag not yet implemented\n");
+
+			sps->intra_smoothing_disabled_flag = get_bits1(gb);
+			sps->high_precision_offsets_enabled_flag = get_bits1(gb);
+			if (sps->high_precision_offsets_enabled_flag)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "high_precision_offsets_enabled_flag not yet implemented\n");
+
+			sps->persistent_rice_adaptation_enabled_flag = get_bits1(gb);
+			sps->cabac_bypass_alignment_enabled_flag  = get_bits1(gb);
+			if (sps->cabac_bypass_alignment_enabled_flag)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "cabac_bypass_alignment_enabled_flag not yet implemented\n");
+		}
+	}
+
+	/* Reject cropping windows that overflow or consume the whole frame. */
+	ow = &sps->output_window;
+	if (ow->left_offset >= INT_MAX - ow->right_offset	  ||
+		ow->top_offset	>= INT_MAX - ow->bottom_offset	  ||
+		ow->left_offset + ow->right_offset  >= sps->width ||
+		ow->top_offset	+ ow->bottom_offset >= sps->height) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid cropping offsets: %u/%u/%u/%u\n",
+			ow->left_offset, ow->right_offset, ow->top_offset, ow->bottom_offset);
+		return -1;
+	}
+
+	// Inferred parameters
+	sps->log2_ctb_size = sps->log2_min_cb_size +
+	sps->log2_diff_max_min_coding_block_size;
+	sps->log2_min_pu_size = sps->log2_min_cb_size - 1;
+
+	if (sps->log2_ctb_size > HEVC_MAX_LOG2_CTB_SIZE) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "CTB size out of range: 2^%d\n", sps->log2_ctb_size);
+		return -1;
+	}
+	if (sps->log2_ctb_size < 4) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "log2_ctb_size %d differs from the bounds of any known profile\n", sps->log2_ctb_size);
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "log2_ctb_size %d", sps->log2_ctb_size);
+		return -1;
+	}
+
+	sps->ctb_width  = (sps->width  + (1 << sps->log2_ctb_size) - 1) >> sps->log2_ctb_size;
+	sps->ctb_height = (sps->height + (1 << sps->log2_ctb_size) - 1) >> sps->log2_ctb_size;
+	sps->ctb_size   = sps->ctb_width * sps->ctb_height;
+
+	sps->min_cb_width  = sps->width  >> sps->log2_min_cb_size;
+	sps->min_cb_height = sps->height >> sps->log2_min_cb_size;
+	sps->min_tb_width  = sps->width  >> sps->log2_min_tb_size;
+	sps->min_tb_height = sps->height >> sps->log2_min_tb_size;
+	sps->min_pu_width  = sps->width  >> sps->log2_min_pu_size;
+	sps->min_pu_height = sps->height >> sps->log2_min_pu_size;
+	sps->tb_mask       = (1 << (sps->log2_ctb_size - sps->log2_min_tb_size)) - 1;
+	sps->qp_bd_offset = 6 * (sps->bit_depth - 8);
+
+	/* Frame dimensions must be multiples of the minimum CB size. */
+	if (av_mod_uintp2(sps->width, sps->log2_min_cb_size) ||
+		av_mod_uintp2(sps->height, sps->log2_min_cb_size)) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid coded frame dimensions.\n");
+		return -1;
+	}
+
+	if (sps->max_transform_hierarchy_depth_inter > sps->log2_ctb_size - sps->log2_min_tb_size) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "max_transform_hierarchy_depth_inter out of range: %d\n",
+			sps->max_transform_hierarchy_depth_inter);
+		return -1;
+	}
+	if (sps->max_transform_hierarchy_depth_intra > sps->log2_ctb_size - sps->log2_min_tb_size) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "max_transform_hierarchy_depth_intra out of range: %d\n",
+			sps->max_transform_hierarchy_depth_intra);
+		return -1;
+	}
+	if (sps->log2_max_trafo_size > FFMIN(sps->log2_ctb_size, 5)) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "max transform block size out of range: %d\n",
+			sps->log2_max_trafo_size);
+			return -1;
+	}
+
+	if (get_bits_left(gb) < 0) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Overread SPS by %d bits\n", -get_bits_left(gb));
+		return -1;
+	}
+
+	v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Parsed SPS: id %d; ref: %d, coded wxh: %dx%d, cropped wxh: %dx%d; pix_fmt: %d.\n",
+	       sps->sps_id, sps->temporal_layer[0].num_reorder_pics, sps->width, sps->height,
+	       sps->width - (sps->output_window.left_offset + sps->output_window.right_offset),
+	       sps->height - (sps->output_window.top_offset + sps->output_window.bottom_offset),
+	       sps->pix_fmt);
+
+	return 0;
+}
+
+/*
+ * Human-readable names for the 6-bit nal_unit_type field, indexed by the
+ * HEVCNALUnitType value (ITU-T H.265 Table 7-1).  The table covers the
+ * full 0..63 range, including the reserved and unspecified codes.
+ */
+const char *hevc_nal_type_name[64] = {
+	"TRAIL_N", // HEVC_NAL_TRAIL_N
+	"TRAIL_R", // HEVC_NAL_TRAIL_R
+	"TSA_N", // HEVC_NAL_TSA_N
+	"TSA_R", // HEVC_NAL_TSA_R
+	"STSA_N", // HEVC_NAL_STSA_N
+	"STSA_R", // HEVC_NAL_STSA_R
+	"RADL_N", // HEVC_NAL_RADL_N
+	"RADL_R", // HEVC_NAL_RADL_R
+	"RASL_N", // HEVC_NAL_RASL_N
+	"RASL_R", // HEVC_NAL_RASL_R
+	"RSV_VCL_N10", // HEVC_NAL_VCL_N10
+	"RSV_VCL_R11", // HEVC_NAL_VCL_R11
+	"RSV_VCL_N12", // HEVC_NAL_VCL_N12
+	"RSV_VLC_R13", // HEVC_NAL_VCL_R13
+	"RSV_VCL_N14", // HEVC_NAL_VCL_N14
+	"RSV_VCL_R15", // HEVC_NAL_VCL_R15
+	"BLA_W_LP", // HEVC_NAL_BLA_W_LP
+	"BLA_W_RADL", // HEVC_NAL_BLA_W_RADL
+	"BLA_N_LP", // HEVC_NAL_BLA_N_LP
+	"IDR_W_RADL", // HEVC_NAL_IDR_W_RADL
+	"IDR_N_LP", // HEVC_NAL_IDR_N_LP
+	"CRA_NUT", // HEVC_NAL_CRA_NUT
+	"IRAP_IRAP_VCL22", // HEVC_NAL_IRAP_VCL22
+	"IRAP_IRAP_VCL23", // HEVC_NAL_IRAP_VCL23
+	"RSV_VCL24", // HEVC_NAL_RSV_VCL24
+	"RSV_VCL25", // HEVC_NAL_RSV_VCL25
+	"RSV_VCL26", // HEVC_NAL_RSV_VCL26
+	"RSV_VCL27", // HEVC_NAL_RSV_VCL27
+	"RSV_VCL28", // HEVC_NAL_RSV_VCL28
+	"RSV_VCL29", // HEVC_NAL_RSV_VCL29
+	"RSV_VCL30", // HEVC_NAL_RSV_VCL30
+	"RSV_VCL31", // HEVC_NAL_RSV_VCL31
+	"VPS", // HEVC_NAL_VPS
+	"SPS", // HEVC_NAL_SPS
+	"PPS", // HEVC_NAL_PPS
+	"AUD", // HEVC_NAL_AUD
+	"EOS_NUT", // HEVC_NAL_EOS_NUT
+	"EOB_NUT", // HEVC_NAL_EOB_NUT
+	"FD_NUT", // HEVC_NAL_FD_NUT
+	"SEI_PREFIX", // HEVC_NAL_SEI_PREFIX
+	"SEI_SUFFIX", // HEVC_NAL_SEI_SUFFIX
+	"RSV_NVCL41", // HEVC_NAL_RSV_NVCL41
+	"RSV_NVCL42", // HEVC_NAL_RSV_NVCL42
+	"RSV_NVCL43", // HEVC_NAL_RSV_NVCL43
+	"RSV_NVCL44", // HEVC_NAL_RSV_NVCL44
+	"RSV_NVCL45", // HEVC_NAL_RSV_NVCL45
+	"RSV_NVCL46", // HEVC_NAL_RSV_NVCL46
+	"RSV_NVCL47", // HEVC_NAL_RSV_NVCL47
+	"UNSPEC48", // HEVC_NAL_UNSPEC48
+	"UNSPEC49", // HEVC_NAL_UNSPEC49
+	"UNSPEC50", // HEVC_NAL_UNSPEC50
+	"UNSPEC51", // HEVC_NAL_UNSPEC51
+	"UNSPEC52", // HEVC_NAL_UNSPEC52
+	"UNSPEC53", // HEVC_NAL_UNSPEC53
+	"UNSPEC54", // HEVC_NAL_UNSPEC54
+	"UNSPEC55", // HEVC_NAL_UNSPEC55
+	"UNSPEC56", // HEVC_NAL_UNSPEC56
+	"UNSPEC57", // HEVC_NAL_UNSPEC57
+	"UNSPEC58", // HEVC_NAL_UNSPEC58
+	"UNSPEC59", // HEVC_NAL_UNSPEC59
+	"UNSPEC60", // HEVC_NAL_UNSPEC60
+	"UNSPEC61", // HEVC_NAL_UNSPEC61
+	"UNSPEC62", // HEVC_NAL_UNSPEC62
+	"UNSPEC63", // HEVC_NAL_UNSPEC63
+};
+
+/*
+ * Map a nal_unit_type value to its printable name for logging.
+ * No bounds check: callers must pass 0..63 — in this file the value
+ * always comes from a 6-bit get_bits() read, which guarantees the range.
+ */
+static const char *hevc_nal_unit_name(int nal_type)
+{
+	return hevc_nal_type_name[nal_type];
+}
+
+/**
+* Parse one start-code-prefixed parameter-set NAL unit (VPS or SPS)
+* found inside codec extradata.
+*
+* @param data buffer beginning at or before a start code.
+* @param size number of bytes available in @data.
+* @param ps   output parameter sets; vps_parsed/sps_parsed are set on
+*             success.  Other NAL types are skipped with a log message.
+* @return 0 on success or on an unsupported NAL type, negative on error.
+*/
+static int decode_extradata_ps(u8 *data, int size, struct h265_param_sets *ps)
+{
+	int ret = 0;
+	struct get_bits_context gb;
+	u32 src_len, rbsp_size = 0;
+	u8 *rbsp_buf = NULL;
+	int nalu_pos, nuh_layer_id, temporal_id;
+	u32 nal_type;
+	u8 *p = data;
+	u32 len = size;
+
+	nalu_pos = find_start_code(p, len);
+	if (nalu_pos < 0)
+		return -1;
+
+	/* Un-escape the payload (strip emulation-prevention bytes) into a
+	 * fresh buffer; released with vfree() below — NOTE(review): assumes
+	 * nal_unit_extract_rbsp() vmalloc-allocates, confirm there. */
+	src_len = calc_nal_len(p + nalu_pos, size - nalu_pos);
+	rbsp_buf = nal_unit_extract_rbsp(p + nalu_pos, src_len, &rbsp_size);
+	if (rbsp_buf == NULL)
+		return -ENOMEM;
+
+	ret = init_get_bits8(&gb, rbsp_buf, rbsp_size);
+	if (ret < 0)
+		goto out;
+
+	/* forbidden_zero_bit of the NAL unit header must be 0 (7.4.2.2). */
+	if (get_bits1(&gb) != 0) {
+		ret = -1;
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "invalid data, return!\n");
+		goto out;
+	}
+
+	nal_type	= get_bits(&gb, 6);
+	nuh_layer_id	= get_bits(&gb, 6);
+	/* nuh_temporal_id_plus1 must be non-zero, so -1 flags corruption. */
+	temporal_id	= get_bits(&gb, 3) - 1;
+	if (temporal_id < 0) {
+		ret = -1;
+		goto out;
+	}
+
+	/*pr_info("nal_unit_type: %d(%s), nuh_layer_id: %d, temporal_id: %d\n",
+		nal_type, hevc_nal_unit_name(nal_type),
+		nuh_layer_id, temporal_id);*/
+
+	switch (nal_type) {
+	case HEVC_NAL_VPS:
+		ret = ff_hevc_parse_vps(&gb, &ps->vps);
+		if (ret < 0)
+			goto out;
+		ps->vps_parsed = true;
+		break;
+	case HEVC_NAL_SPS:
+		ret = ff_hevc_parse_sps(&gb, &ps->sps);
+		if (ret < 0)
+			goto out;
+		ps->sps_parsed = true;
+		break;
+	/*case HEVC_NAL_PPS:
+		ret = ff_hevc_decode_nal_pps(&gb, NULL, ps);
+		if (ret < 0)
+			goto out;
+		ps->pps_parsed = true;
+		break;*/
+	default:
+		/* Not an error: extradata may carry SEI etc.; ret stays 0. */
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Unsupport parser nal type (%s).\n",
+			hevc_nal_unit_name(nal_type));
+		break;
+	}
+
+out:
+	vfree(rbsp_buf);
+
+	return ret;
+}
+
+/*
+ * Scan @buf for start codes and parse each parameter-set NAL found,
+ * stopping early once an SPS has been parsed.
+ *
+ * NOTE(review): `len` is refreshed only inside the found-branch and is
+ * computed *before* `p` advances by `j`, while `p` also moves one byte
+ * every iteration — so `len` can overstate the bytes remaining at `p`.
+ * Whether this can over-read depends on find_start_code()'s own bounds
+ * handling; TODO confirm.
+ *
+ * Returns 0 on success, or the first negative error from the parser.
+ */
+int h265_decode_extradata_ps(u8 *buf, int size, struct h265_param_sets *ps)
+{
+	int ret = 0, i = 0, j = 0;
+	u8 *p = buf;
+	int len = size;
+
+	/* i merely bounds the number of scan iterations; p is the cursor. */
+	for (i = 4; i < size; i++) {
+		j = find_start_code(p, len);
+		if (j > 0) {
+			len = size - (p - buf);
+			ret = decode_extradata_ps(p, len, ps);
+			if (ret) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "parse extra data failed. err: %d\n", ret);
+				return ret;
+			}
+
+			/* SPS is the last set this parser needs; stop here. */
+			if (ps->sps_parsed)
+				break;
+
+			p += j;
+		}
+		p++;
+	}
+
+	return ret;
+}
+
diff --git a/drivers/amvdec_ports/decoder/aml_hevc_parser.h b/drivers/amvdec_ports/decoder/aml_hevc_parser.h
new file mode 100644
index 0000000..0c81cb5
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/aml_hevc_parser.h
@@ -0,0 +1,563 @@
+/*
+ * drivers/amvdec_ports/decoder/aml_hevc_parser.h
+ *
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+
+#ifndef AML_HEVC_PARSER_H
+#define AML_HEVC_PARSER_H
+
+#include "../aml_vcodec_drv.h"
+#include "../utils/common.h"
+
+
+/* Legacy libavcodec-style limits kept for source compatibility; the
+ * spec-derived bounds actually used by the parser live in the HEVC_MAX_*
+ * enum further below. */
+#define MAX_DPB_SIZE				16 // A.4.1
+#define MAX_REFS				16
+
+#define MAX_NB_THREADS				16
+#define SHIFT_CTB_WPP				2
+
+/**
+ * 7.4.2.1
+ */
+#define MAX_SUB_LAYERS				7
+#define MAX_VPS_COUNT				16
+#define MAX_SPS_COUNT				32
+#define MAX_PPS_COUNT				256
+#define MAX_SHORT_TERM_RPS_COUNT 		64
+#define MAX_CU_SIZE				128
+
+//TODO: check if this is really the maximum
+#define MAX_TRANSFORM_DEPTH			5
+
+#define MAX_TB_SIZE				32
+#define MAX_PB_SIZE				64
+#define MAX_LOG2_CTB_SIZE			6
+#define MAX_QP					51
+#define DEFAULT_INTRA_TC_OFFSET			2
+
+#define HEVC_CONTEXTS				183
+
+#define MRG_MAX_NUM_CANDS			5
+
+/* Reference picture list indices. */
+#define L0					0
+#define L1					1
+
+#define EPEL_EXTRA_BEFORE			1
+#define EPEL_EXTRA_AFTER			2
+#define EPEL_EXTRA				3
+
+#define FF_PROFILE_HEVC_MAIN			1
+#define FF_PROFILE_HEVC_MAIN_10			2
+#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE	3
+#define FF_PROFILE_HEVC_REXT			4
+
+/**
+ * Value of the luma sample at position (x, y) in the 2D array tab.
+ * These expect an `s` decoder context to be in scope at the use site.
+ */
+#define SAMPLE(tab, x, y) ((tab)[(y) * s->sps->width + (x)])
+#define SAMPLE_CTB(tab, x, y) ((tab)[(y) * min_cb_width + (x)])
+#define SAMPLE_CBF(tab, x, y) ((tab)[((y) & ((1<<log2_trafo_size)-1)) * MAX_CU_SIZE + ((x) & ((1<<log2_trafo_size)-1))])
+
+/* Picture-type tests on the current NAL unit type (8.1). */
+#define IS_IDR(s) (s->nal_unit_type == NAL_IDR_W_RADL || s->nal_unit_type == NAL_IDR_N_LP)
+#define IS_BLA(s) (s->nal_unit_type == NAL_BLA_W_RADL || s->nal_unit_type == NAL_BLA_W_LP || \
+                   s->nal_unit_type == NAL_BLA_N_LP)
+#define IS_IRAP(s) (s->nal_unit_type >= 16 && s->nal_unit_type <= 23)
+
+/**
+ * Table 7-3: NAL unit type codes
+ * (values of the 6-bit nal_unit_type field in the NAL unit header)
+ */
+enum HEVCNALUnitType {
+	HEVC_NAL_TRAIL_N    = 0,
+	HEVC_NAL_TRAIL_R    = 1,
+	HEVC_NAL_TSA_N      = 2,
+	HEVC_NAL_TSA_R      = 3,
+	HEVC_NAL_STSA_N     = 4,
+	HEVC_NAL_STSA_R     = 5,
+	HEVC_NAL_RADL_N     = 6,
+	HEVC_NAL_RADL_R     = 7,
+	HEVC_NAL_RASL_N     = 8,
+	HEVC_NAL_RASL_R     = 9,
+	HEVC_NAL_VCL_N10    = 10,
+	HEVC_NAL_VCL_R11    = 11,
+	HEVC_NAL_VCL_N12    = 12,
+	HEVC_NAL_VCL_R13    = 13,
+	HEVC_NAL_VCL_N14    = 14,
+	HEVC_NAL_VCL_R15    = 15,
+	HEVC_NAL_BLA_W_LP   = 16,
+	HEVC_NAL_BLA_W_RADL = 17,
+	HEVC_NAL_BLA_N_LP   = 18,
+	HEVC_NAL_IDR_W_RADL = 19,
+	HEVC_NAL_IDR_N_LP   = 20,
+	HEVC_NAL_CRA_NUT    = 21,
+	HEVC_NAL_IRAP_VCL22 = 22,
+	HEVC_NAL_IRAP_VCL23 = 23,
+	HEVC_NAL_RSV_VCL24  = 24,
+	HEVC_NAL_RSV_VCL25  = 25,
+	HEVC_NAL_RSV_VCL26  = 26,
+	HEVC_NAL_RSV_VCL27  = 27,
+	HEVC_NAL_RSV_VCL28  = 28,
+	HEVC_NAL_RSV_VCL29  = 29,
+	HEVC_NAL_RSV_VCL30  = 30,
+	HEVC_NAL_RSV_VCL31  = 31,
+	HEVC_NAL_VPS        = 32,
+	HEVC_NAL_SPS        = 33,
+	HEVC_NAL_PPS        = 34,
+	HEVC_NAL_AUD        = 35,
+	HEVC_NAL_EOS_NUT    = 36,
+	HEVC_NAL_EOB_NUT    = 37,
+	HEVC_NAL_FD_NUT     = 38,
+	HEVC_NAL_SEI_PREFIX = 39,
+	HEVC_NAL_SEI_SUFFIX = 40,
+};
+
+/* slice_type syntax element values (Table 7-7). */
+enum HEVCSliceType {
+	HEVC_SLICE_B = 0,
+	HEVC_SLICE_P = 1,
+	HEVC_SLICE_I = 2,
+};
+
+/* Spec-derived bounds; each entry cites the H.265 clause it comes from. */
+enum {
+	// 7.4.3.1: vps_max_layers_minus1 is in [0, 62].
+	HEVC_MAX_LAYERS     = 63,
+	// 7.4.3.1: vps_max_sub_layers_minus1 is in [0, 6].
+	HEVC_MAX_SUB_LAYERS = 7,
+	// 7.4.3.1: vps_num_layer_sets_minus1 is in [0, 1023].
+	HEVC_MAX_LAYER_SETS = 1024,
+
+	// 7.4.2.1: vps_video_parameter_set_id is u(4).
+	HEVC_MAX_VPS_COUNT = 16,
+	// 7.4.3.2.1: sps_seq_parameter_set_id is in [0, 15].
+	HEVC_MAX_SPS_COUNT = 16,
+	// 7.4.3.3.1: pps_pic_parameter_set_id is in [0, 63].
+	HEVC_MAX_PPS_COUNT = 64,
+
+	// A.4.2: MaxDpbSize is bounded above by 16.
+	HEVC_MAX_DPB_SIZE = 16,
+	// 7.4.3.1: vps_max_dec_pic_buffering_minus1[i] is in [0, MaxDpbSize - 1].
+	HEVC_MAX_REFS     = HEVC_MAX_DPB_SIZE,
+
+	// 7.4.3.2.1: num_short_term_ref_pic_sets is in [0, 64].
+	HEVC_MAX_SHORT_TERM_REF_PIC_SETS = 64,
+	// 7.4.3.2.1: num_long_term_ref_pics_sps is in [0, 32].
+	HEVC_MAX_LONG_TERM_REF_PICS      = 32,
+
+	// A.3: all profiles require that CtbLog2SizeY is in [4, 6].
+	HEVC_MIN_LOG2_CTB_SIZE = 4,
+	HEVC_MAX_LOG2_CTB_SIZE = 6,
+
+	// E.3.2: cpb_cnt_minus1[i] is in [0, 31].
+	HEVC_MAX_CPB_CNT = 32,
+
+	// A.4.1: in table A.6 the highest level allows a MaxLumaPs of 35 651 584.
+	HEVC_MAX_LUMA_PS = 35651584,
+	// A.4.1: pic_width_in_luma_samples and pic_height_in_luma_samples are
+	// constrained to be not greater than sqrt(MaxLumaPs * 8).  Hence height/
+	// width are bounded above by sqrt(8 * 35651584) = 16888.2 samples.
+	HEVC_MAX_WIDTH  = 16888,
+	HEVC_MAX_HEIGHT = 16888,
+
+	// A.4.1: table A.6 allows at most 22 tile rows for any level.
+	HEVC_MAX_TILE_ROWS    = 22,
+	// A.4.1: table A.6 allows at most 20 tile columns for any level.
+	HEVC_MAX_TILE_COLUMNS = 20,
+
+	// 7.4.7.1: in the worst case (tiles_enabled_flag and
+	// entropy_coding_sync_enabled_flag are both set), entry points can be
+	// placed at the beginning of every Ctb row in every tile, giving an
+	// upper bound of (num_tile_columns_minus1 + 1) * PicHeightInCtbsY - 1.
+	// Only a stream with very high resolution and perverse parameters could
+	// get near that, though, so set a lower limit here with the maximum
+	// possible value for 4K video (at most 135 16x16 Ctb rows).
+	HEVC_MAX_ENTRY_POINT_OFFSETS = HEVC_MAX_TILE_COLUMNS * 135,
+};
+
+/* Short-term reference picture set (7.4.8): delta POCs + usage flags. */
+struct ShortTermRPS {
+	u32 num_negative_pics;
+	int num_delta_pocs;
+	int rps_idx_num_delta_pocs;
+	int delta_poc[32];
+	u8 used[32];
+};
+
+/* Long-term reference pictures referenced by the current slice. */
+struct LongTermRPS {
+	int poc[32];
+	u8 used[32];
+	u8 nb_refs;
+};
+
+/* Parsed slice segment header state (7.3.6). */
+struct SliceHeader {
+	u32 pps_id;
+
+	///< address (in raster order) of the first block in the current slice segment
+	u32   slice_segment_addr;
+	///< address (in raster order) of the first block in the current slice
+	u32   slice_addr;
+
+	enum HEVCSliceType slice_type;
+
+	int pic_order_cnt_lsb;
+
+	u8 first_slice_in_pic_flag;
+	u8 dependent_slice_segment_flag;
+	u8 pic_output_flag;
+	u8 colour_plane_id;
+
+	///< RPS coded in the slice header itself is stored here
+	int short_term_ref_pic_set_sps_flag;
+	int short_term_ref_pic_set_size;
+	struct ShortTermRPS slice_rps;
+	const struct ShortTermRPS *short_term_rps;
+	int long_term_ref_pic_set_size;
+	struct LongTermRPS long_term_rps;
+	u32 list_entry_lx[2][32];
+
+	u8 rpl_modification_flag[2];
+	u8 no_output_of_prior_pics_flag;
+	u8 slice_temporal_mvp_enabled_flag;
+
+	///< active reference count per list (L0/L1)
+	u32 nb_refs[2];
+
+	u8 slice_sample_adaptive_offset_flag[3];
+	u8 mvd_l1_zero_flag;
+
+	u8 cabac_init_flag;
+	u8 disable_deblocking_filter_flag; ///< slice_header_disable_deblocking_filter_flag
+	u8 slice_loop_filter_across_slices_enabled_flag;
+	u8 collocated_list;
+
+	u32 collocated_ref_idx;
+
+	int slice_qp_delta;
+	int slice_cb_qp_offset;
+	int slice_cr_qp_offset;
+
+	u8 cu_chroma_qp_offset_enabled_flag;
+
+	int beta_offset;    ///< beta_offset_div2 * 2
+	int tc_offset;      ///< tc_offset_div2 * 2
+
+	u32 max_num_merge_cand; ///< 5 - 5_minus_max_num_merge_cand
+
+	u8 *entry_point_offset;
+	int * offset;
+	int * size;
+	int num_entry_point_offsets;
+
+	char slice_qp;
+
+	u8 luma_log2_weight_denom;
+	s16 chroma_log2_weight_denom;
+
+	///< weighted-prediction tables, per reference index
+	s16 luma_weight_l0[16];
+	s16 chroma_weight_l0[16][2];
+	s16 chroma_weight_l1[16][2];
+	s16 luma_weight_l1[16];
+
+	s16 luma_offset_l0[16];
+	s16 chroma_offset_l0[16][2];
+
+	s16 luma_offset_l1[16];
+	s16 chroma_offset_l1[16][2];
+
+	int slice_ctb_addr_rs;
+};
+
+/* Conformance/display cropping window; offsets in luma samples from
+ * each picture edge (scaled from chroma units at parse time). */
+struct HEVCWindow {
+	u32 left_offset;
+	u32 right_offset;
+	u32 top_offset;
+	u32 bottom_offset;
+};
+
+/* Video usability information parsed from the SPS (Annex E.2.1). */
+struct VUI {
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+	struct AVRational sar;	/* sample aspect ratio; {0,1} = unspecified */
+#endif
+	int overscan_info_present_flag;
+	int overscan_appropriate_flag;
+
+	int video_signal_type_present_flag;
+	int video_format;
+	int video_full_range_flag;
+	int colour_description_present_flag;
+	u8 colour_primaries;
+	u8 transfer_characteristic;
+	u8 matrix_coeffs;
+
+	int chroma_loc_info_present_flag;
+	int chroma_sample_loc_type_top_field;
+	int chroma_sample_loc_type_bottom_field;
+	int neutra_chroma_indication_flag;
+
+	int field_seq_flag;
+	int frame_field_info_present_flag;
+
+	int default_display_window_flag;
+	struct HEVCWindow def_disp_win;
+
+	int vui_timing_info_present_flag;
+	u32 vui_num_units_in_tick;
+	u32 vui_time_scale;
+	int vui_poc_proportional_to_timing_flag;
+	int vui_num_ticks_poc_diff_one_minus1;
+	int vui_hrd_parameters_present_flag;
+
+	int bitstream_restriction_flag;
+	int tiles_fixed_structure_flag;
+	int motion_vectors_over_pic_boundaries_flag;
+	int restricted_ref_pic_lists_flag;
+	int min_spatial_segmentation_idc;
+	int max_bytes_per_pic_denom;
+	int max_bits_per_min_cu_denom;
+	int log2_max_mv_length_horizontal;
+	int log2_max_mv_length_vertical;
+};
+
+/* One profile_tier_level() record (7.3.3) — shared by the general layer
+ * and each sub-layer. */
+struct PTLCommon {
+    u8 profile_space;
+    u8 tier_flag;
+    u8 profile_idc;
+    u8 profile_compatibility_flag[32];
+    u8 level_idc;
+    u8 progressive_source_flag;
+    u8 interlaced_source_flag;
+    u8 non_packed_constraint_flag;
+    u8 frame_only_constraint_flag;
+};
+
+/* Full profile/tier/level info: general record + per-sub-layer records
+ * with their presence flags. */
+struct PTL {
+    struct PTLCommon general_ptl;
+    struct PTLCommon sub_layer_ptl[HEVC_MAX_SUB_LAYERS];
+
+    u8 sub_layer_profile_present_flag[HEVC_MAX_SUB_LAYERS];
+    u8 sub_layer_level_present_flag[HEVC_MAX_SUB_LAYERS];
+};
+
+/* Parsed video parameter set (7.3.2.1). */
+struct h265_VPS_t {
+	u8 vps_temporal_id_nesting_flag;
+	int vps_max_layers;
+	int vps_max_sub_layers; ///< vps_max_temporal_layers_minus1 + 1
+
+	struct PTL ptl;
+	int vps_sub_layer_ordering_info_present_flag;
+	u32 vps_max_dec_pic_buffering[HEVC_MAX_SUB_LAYERS];
+	u32 vps_num_reorder_pics[HEVC_MAX_SUB_LAYERS];
+	u32 vps_max_latency_increase[HEVC_MAX_SUB_LAYERS];
+	int vps_max_layer_id;
+	int vps_num_layer_sets; ///< vps_num_layer_sets_minus1 + 1
+	u8 vps_timing_info_present_flag;
+	u32 vps_num_units_in_tick;
+	u32 vps_time_scale;
+	u8 vps_poc_proportional_to_timing_flag;
+	int vps_num_ticks_poc_diff_one; ///< vps_num_ticks_poc_diff_one_minus1 + 1
+	int vps_num_hrd_parameters;
+};
+
+/* Quantization scaling lists, indexed [sizeId][matrixId][coeff]. */
+struct ScalingList {
+	/* This is a little wasteful, since sizeID 0 only needs 8 coeffs,
+	* and size ID 3 only has 2 arrays, not 6. */
+	u8 sl[4][6][64];
+	u8 sl_dc[2][6];
+};
+
+/* Parsed sequence parameter set (7.3.2.2) plus values inferred from it
+ * at parse time (see the "Inferred parameters" section of the parser). */
+struct h265_SPS_t {
+	u8 vps_id;
+	u8 sps_id;
+	int chroma_format_idc;
+	u8 separate_colour_plane_flag;
+
+	struct HEVCWindow output_window;
+	struct HEVCWindow pic_conf_win;
+
+	int bit_depth;
+	int bit_depth_chroma;
+	int pixel_shift;
+	int pix_fmt;
+
+	u32 log2_max_poc_lsb;
+	int pcm_enabled_flag;
+
+	int max_sub_layers;
+	struct {
+		int max_dec_pic_buffering;
+		int num_reorder_pics;
+		int max_latency_increase;
+	} temporal_layer[HEVC_MAX_SUB_LAYERS];
+	u8 temporal_id_nesting_flag;
+
+	struct VUI vui;
+	struct PTL ptl;
+
+	u8 scaling_list_enable_flag;
+	struct ScalingList scaling_list;
+
+	u32 nb_st_rps;
+	struct ShortTermRPS st_rps[HEVC_MAX_SHORT_TERM_REF_PIC_SETS];
+
+	u8 amp_enabled_flag;
+	u8 sao_enabled;
+
+	u8 long_term_ref_pics_present_flag;
+	u16 lt_ref_pic_poc_lsb_sps[HEVC_MAX_LONG_TERM_REF_PICS];
+	u8 used_by_curr_pic_lt_sps_flag[HEVC_MAX_LONG_TERM_REF_PICS];
+	u8 num_long_term_ref_pics_sps;
+
+	struct {
+		u8 bit_depth;
+		u8 bit_depth_chroma;
+		u32 log2_min_pcm_cb_size;
+		u32 log2_max_pcm_cb_size;
+		u8 loop_filter_disable_flag;
+	} pcm;
+	u8 sps_temporal_mvp_enabled_flag;
+	u8 sps_strong_intra_smoothing_enable_flag;
+
+	u32 log2_min_cb_size;
+	u32 log2_diff_max_min_coding_block_size;
+	u32 log2_min_tb_size;
+	u32 log2_max_trafo_size;
+	u32 log2_ctb_size;
+	u32 log2_min_pu_size;
+
+	int max_transform_hierarchy_depth_inter;
+	int max_transform_hierarchy_depth_intra;
+
+	int sps_range_extension_flag;
+	int transform_skip_rotation_enabled_flag;
+	int transform_skip_context_enabled_flag;
+	int implicit_rdpcm_enabled_flag;
+	int explicit_rdpcm_enabled_flag;
+	int extended_precision_processing_flag;
+	int intra_smoothing_disabled_flag;
+	int high_precision_offsets_enabled_flag;
+	int persistent_rice_adaptation_enabled_flag;
+	int cabac_bypass_alignment_enabled_flag;
+
+	///< coded frame dimension in various units
+	int width;
+	int height;
+	int ctb_width;
+	int ctb_height;
+	int ctb_size;
+	int min_cb_width;
+	int min_cb_height;
+	int min_tb_width;
+	int min_tb_height;
+	int min_pu_width;
+	int min_pu_height;
+	int tb_mask;
+
+	int hshift[3];
+	int vshift[3];
+
+	int qp_bd_offset;
+
+	///< raw SPS NAL payload copy and its length
+	u8 data[4096];
+	int data_size;
+};
+
+/* Parsed picture parameter set (7.3.2.3).  The trailing pointer members
+ * are derived lookup tables, not bitstream fields — NOTE(review): nothing
+ * in this header shows who allocates/frees them; check the PPS decoder. */
+struct h265_PPS_t {
+	u32 sps_id; ///< seq_parameter_set_id
+
+	u8 sign_data_hiding_flag;
+
+	u8 cabac_init_present_flag;
+
+	int num_ref_idx_l0_default_active; ///< num_ref_idx_l0_default_active_minus1 + 1
+	int num_ref_idx_l1_default_active; ///< num_ref_idx_l1_default_active_minus1 + 1
+	int pic_init_qp_minus26;
+
+	u8 constrained_intra_pred_flag;
+	u8 transform_skip_enabled_flag;
+
+	u8 cu_qp_delta_enabled_flag;
+	int diff_cu_qp_delta_depth;
+
+	int cb_qp_offset;
+	int cr_qp_offset;
+	u8 pic_slice_level_chroma_qp_offsets_present_flag;
+	u8 weighted_pred_flag;
+	u8 weighted_bipred_flag;
+	u8 output_flag_present_flag;
+	u8 transquant_bypass_enable_flag;
+
+	u8 dependent_slice_segments_enabled_flag;
+	u8 tiles_enabled_flag;
+	u8 entropy_coding_sync_enabled_flag;
+
+	int num_tile_columns;   ///< num_tile_columns_minus1 + 1
+	int num_tile_rows;      ///< num_tile_rows_minus1 + 1
+	u8 uniform_spacing_flag;
+	u8 loop_filter_across_tiles_enabled_flag;
+
+	u8 seq_loop_filter_across_slices_enabled_flag;
+
+	u8 deblocking_filter_control_present_flag;
+	u8 deblocking_filter_override_enabled_flag;
+	u8 disable_dbf;
+	int beta_offset;    ///< beta_offset_div2 * 2
+	int tc_offset;      ///< tc_offset_div2 * 2
+
+	u8 scaling_list_data_present_flag;
+	struct ScalingList scaling_list;
+
+	u8 lists_modification_present_flag;
+	int log2_parallel_merge_level; ///< log2_parallel_merge_level_minus2 + 2
+	int num_extra_slice_header_bits;
+	u8 slice_header_extension_present_flag;
+	u8 log2_max_transform_skip_block_size;
+	u8 cross_component_prediction_enabled_flag;
+	u8 chroma_qp_offset_list_enabled_flag;
+	u8 diff_cu_chroma_qp_offset_depth;
+	u8 chroma_qp_offset_list_len_minus1;
+	char  cb_qp_offset_list[6];
+	char  cr_qp_offset_list[6];
+	u8 log2_sao_offset_scale_luma;
+	u8 log2_sao_offset_scale_chroma;
+
+	// Inferred parameters
+	u32 *column_width;  ///< ColumnWidth
+	u32 *row_height;    ///< RowHeight
+	u32 *col_bd;        ///< ColBd
+	u32 *row_bd;        ///< RowBd
+	int *col_idxX;
+
+	int *ctb_addr_rs_to_ts; ///< CtbAddrRSToTS
+	int *ctb_addr_ts_to_rs; ///< CtbAddrTSToRS
+	int *tile_id;           ///< TileId
+	int *tile_pos_rs;       ///< TilePosRS
+	int *min_tb_addr_zs;    ///< MinTbAddrZS
+	int *min_tb_addr_zs_tab;///< MinTbAddrZS
+};
+
+/* Bundle of the most recently parsed parameter sets plus flags saying
+ * which of them hold valid data. */
+struct h265_param_sets {
+	bool vps_parsed;
+	bool sps_parsed;
+	bool pps_parsed;
+	/* currently active parameter sets */
+	struct h265_VPS_t vps;
+	struct h265_SPS_t sps;
+	struct h265_PPS_t pps;
+};
+
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+int h265_decode_extradata_ps(u8 *data, int size, struct h265_param_sets *ps);
+#else
+/*
+ * Stub for builds without the software parser: always reports failure.
+ * Must be 'static inline', not plain 'inline': a non-static inline
+ * definition in a header has external linkage, so every translation unit
+ * including this header would emit (and multiply define) the symbol, or
+ * leave it undefined if the compiler declines to inline it.
+ */
+static inline int h265_decode_extradata_ps(u8 *data, int size, struct h265_param_sets *ps) { return -1; }
+#endif
+
+#endif /* AML_HEVC_PARSER_H */
+
diff --git a/drivers/amvdec_ports/decoder/aml_mjpeg_parser.c b/drivers/amvdec_ports/decoder/aml_mjpeg_parser.c
new file mode 100644
index 0000000..fd56755
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/aml_mjpeg_parser.c
@@ -0,0 +1,416 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "aml_mjpeg_parser.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+#include "../utils/golomb.h"
+#include "../utils/common.h"
+#include "utils.h"
+
+/* return the 8 bit start code value and update the search
+state. Return -1 if no start code found */
+static int find_marker(const u8 **pbuf_ptr, const u8 *buf_end)
+{
+	const u8 *buf_ptr;
+	u32 v, v2;
+	int val;
+	int skipped = 0; /* bytes discarded before a marker was seen */
+
+	buf_ptr = *pbuf_ptr;
+	/* Need at least two bytes: the 0xff prefix plus the marker code. */
+	while (buf_end - buf_ptr > 1) {
+		v  = *buf_ptr++;
+		v2 = *buf_ptr;
+		/* A marker is 0xff followed by a code in [0xc0, 0xfe].
+		 * The trailing 'buf_ptr < buf_end' test is redundant (the
+		 * loop condition already guarantees it) but harmless. */
+		if ((v == 0xff) && (v2 >= 0xc0) && (v2 <= 0xfe) && buf_ptr < buf_end) {
+			val = *buf_ptr++;
+			goto found;
+		}
+		skipped++;
+	}
+	/* No marker found: park the caller's pointer at end of buffer. */
+	buf_ptr = buf_end;
+	val = -1;
+found:
+	v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "find_marker skipped %d bytes\n", skipped);
+	*pbuf_ptr = buf_ptr;
+
+	return val;
+}
+
+/*
+ * Locate the next JPEG marker and, for SOS payloads, copy the entangled
+ * entropy-coded data into s->buffer with byte-stuffing removed.
+ * Advances *buf_ptr past the marker; returns the marker code or -1.
+ * *unescaped_buf_ptr / *unescaped_buf_size describe the (possibly
+ * unescaped) payload the caller should parse next.
+ */
+int ff_mjpeg_find_marker(struct MJpegDecodeContext *s,
+	const u8 **buf_ptr, const u8 *buf_end,
+	const u8 **unescaped_buf_ptr,
+	int *unescaped_buf_size)
+{
+	int start_code;
+
+	start_code = find_marker(buf_ptr, buf_end);
+
+	/* unescape buffer of SOS, use special treatment for JPEG-LS */
+	if (start_code == SOS && !s->ls) {
+		const u8 *src = *buf_ptr;
+		const u8 *ptr = src;
+		u8 *dst = s->buffer;
+
+		/* Flush the bytes accumulated in [src, ptr), minus the last
+		 * 'skip' bytes, into dst; afterwards src catches up to ptr. */
+		#define copy_data_segment(skip) do {			\
+				int length = (ptr - src) - (skip);	\
+				if (length > 0) {			\
+					memcpy(dst, src, length);	\
+					dst += length;			\
+					src = ptr;			\
+				}					\
+			} while (0)
+
+
+		while (ptr < buf_end) {
+			u8 x = *(ptr++);
+
+			if (x == 0xff) {
+				int skip = 0;
+				/* Collapse fill bytes: runs of 0xff. */
+				while (ptr < buf_end && x == 0xff) {
+					x = *(ptr++);
+					skip++;
+				}
+
+				/* 0xFF, 0xFF, ... */
+				if (skip > 1) {
+					copy_data_segment(skip);
+
+					/* decrement src as it is equal to ptr after the
+					* copy_data_segment macro and we might want to
+					* copy the current value of x later on */
+					src--;
+				}
+
+				/* RSTn markers (0xd0-0xd7) stay in the stream;
+				 * any other marker ends the scan data. 0xFF00
+				 * is a stuffed 0xFF data byte: drop the 0x00. */
+				if (x < 0xd0 || x > 0xd7) {
+					copy_data_segment(1);
+					if (x)
+						break;
+				}
+			}
+			if (src < ptr)
+				copy_data_segment(0);
+		}
+		#undef copy_data_segment
+
+		*unescaped_buf_ptr  = s->buffer;
+		*unescaped_buf_size = dst - s->buffer;
+		/* Zero-pad so downstream bit readers can overread safely. */
+		memset(s->buffer + *unescaped_buf_size, 0,
+			AV_INPUT_BUFFER_PADDING_SIZE);
+
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "escaping removed %d bytes\n",
+			(int)((buf_end - *buf_ptr) - (dst - s->buffer)));
+	} else if (start_code == SOS && s->ls) {
+		/* JPEG-LS escaping works at the bit level: 0xFF is followed
+		 * by a 7-bit (not 8-bit) value with the top bit stuffed out. */
+		const u8 *src = *buf_ptr;
+		u8 *dst  = s->buffer;
+		int bit_count = 0;
+		int t = 0, b = 0;
+		struct put_bits_context pb;
+
+		/* find marker */
+		while (src + t < buf_end) {
+			u8 x = src[t++];
+			if (x == 0xff) {
+				while ((src + t < buf_end) && x == 0xff)
+					x = src[t++];
+				if (x & 0x80) {
+					t -= FFMIN(2, t);
+					break;
+				}
+			}
+		}
+		bit_count = t * 8;
+		init_put_bits(&pb, dst, t);
+
+		/* unescape bitstream */
+		while (b < t) {
+			u8 x = src[b++];
+			put_bits(&pb, 8, x);
+			if (x == 0xFF && b < t) {
+				x = src[b++];
+				if (x & 0x80) {
+					v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid escape sequence\n");
+					x &= 0x7f;
+				}
+				put_bits(&pb, 7, x);
+				bit_count--;
+			}
+		}
+		flush_put_bits(&pb);
+
+		*unescaped_buf_ptr	= dst;
+		*unescaped_buf_size = (bit_count + 7) >> 3;
+		memset(s->buffer + *unescaped_buf_size, 0,
+			AV_INPUT_BUFFER_PADDING_SIZE);
+	} else {
+		/* Non-SOS segments are parsed in place, no unescaping. */
+		*unescaped_buf_ptr	= *buf_ptr;
+		*unescaped_buf_size = buf_end - *buf_ptr;
+	}
+
+	return start_code;
+}
+
+
+/*
+ * Parse a Start-Of-Frame segment from s->gb: sample precision, frame
+ * dimensions and per-component sampling/quant-table indices.
+ * Returns 0 on success, -1 on malformed input.
+ */
+int ff_mjpeg_decode_sof(struct MJpegDecodeContext *s)
+{
+	int len, nb_components, i, width, height, bits, size_change;
+	int h_count[MAX_COMPONENTS] = { 0 };
+	int v_count[MAX_COMPONENTS] = { 0 };
+
+	s->cur_scan = 0;
+	memset(s->upscale_h, 0, sizeof(s->upscale_h));
+	memset(s->upscale_v, 0, sizeof(s->upscale_v));
+
+	/* XXX: verify len field validity */
+	/* 'len' is read only to consume the 16-bit segment length. */
+	len     = get_bits(&s->gb, 16);
+	bits    = get_bits(&s->gb, 8);
+
+	if (bits > 16 || bits < 1) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "bits %d is invalid\n", bits);
+		return -1;
+	}
+
+	height = get_bits(&s->gb, 16);
+	width  = get_bits(&s->gb, 16);
+
+	v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "sof0: picture: %dx%d\n", width, height);
+
+	nb_components = get_bits(&s->gb, 8);
+	if (nb_components <= 0 ||
+		nb_components > MAX_COMPONENTS)
+		return -1;
+
+	s->nb_components = nb_components;
+	s->h_max         = 1;
+	s->v_max         = 1;
+	for (i = 0; i < nb_components; i++) {
+		/* component id */
+		s->component_id[i] = get_bits(&s->gb, 8) - 1;
+		h_count[i]         = get_bits(&s->gb, 4);
+		v_count[i]         = get_bits(&s->gb, 4);
+		/* compute hmax and vmax (only used in interleaved case) */
+		if (h_count[i] > s->h_max)
+			s->h_max = h_count[i];
+		if (v_count[i] > s->v_max)
+			s->v_max = v_count[i];
+		s->quant_index[i] = get_bits(&s->gb, 8);
+		if (s->quant_index[i] >= 4) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "quant_index is invalid\n");
+			return -1;
+		}
+		if (!h_count[i] || !v_count[i]) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid sampling factor in component %d %d:%d\n",
+				i, h_count[i], v_count[i]);
+			return -1;
+		}
+
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "component %d %d:%d id: %d quant:%d\n",
+			i, h_count[i], v_count[i],
+		s->component_id[i], s->quant_index[i]);
+	}
+	/* Component ids spelling "CMYK" identify an Adobe CMYK image. */
+	if (nb_components == 4
+		&& s->component_id[0] == 'C' - 1
+		&& s->component_id[1] == 'M' - 1
+		&& s->component_id[2] == 'Y' - 1
+		&& s->component_id[3] == 'K' - 1)
+		s->adobe_transform = 0;
+
+	/* if different size, realloc/alloc picture */
+	if (width != s->width || height != s->height || bits != s->bits ||
+		memcmp(s->h_count, h_count, sizeof(h_count))                ||
+		memcmp(s->v_count, v_count, sizeof(v_count))) {
+		/* NOTE(review): size_change is set but never consumed in
+		 * this parser; kept for parity with the FFmpeg original. */
+		size_change = 1;
+
+		s->width      = width;
+		s->height     = height;
+		s->bits       = bits;
+		memcpy(s->h_count, h_count, sizeof(h_count));
+		memcpy(s->v_count, v_count, sizeof(v_count));
+		s->interlaced = 0;
+		s->got_picture = 0;
+	} else {
+		size_change = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Walk the marker segments of one JPEG image, decoding only the headers
+ * needed to learn the frame geometry (SOF*); stops at EOI.
+ * Returns 0 when an EOI was reached, negative on error or if no usable
+ * JPEG data was found.
+ */
+static int ff_mjpeg_decode_frame(u8 *buf, int buf_size, struct MJpegDecodeContext *s)
+{
+	const u8 *buf_end, *buf_ptr;
+	const u8 *unescaped_buf_ptr;
+	int unescaped_buf_size;
+	int start_code;
+	int ret = 0;
+
+	buf_ptr = buf;
+	buf_end = buf + buf_size;
+	while (buf_ptr < buf_end) {
+		/* find start next marker */
+		start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
+						&unescaped_buf_ptr,
+						&unescaped_buf_size);
+		/* EOF */
+		if (start_code < 0) {
+			break;
+		} else if (unescaped_buf_size > INT_MAX / 8) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
+				start_code, unescaped_buf_size, buf_size);
+			return -1;
+		}
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "marker=%x avail_size_in_buf=%d\n",
+			start_code, (int)(buf_end - buf_ptr));
+
+		/* Re-aim the bit reader at this segment's payload. */
+		ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
+		if (ret < 0) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "invalid buffer\n");
+			goto fail;
+		}
+
+		s->start_code = start_code;
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "startcode: %X\n", start_code);
+
+		/* Only geometry-bearing markers are processed here; anything
+		 * else (DQT, APPn, COM, ...) is skipped wholesale. */
+		switch (start_code) {
+		case SOF0:
+		case SOF1:
+		case SOF2:
+		case SOF3:
+		case SOF48:
+		case SOI:
+		case SOS:
+		case EOI:
+			break;
+		default:
+			goto skip;
+		}
+
+		switch (start_code) {
+		case SOI:
+			s->restart_interval = 0;
+			s->restart_count    = 0;
+			s->raw_image_buffer      = buf_ptr;
+			s->raw_image_buffer_size = buf_end - buf_ptr;
+			/* nothing to do on SOI */
+			break;
+		case SOF0:
+		case SOF1:
+			if (start_code == SOF0)
+				s->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
+			else
+				s->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
+			s->lossless    = 0;
+			s->ls          = 0;
+			s->progressive = 0;
+			if ((ret = ff_mjpeg_decode_sof(s)) < 0)
+				goto fail;
+			break;
+		case SOF2:
+			s->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
+			s->lossless    = 0;
+			s->ls          = 0;
+			s->progressive = 1;
+			if ((ret = ff_mjpeg_decode_sof(s)) < 0)
+				goto fail;
+			break;
+		case SOF3:
+			s->profile     = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
+			s->properties |= FF_CODEC_PROPERTY_LOSSLESS;
+			s->lossless    = 1;
+			s->ls          = 0;
+			s->progressive = 0;
+			if ((ret = ff_mjpeg_decode_sof(s)) < 0)
+				goto fail;
+			break;
+		case SOF48:
+			s->profile     = FF_PROFILE_MJPEG_JPEG_LS;
+			s->properties |= FF_CODEC_PROPERTY_LOSSLESS;
+			s->lossless    = 1;
+			s->ls          = 1;
+			s->progressive = 0;
+			if ((ret = ff_mjpeg_decode_sof(s)) < 0)
+				goto fail;
+			break;
+		case EOI:
+			goto the_end;
+		case DHT:
+		case LSE:
+		case SOS:
+		case DRI:
+		case SOF5:
+		case SOF6:
+		case SOF7:
+		case SOF9:
+		case SOF10:
+		case SOF11:
+		case SOF13:
+		case SOF14:
+		case SOF15:
+		case JPG:
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "mjpeg: unsupported coding type (%x)\n", start_code);
+			break;
+		}
+skip:
+		/* eof process start code */
+		/* Advance past however many bytes the segment parser read. */
+		buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "marker parser used %d bytes (%d bits)\n",
+			(get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
+	}
+
+	/* Loop exhausted the buffer without seeing an EOI. */
+	v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "No JPEG data found in image\n");
+	return -1;
+fail:
+	s->got_picture = 0;
+	return ret;
+the_end:
+	v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "decode frame unused %d bytes\n", (int)(buf_end - buf_ptr));
+
+	return 0;
+}
+
+/*
+ * Parse MJPEG extradata into 'ps'.  Allocates a padded scratch buffer
+ * for marker unescaping, runs the frame-header walk, and sets
+ * ps->head_parsed when valid dimensions were recovered.
+ * Returns 0 on success (even if nothing was parsed), negative on error.
+ */
+int mjpeg_decode_extradata_ps(u8 *buf, int size, struct mjpeg_param_sets *ps)
+{
+	int ret;
+
+	ps->head_parsed = false;
+
+	ps->dec_ps.buf_size = size;
+	/* Padding lets bit readers overread past the end safely. */
+	ps->dec_ps.buffer = vzalloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
+	if (!ps->dec_ps.buffer)
+		return -1;
+
+	ret = ff_mjpeg_decode_frame(buf, size, &ps->dec_ps);
+	if (ret) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "parse extra data failed. err: %d\n", ret);
+		vfree(ps->dec_ps.buffer);
+		return ret;
+	}
+
+	if (ps->dec_ps.width && ps->dec_ps.height)
+		ps->head_parsed = true;
+
+	vfree(ps->dec_ps.buffer);
+
+	return 0;
+}
+
diff --git a/drivers/amvdec_ports/decoder/aml_mjpeg_parser.h b/drivers/amvdec_ports/decoder/aml_mjpeg_parser.h
new file mode 100644
index 0000000..bbc9282
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/aml_mjpeg_parser.h
@@ -0,0 +1,186 @@
+/*
+ * drivers/amvdec_ports/decoder/aml_mjpeg_parser.h
+ *
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+#ifndef AML_MJPEG_PARSER_H
+#define AML_MJPEG_PARSER_H
+
+#include "../aml_vcodec_drv.h"
+#include "../utils/common.h"
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+#include "../utils/get_bits.h"
+#endif
+
+/* Profile identifiers; values deliberately equal the SOF marker codes. */
+#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT            0xc0
+#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT 0xc1
+#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT         0xc2
+#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS                0xc3
+#define FF_PROFILE_MJPEG_JPEG_LS                         0xf7
+
+/* Bit flags accumulated in MJpegDecodeContext.properties. */
+#define FF_CODEC_PROPERTY_LOSSLESS        0x00000001
+#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002
+
+/* Upper bound on frame components supported by this parser. */
+#define MAX_COMPONENTS 4
+
+/* JPEG marker codes (second byte after 0xFF; see ITU-T T.81 Annex B) */
+enum JpegMarker {
+    /* start of frame */
+    SOF0  = 0xc0,       /* baseline */
+    SOF1  = 0xc1,       /* extended sequential, huffman */
+    SOF2  = 0xc2,       /* progressive, huffman */
+    SOF3  = 0xc3,       /* lossless, huffman */
+
+    SOF5  = 0xc5,       /* differential sequential, huffman */
+    SOF6  = 0xc6,       /* differential progressive, huffman */
+    SOF7  = 0xc7,       /* differential lossless, huffman */
+    JPG   = 0xc8,       /* reserved for JPEG extension */
+    SOF9  = 0xc9,       /* extended sequential, arithmetic */
+    SOF10 = 0xca,       /* progressive, arithmetic */
+    SOF11 = 0xcb,       /* lossless, arithmetic */
+
+    SOF13 = 0xcd,       /* differential sequential, arithmetic */
+    SOF14 = 0xce,       /* differential progressive, arithmetic */
+    SOF15 = 0xcf,       /* differential lossless, arithmetic */
+
+    DHT   = 0xc4,       /* define huffman tables */
+
+    DAC   = 0xcc,       /* define arithmetic-coding conditioning */
+
+    /* restart with modulo 8 count "m" */
+    RST0  = 0xd0,
+    RST1  = 0xd1,
+    RST2  = 0xd2,
+    RST3  = 0xd3,
+    RST4  = 0xd4,
+    RST5  = 0xd5,
+    RST6  = 0xd6,
+    RST7  = 0xd7,
+
+    SOI   = 0xd8,       /* start of image */
+    EOI   = 0xd9,       /* end of image */
+    SOS   = 0xda,       /* start of scan */
+    DQT   = 0xdb,       /* define quantization tables */
+    DNL   = 0xdc,       /* define number of lines */
+    DRI   = 0xdd,       /* define restart interval */
+    DHP   = 0xde,       /* define hierarchical progression */
+    EXP   = 0xdf,       /* expand reference components */
+
+    /* application-reserved segments (JFIF uses APP0, EXIF APP1, ...) */
+    APP0  = 0xe0,
+    APP1  = 0xe1,
+    APP2  = 0xe2,
+    APP3  = 0xe3,
+    APP4  = 0xe4,
+    APP5  = 0xe5,
+    APP6  = 0xe6,
+    APP7  = 0xe7,
+    APP8  = 0xe8,
+    APP9  = 0xe9,
+    APP10 = 0xea,
+    APP11 = 0xeb,
+    APP12 = 0xec,
+    APP13 = 0xed,
+    APP14 = 0xee,
+    APP15 = 0xef,
+
+    /* reserved for JPEG extensions */
+    JPG0  = 0xf0,
+    JPG1  = 0xf1,
+    JPG2  = 0xf2,
+    JPG3  = 0xf3,
+    JPG4  = 0xf4,
+    JPG5  = 0xf5,
+    JPG6  = 0xf6,
+    SOF48 = 0xf7,       ///< JPEG-LS
+    LSE   = 0xf8,       ///< JPEG-LS extension parameters
+    JPG9  = 0xf9,
+    JPG10 = 0xfa,
+    JPG11 = 0xfb,
+    JPG12 = 0xfc,
+    JPG13 = 0xfd,
+
+    COM   = 0xfe,       /* comment */
+
+    TEM   = 0x01,       /* temporary private use for arithmetic coding */
+
+    /* 0x02 -> 0xbf reserved */
+};
+
+/* Variable-length-code lookup table (FFmpeg-style layout). */
+struct VLC {
+	int bits;          /* lookup width in bits per table level */
+	short (*table)[2]; ///< code, bits
+	int table_size, table_allocated;
+};
+
+/* Parser state for one MJPEG stream; mirrors FFmpeg's mjpegdec context. */
+struct MJpegDecodeContext {
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+	struct get_bits_context gb; /* bit reader over the current segment */
+#endif
+	int buf_size;
+
+	int start_code; /* current start code */
+	int buffer_size;
+	u8 *buffer;     /* scratch buffer for unescaped SOS data */
+
+	u16 quant_matrixes[4][64];
+	struct VLC vlcs[3][4];
+	int qscale[4];      ///< quantizer scale calculated from quant_matrixes
+
+	int first_picture;    /* true if decoding first picture */
+	int interlaced;     /* true if interlaced */
+	int bottom_field;   /* true if bottom field */
+	int lossless;       /* SOF3/SOF48 stream */
+	int ls;             /* JPEG-LS stream (SOF48) */
+	int progressive;    /* SOF2 stream */
+	u8 upscale_h[4];
+	u8 upscale_v[4];
+	int bits;           /* bits per component */
+	int adobe_transform;
+
+	int width, height;  /* frame size from the last SOF */
+	int mb_width, mb_height;
+	int nb_components;
+	int block_stride[MAX_COMPONENTS];
+	int component_id[MAX_COMPONENTS]; /* SOF component ids, minus one */
+	int h_count[MAX_COMPONENTS]; /* horizontal and vertical count for each component */
+	int v_count[MAX_COMPONENTS];
+	int h_scount[MAX_COMPONENTS];
+	int v_scount[MAX_COMPONENTS];
+	int h_max, v_max; /* maximum h and v counts */
+	int quant_index[4];   /* quant table index for each component */
+	int got_picture;                                ///< we found a SOF and picture is valid, too.
+	int restart_interval;
+	int restart_count;
+	int cur_scan; /* current scan, used by JPEG-LS */
+
+	// Raw stream data for hwaccel use.
+	const u8 *raw_image_buffer;      /* points just past SOI */
+	int raw_image_buffer_size;
+
+	int profile;    /* FF_PROFILE_MJPEG_* derived from the SOF marker */
+	u32 properties; /* FF_CODEC_PROPERTY_* flags */
+};
+
+/* Result of MJPEG extradata parsing handed back to the caller. */
+struct mjpeg_param_sets {
+	bool head_parsed; /* true when valid width/height were recovered */
+	/* currently active parameter sets */
+	struct MJpegDecodeContext dec_ps;
+};
+
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+int mjpeg_decode_extradata_ps(u8 *buf, int size, struct mjpeg_param_sets *ps);
+#else
+/*
+ * Stub used when the software parser is compiled out; always fails.
+ * 'static inline' (not bare 'inline') is required for a header-defined
+ * function: plain 'inline' in C99 provides no out-of-line definition,
+ * so non-inlined uses would fail at link time.
+ */
+static inline int mjpeg_decode_extradata_ps(u8 *buf, int size, struct mjpeg_param_sets *ps) { return -1; }
+#endif
+
+#endif
diff --git a/drivers/amvdec_ports/decoder/aml_mpeg12_parser.c b/drivers/amvdec_ports/decoder/aml_mpeg12_parser.c
new file mode 100644
index 0000000..2720446
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/aml_mpeg12_parser.c
@@ -0,0 +1,217 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "aml_mpeg12_parser.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+#include "../utils/golomb.h"
+#include "../utils/common.h"
+#include "utils.h"
+
+/*
+ * MPEG-1/2 frame_rate_code -> frame rate as a rational {num, den}.
+ * Indices 0 and 15 are invalid/reserved codes; 9-14 are non-standard
+ * extensions noted inline.
+ */
+const struct AVRational ff_mpeg12_frame_rate_tab[16] = {
+	{    0,    0},
+	{24000, 1001},
+	{   24,    1},
+	{   25,    1},
+	{30000, 1001},
+	{   30,    1},
+	{   50,    1},
+	{60000, 1001},
+	{   60,    1},
+	// Xing's 15fps: (9)
+	{   15,    1},
+	// libmpeg3's "Unofficial economy rates": (10-13)
+	{    5,    1},
+	{   10,    1},
+	{   12,    1},
+	{   15,    1},
+	{    0,    0},
+};
+
+/*
+ * Scan [p, end) for an MPEG start code (00 00 01 xx).  '*state' carries
+ * the last bytes seen, so a code split across successive calls is still
+ * found.  On success the return value points just past the code byte and
+ * '*state' holds the full 32-bit start code; otherwise 'end' is returned.
+ */
+const u8 *avpriv_find_start_code(const u8 *p, const u8 *end, u32 *state)
+{
+	int i;
+
+	if (p >= end)
+		return end;
+
+	/* First feed up to 3 bytes through the carried state to catch a
+	 * start code straddling the previous buffer boundary. */
+	for (i = 0; i < 3; i++) {
+		u32 tmp = *state << 8;
+		*state = tmp + *(p++);
+		if (tmp == 0x100 || p == end)
+			return p;
+	}
+
+	/* Fast scan: step 1-3 bytes depending on which trailing byte rules
+	 * out a 00 00 01 prefix (same trick as FFmpeg's scanner). */
+	while (p < end) {
+		if      (p[-1] > 1      ) p += 3;
+		else if (p[-2]          ) p += 2;
+		else if (p[-3]|(p[-1]-1)) p++;
+		else {
+			p++;
+			break;
+		}
+	}
+
+	/* Reload state with the 4 bytes ending at the match (or at 'end'). */
+	p = FFMIN(p, end) - 4;
+	*state = AV_RB32(p);
+
+	return p + 4;
+}
+
+/*
+ * Walk MPEG-1/2 start codes in [buf, buf_end) and fill ps->dec_ps with
+ * picture type, dimensions, frame rate, field order and repeat flags.
+ * Parsing stops at the first slice start code or at end of buffer.
+ */
+static void mpegvideo_extract_headers(const u8 *buf, int buf_size,
+	struct mpeg12_param_sets *ps)
+{
+	struct MpvParseContext *pc = &ps->dec_ps;
+	const u8 *buf_end = buf + buf_size;
+	u32 start_code;
+	int frame_rate_index, ext_type, bytes_left;
+	int frame_rate_ext_n, frame_rate_ext_d;
+	int top_field_first, repeat_first_field, progressive_frame;
+	int horiz_size_ext, vert_size_ext, bit_rate_ext;
+	int bit_rate = 0;
+	int vbv_delay = 0;
+	int chroma_format;
+	enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
+	//FIXME replace the crap with get_bits()
+	pc->repeat_pict = 0;
+
+	while (buf < buf_end) {
+		start_code= -1;
+		buf= avpriv_find_start_code(buf, buf_end, &start_code);
+		bytes_left = buf_end - buf;
+		switch (start_code) {
+		case PICTURE_START_CODE:
+			if (bytes_left >= 2) {
+				/* picture_coding_type: bits 10-12 of the header */
+				pc->pict_type = (buf[1] >> 3) & 7;
+				if (bytes_left >= 4)
+					vbv_delay = ((buf[1] & 0x07) << 13) | (buf[2] << 5) | (buf[3] >> 3);
+			}
+			break;
+		case SEQ_START_CODE:
+			if (bytes_left >= 7) {
+				/* 12-bit width/height straight from the
+				 * sequence header bit layout */
+				pc->width  = (buf[0] << 4) | (buf[1] >> 4);
+				pc->height = ((buf[1] & 0x0f) << 8) | buf[2];
+
+				pix_fmt = AV_PIX_FMT_YUV420P;
+				frame_rate_index = buf[3] & 0xf;
+				pc->frame_rate = ff_mpeg12_frame_rate_tab[frame_rate_index];
+				bit_rate = (buf[4]<<10) | (buf[5]<<2) | (buf[6]>>6);
+				pc->ticks_per_frame = 1;
+			}
+			break;
+		case EXT_START_CODE:
+			if (bytes_left >= 1) {
+				ext_type = (buf[0] >> 4);
+				switch (ext_type) {
+				case 0x1: /* sequence extension */
+					if (bytes_left >= 6) {
+						/* MPEG-2: extend the 12-bit size
+						 * and 18-bit bitrate fields */
+						horiz_size_ext = ((buf[1] & 1) << 1) | (buf[2] >> 7);
+						vert_size_ext = (buf[2] >> 5) & 3;
+						bit_rate_ext = ((buf[2] & 0x1F)<<7) | (buf[3]>>1);
+						frame_rate_ext_n = (buf[5] >> 5) & 3;
+						frame_rate_ext_d = (buf[5] & 0x1f);
+						pc->progressive_sequence = buf[1] & (1 << 3);
+						pc->has_b_frames= !(buf[5] >> 7);
+
+						chroma_format = (buf[1] >> 1) & 3;
+						switch (chroma_format) {
+						case 1: pix_fmt = AV_PIX_FMT_YUV420P; break;
+						case 2: pix_fmt = AV_PIX_FMT_YUV422P; break;
+						case 3: pix_fmt = AV_PIX_FMT_YUV444P; break;
+						}
+
+						pc->width  = (pc->width & 0xFFF) | (horiz_size_ext << 12);
+						pc->height = (pc->height& 0xFFF) | ( vert_size_ext << 12);
+						bit_rate = (bit_rate&0x3FFFF) | (bit_rate_ext << 18);
+						//if(did_set_size)
+						//set_dim_ret = ff_set_dimensions(avctx, pc->width, pc->height);
+						pc->framerate.num = pc->frame_rate.num * (frame_rate_ext_n + 1);
+						pc->framerate.den = pc->frame_rate.den * (frame_rate_ext_d + 1);
+						/* MPEG-2 time base counts fields */
+						pc->ticks_per_frame = 2;
+					}
+					break;
+				case 0x8: /* picture coding extension */
+					if (bytes_left >= 5) {
+						top_field_first = buf[3] & (1 << 7);
+						repeat_first_field = buf[3] & (1 << 1);
+						progressive_frame = buf[4] & (1 << 7);
+
+						/* check if we must repeat the frame */
+						pc->repeat_pict = 1;
+						if (repeat_first_field) {
+							if (pc->progressive_sequence) {
+								if (top_field_first)
+									pc->repeat_pict = 5;
+								else
+									pc->repeat_pict = 3;
+							} else if (progressive_frame) {
+								pc->repeat_pict = 2;
+							}
+						}
+
+						if (!pc->progressive_sequence && !progressive_frame) {
+							if (top_field_first)
+								pc->field_order = AV_FIELD_TT;
+							else
+								pc->field_order = AV_FIELD_BB;
+						} else
+							pc->field_order = AV_FIELD_PROGRESSIVE;
+					}
+					break;
+				}
+			}
+			break;
+		case -1:
+			goto the_end;
+		default:
+			/* we stop parsing when we encounter a slice. It ensures
+			that this function takes a negligible amount of time */
+			if (start_code >= SLICE_MIN_START_CODE &&
+				start_code <= SLICE_MAX_START_CODE)
+				goto the_end;
+			break;
+		}
+	}
+the_end:
+
+	if (pix_fmt != AV_PIX_FMT_NONE) {
+		pc->format = pix_fmt;
+		/* macroblock-align the coded size */
+		pc->coded_width  = ALIGN(pc->width,  16);
+		pc->coded_height = ALIGN(pc->height, 16);
+	}
+}
+
+/*
+ * Parse MPEG-1/2 extradata into 'ps' and record whether usable
+ * sequence dimensions were recovered.  Always returns 0.
+ */
+int mpeg12_decode_extradata_ps(u8 *buf, int size, struct mpeg12_param_sets *ps)
+{
+	struct MpvParseContext *pc = &ps->dec_ps;
+
+	ps->head_parsed = false;
+	mpegvideo_extract_headers(buf, size, ps);
+	ps->head_parsed = (pc->width != 0) && (pc->height != 0);
+
+	return 0;
+}
+
diff --git a/drivers/amvdec_ports/decoder/aml_mpeg12_parser.h b/drivers/amvdec_ports/decoder/aml_mpeg12_parser.h
new file mode 100644
index 0000000..8abbc78
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/aml_mpeg12_parser.h
@@ -0,0 +1,98 @@
+/*
+ * drivers/amvdec_ports/decoder/aml_mpeg12_parser.h
+ *
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+#ifndef AML_MPEG12_PARSER_H
+#define AML_MPEG12_PARSER_H
+
+#include "../aml_vcodec_drv.h"
+#include "../utils/common.h"
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+#include "../utils/pixfmt.h"
+#endif
+
+
+/* Start codes (full 32-bit values, 0x000001xx). */
+#define SEQ_END_CODE            0x000001b7
+#define SEQ_START_CODE          0x000001b3
+#define GOP_START_CODE          0x000001b8
+#define PICTURE_START_CODE      0x00000100
+#define SLICE_MIN_START_CODE    0x00000101
+#define SLICE_MAX_START_CODE    0x000001af
+#define EXT_START_CODE          0x000001b5
+#define USER_START_CODE         0x000001b2
+/* NOTE(review): identical to SEQ_END_CODE and outside the slice range
+ * above; unused in the visible parser — confirm before relying on it. */
+#define SLICE_START_CODE        0x000001b7
+
+/* Interlacing/field order of the coded pictures. */
+enum AVFieldOrder {
+	AV_FIELD_UNKNOWN,
+	AV_FIELD_PROGRESSIVE,
+	AV_FIELD_TT,          ///< Top coded first, top displayed first
+	AV_FIELD_BB,          ///< Bottom coded first, bottom displayed first
+	AV_FIELD_TB,          ///< Top coded first, bottom displayed first
+	AV_FIELD_BT,          ///< Bottom coded first, top displayed first
+};
+
+/* Header fields extracted from an MPEG-1/2 sequence by the parser. */
+struct MpvParseContext {
+	struct AVRational frame_rate; /* from the sequence header table */
+	int progressive_sequence;
+	int width, height;            /* display size from the headers */
+
+	int repeat_pict; /* XXX: Put it back in AVCodecContext. */
+	int pict_type; /* XXX: Put it back in AVCodecContext. */
+	enum AVFieldOrder field_order;
+	int format;    /* AVPixelFormat chosen from chroma_format */
+	/**
+	* Dimensions of the coded video.
+	*/
+	int coded_width;
+	int coded_height;
+	/**
+	* For some codecs, the time base is closer to the field rate than the frame rate.
+	* Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
+	* if no telecine is used ...
+	*
+	* Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
+	*/
+	int ticks_per_frame;
+	/**
+	* Size of the frame reordering buffer in the decoder.
+	* For MPEG-2 it is 1 IPB or 0 low delay IP.
+	* - encoding: Set by libavcodec.
+	* - decoding: Set by libavcodec.
+	*/
+	int has_b_frames;
+	/**
+	* - decoding: For codecs that store a framerate value in the compressed
+	*             bitstream, the decoder may export it here. { 0, 1} when
+	*             unknown.
+	* - encoding: May be used to signal the framerate of CFR content to an
+	*             encoder.
+	*/
+	struct AVRational framerate;
+};
+
+/* Result of MPEG-1/2 extradata parsing handed back to the caller. */
+struct mpeg12_param_sets {
+	bool head_parsed; /* true when valid width/height were recovered */
+	/* currently active parameter sets */
+	struct MpvParseContext dec_ps;
+};
+
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+int mpeg12_decode_extradata_ps(u8 *buf, int size, struct mpeg12_param_sets *ps);
+#else
+/*
+ * Stub used when the software parser is compiled out; always fails.
+ * 'static inline' (not bare 'inline') is required for a header-defined
+ * function: plain 'inline' in C99 provides no out-of-line definition,
+ * so non-inlined uses would fail at link time.
+ */
+static inline int mpeg12_decode_extradata_ps(u8 *buf, int size, struct mpeg12_param_sets *ps) { return -1; }
+#endif
+
+#endif
diff --git a/drivers/amvdec_ports/decoder/aml_mpeg4_parser.c b/drivers/amvdec_ports/decoder/aml_mpeg4_parser.c
new file mode 100644
index 0000000..f680b12
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/aml_mpeg4_parser.c
@@ -0,0 +1,1250 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "aml_mpeg4_parser.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+#include "../utils/golomb.h"
+#include "../utils/common.h"
+#include "utils.h"
+
+const u8 ff_mpeg4_dc_threshold[8]={
+    99, 13, 15, 17, 19, 21, 23, 0
+};
+
+/* these matrices will be permuted for the idct */
+const int16_t ff_mpeg4_default_intra_matrix[64] = {
+	 8, 17, 18, 19, 21, 23, 25, 27,
+	17, 18, 19, 21, 23, 25, 27, 28,
+	20, 21, 22, 23, 24, 26, 28, 30,
+	21, 22, 23, 24, 26, 28, 30, 32,
+	22, 23, 24, 26, 28, 30, 32, 35,
+	23, 24, 26, 28, 30, 32, 35, 38,
+	25, 26, 28, 30, 32, 35, 38, 41,
+	27, 28, 30, 32, 35, 38, 41, 45,
+};
+
+const int16_t ff_mpeg4_default_non_intra_matrix[64] = {
+	16, 17, 18, 19, 20, 21, 22, 23,
+	17, 18, 19, 20, 21, 22, 23, 24,
+	18, 19, 20, 21, 22, 23, 24, 25,
+	19, 20, 21, 22, 23, 24, 26, 27,
+	20, 21, 22, 23, 25, 26, 27, 28,
+	21, 22, 23, 24, 26, 27, 28, 30,
+	22, 23, 24, 26, 27, 28, 30, 31,
+	23, 24, 25, 27, 28, 30, 31, 33,
+};
+
+const struct AVRational ff_h263_pixel_aspect[16] = {
+	{  0,  1 },
+	{  1,  1 },
+	{ 12, 11 },
+	{ 10, 11 },
+	{ 16, 11 },
+	{ 40, 33 },
+	{  0,  1 },
+	{  0,  1 },
+	{  0,  1 },
+	{  0,  1 },
+	{  0,  1 },
+	{  0,  1 },
+	{  0,  1 },
+	{  0,  1 },
+	{  0,  1 },
+	{  0,  1 },
+};
+
+/* As per spec, studio start code search isn't the same as the old type of start code */
+static void next_start_code_studio(struct get_bits_context *gb)
+{
+	align_get_bits(gb);
+
+	while (get_bits_left(gb) >= 24 && show_bits_long(gb, 24) != 0x1) {
+		get_bits(gb, 8);
+	}
+}
+
+static int read_quant_matrix_ext(struct MpegEncContext *s, struct get_bits_context *gb)
+{
+	int i, /*j,*/ v;
+
+	if (get_bits1(gb)) {
+		if (get_bits_left(gb) < 64*8)
+			return -1;
+		/* intra_quantiser_matrix */
+		for (i = 0; i < 64; i++) {
+			v = get_bits(gb, 8);
+			//j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
+			//s->intra_matrix[j]        = v;
+			//s->chroma_intra_matrix[j] = v;
+		}
+	}
+
+	if (get_bits1(gb)) {
+		if (get_bits_left(gb) < 64*8)
+			return -1;
+		/* non_intra_quantiser_matrix */
+		for (i = 0; i < 64; i++) {
+			get_bits(gb, 8);
+		}
+	}
+
+	if (get_bits1(gb)) {
+		if (get_bits_left(gb) < 64*8)
+			return -1;
+		/* chroma_intra_quantiser_matrix */
+		for (i = 0; i < 64; i++) {
+			v = get_bits(gb, 8);
+			//j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
+			//s->chroma_intra_matrix[j] = v;
+		}
+	}
+
+	if (get_bits1(gb)) {
+		if (get_bits_left(gb) < 64*8)
+			return -1;
+		/* chroma_non_intra_quantiser_matrix */
+		for (i = 0; i < 64; i++) {
+			get_bits(gb, 8);
+		}
+	}
+
+	next_start_code_studio(gb);
+	return 0;
+}
+
+static void extension_and_user_data(struct MpegEncContext *s, struct get_bits_context *gb, int id)
+{
+	u32 startcode;
+	u8 extension_type;
+
+	startcode = show_bits_long(gb, 32);
+	if (startcode == USER_DATA_STARTCODE || startcode == EXT_STARTCODE) {
+		if ((id == 2 || id == 4) && startcode == EXT_STARTCODE) {
+			skip_bits_long(gb, 32);
+			extension_type = get_bits(gb, 4);
+			if (extension_type == QUANT_MATRIX_EXT_ID)
+				read_quant_matrix_ext(s, gb);
+		}
+	}
+}
+
+
+static int decode_studio_vol_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+	struct MpegEncContext *s = &ctx->m;
+	int width, height;
+	int bits_per_raw_sample;
+
+	// random_accessible_vol and video_object_type_indication have already
+	// been read by the caller decode_vol_header()
+	skip_bits(gb, 4); /* video_object_layer_verid */
+	ctx->shape = get_bits(gb, 2); /* video_object_layer_shape */
+	skip_bits(gb, 4); /* video_object_layer_shape_extension */
+	skip_bits1(gb); /* progressive_sequence */
+	if (ctx->shape != BIN_ONLY_SHAPE) {
+		ctx->rgb = get_bits1(gb); /* rgb_components */
+		s->chroma_format = get_bits(gb, 2); /* chroma_format */
+		if (!s->chroma_format) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "illegal chroma format\n");
+			return -1;
+		}
+
+		bits_per_raw_sample = get_bits(gb, 4); /* bit_depth */
+		if (bits_per_raw_sample == 10) {
+			if (ctx->rgb) {
+				ctx->pix_fmt = AV_PIX_FMT_GBRP10;
+			} else {
+				ctx->pix_fmt = s->chroma_format == CHROMA_422 ? AV_PIX_FMT_YUV422P10 : AV_PIX_FMT_YUV444P10;
+			}
+		}
+		else {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "MPEG-4 Studio profile bit-depth %u", bits_per_raw_sample);
+			return -1;
+		}
+		ctx->bits_per_raw_sample = bits_per_raw_sample;
+	}
+	if (ctx->shape == RECT_SHAPE) {
+		check_marker(gb, "before video_object_layer_width");
+		width = get_bits(gb, 14); /* video_object_layer_width */
+		check_marker(gb, "before video_object_layer_height");
+		height = get_bits(gb, 14); /* video_object_layer_height */
+		check_marker(gb, "after video_object_layer_height");
+
+		/* Do the same check as non-studio profile */
+		if (width && height) {
+			if (s->width && s->height &&
+				(s->width != width || s->height != height))
+				s->context_reinit = 1;
+			s->width  = width;
+			s->height = height;
+		}
+	}
+	s->aspect_ratio_info = get_bits(gb, 4);
+	if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
+		ctx->sample_aspect_ratio.num = get_bits(gb, 8);  // par_width
+		ctx->sample_aspect_ratio.den = get_bits(gb, 8);  // par_height
+	} else {
+		ctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info];
+	}
+	skip_bits(gb, 4); /* frame_rate_code */
+	skip_bits(gb, 15); /* first_half_bit_rate */
+	check_marker(gb, "after first_half_bit_rate");
+	skip_bits(gb, 15); /* latter_half_bit_rate */
+	check_marker(gb, "after latter_half_bit_rate");
+	skip_bits(gb, 15); /* first_half_vbv_buffer_size */
+	check_marker(gb, "after first_half_vbv_buffer_size");
+	skip_bits(gb, 3); /* latter_half_vbv_buffer_size */
+	skip_bits(gb, 11); /* first_half_vbv_buffer_size */
+	check_marker(gb, "after first_half_vbv_buffer_size");
+	skip_bits(gb, 15); /* latter_half_vbv_occupancy */
+	check_marker(gb, "after latter_half_vbv_occupancy");
+	s->low_delay = get_bits1(gb);
+	s->mpeg_quant = get_bits1(gb); /* mpeg2_stream */
+
+	next_start_code_studio(gb);
+	extension_and_user_data(s, gb, 2);
+
+	return 0;
+}
+
+static int decode_vol_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+	struct MpegEncContext *s = &ctx->m;
+	int width, height, vo_ver_id;
+
+	/* vol header */
+	skip_bits(gb, 1);                   /* random access */
+	s->vo_type = get_bits(gb, 8);
+
+	/* If we are in studio profile (per vo_type), check if it's all consistent
+	* and if so continue to pass control to decode_studio_vol_header().
+	* Else if something is inconsistent, error out
+	* else continue with (non studio) vol header decoding.
+	*/
+	if (s->vo_type == CORE_STUDIO_VO_TYPE ||
+		s->vo_type == SIMPLE_STUDIO_VO_TYPE) {
+		if (ctx->profile != FF_PROFILE_UNKNOWN && ctx->profile != FF_PROFILE_MPEG4_SIMPLE_STUDIO)
+			return -1;
+		s->studio_profile = 1;
+		ctx->profile = FF_PROFILE_MPEG4_SIMPLE_STUDIO;
+		return decode_studio_vol_header(ctx, gb);
+	} else if (s->studio_profile) {
+		return -1;
+	}
+
+	if (get_bits1(gb) != 0) {           /* is_ol_id */
+		vo_ver_id = get_bits(gb, 4);    /* vo_ver_id */
+		skip_bits(gb, 3);               /* vo_priority */
+	} else {
+		vo_ver_id = 1;
+	}
+	s->aspect_ratio_info = get_bits(gb, 4);
+	if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
+		ctx->sample_aspect_ratio.num = get_bits(gb, 8);  // par_width
+		ctx->sample_aspect_ratio.den = get_bits(gb, 8);  // par_height
+	} else {
+		ctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info];
+	}
+
+	if ((ctx->vol_control_parameters = get_bits1(gb))) { /* vol control parameter */
+		int chroma_format = get_bits(gb, 2);
+		if (chroma_format != CHROMA_420)
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "illegal chroma format\n");
+
+		s->low_delay = get_bits1(gb);
+		if (get_bits1(gb)) {    /* vbv parameters */
+			get_bits(gb, 15);   /* first_half_bitrate */
+			check_marker(gb, "after first_half_bitrate");
+			get_bits(gb, 15);   /* latter_half_bitrate */
+			check_marker(gb, "after latter_half_bitrate");
+			get_bits(gb, 15);   /* first_half_vbv_buffer_size */
+			check_marker(gb, "after first_half_vbv_buffer_size");
+			get_bits(gb, 3);    /* latter_half_vbv_buffer_size */
+			get_bits(gb, 11);   /* first_half_vbv_occupancy */
+			check_marker(gb, "after first_half_vbv_occupancy");
+			get_bits(gb, 15);   /* latter_half_vbv_occupancy */
+			check_marker(gb, "after latter_half_vbv_occupancy");
+		}
+	} else {
+		/* is setting low delay flag only once the smartest thing to do?
+		* low delay detection will not be overridden. */
+		if (s->picture_number == 0) {
+			switch (s->vo_type) {
+			case SIMPLE_VO_TYPE:
+			case ADV_SIMPLE_VO_TYPE:
+				s->low_delay = 1;
+				break;
+			default:
+				s->low_delay = 0;
+			}
+		}
+	}
+
+	ctx->shape = get_bits(gb, 2); /* vol shape */
+	if (ctx->shape != RECT_SHAPE)
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "only rectangular vol supported\n");
+	if (ctx->shape == GRAY_SHAPE && vo_ver_id != 1) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Gray shape not supported\n");
+		skip_bits(gb, 4);  /* video_object_layer_shape_extension */
+	}
+
+	check_marker(gb, "before time_increment_resolution");
+
+	ctx->framerate.num = get_bits(gb, 16);
+	if (!ctx->framerate.num) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "framerate==0\n");
+		return -1;
+	}
+
+	ctx->time_increment_bits = av_log2(ctx->framerate.num - 1) + 1;
+	if (ctx->time_increment_bits < 1)
+		ctx->time_increment_bits = 1;
+
+	check_marker(gb, "before fixed_vop_rate");
+
+	if (get_bits1(gb) != 0)     /* fixed_vop_rate  */
+		ctx->framerate.den = get_bits(gb, ctx->time_increment_bits);
+	else
+		ctx->framerate.den = 1;
+
+	//ctx->time_base = av_inv_q(av_mul_q(ctx->framerate, (AVRational){ctx->ticks_per_frame, 1}));
+
+	ctx->t_frame = 0;
+
+	if (ctx->shape != BIN_ONLY_SHAPE) {
+		if (ctx->shape == RECT_SHAPE) {
+			check_marker(gb, "before width");
+			width = get_bits(gb, 13);
+			check_marker(gb, "before height");
+			height = get_bits(gb, 13);
+			check_marker(gb, "after height");
+			if (width && height &&  /* they should be non zero but who knows */
+			!(s->width && s->codec_tag == AV_RL32("MP4S"))) {
+				if (s->width && s->height &&
+				(s->width != width || s->height != height))
+				s->context_reinit = 1;
+				s->width  = width;
+				s->height = height;
+			}
+		}
+
+		s->progressive_sequence  =
+		s->progressive_frame     = get_bits1(gb) ^ 1;
+		s->interlaced_dct        = 0;
+		if (!get_bits1(gb)) /* OBMC Disable */
+			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "MPEG-4 OBMC not supported (very likely buggy encoder)\n");
+		if (vo_ver_id == 1)
+			ctx->vol_sprite_usage = get_bits1(gb);    /* vol_sprite_usage */
+		else
+			ctx->vol_sprite_usage = get_bits(gb, 2);  /* vol_sprite_usage */
+
+		if (ctx->vol_sprite_usage == STATIC_SPRITE)
+			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Static Sprites not supported\n");
+		if (ctx->vol_sprite_usage == STATIC_SPRITE ||
+			ctx->vol_sprite_usage == GMC_SPRITE) {
+		if (ctx->vol_sprite_usage == STATIC_SPRITE) {
+			skip_bits(gb, 13); // sprite_width
+			check_marker(gb, "after sprite_width");
+			skip_bits(gb, 13); // sprite_height
+			check_marker(gb, "after sprite_height");
+			skip_bits(gb, 13); // sprite_left
+			check_marker(gb, "after sprite_left");
+			skip_bits(gb, 13); // sprite_top
+			check_marker(gb, "after sprite_top");
+		}
+		ctx->num_sprite_warping_points = get_bits(gb, 6);
+		if (ctx->num_sprite_warping_points > 3) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "%d sprite_warping_points\n",
+				ctx->num_sprite_warping_points);
+			ctx->num_sprite_warping_points = 0;
+			return -1;
+		}
+		s->sprite_warping_accuracy  = get_bits(gb, 2);
+		ctx->sprite_brightness_change = get_bits1(gb);
+		if (ctx->vol_sprite_usage == STATIC_SPRITE)
+			skip_bits1(gb); // low_latency_sprite
+		}
+		// FIXME sadct disable bit if verid!=1 && shape not rect
+
+		if (get_bits1(gb) == 1) {                   /* not_8_bit */
+				s->quant_precision = get_bits(gb, 4);   /* quant_precision */
+			if (get_bits(gb, 4) != 8)               /* bits_per_pixel */
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "N-bit not supported\n");
+			if (s->quant_precision != 5)
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "quant precision %d\n", s->quant_precision);
+			if (s->quant_precision<3 || s->quant_precision>9) {
+				s->quant_precision = 5;
+			}
+		} else {
+			s->quant_precision = 5;
+		}
+
+		// FIXME a bunch of grayscale shape things
+
+		if ((s->mpeg_quant = get_bits1(gb))) { /* vol_quant_type */
+			int i, v;
+
+			//mpeg4_load_default_matrices(s);
+
+			/* load custom intra matrix */
+			if (get_bits1(gb)) {
+				int last = 0;
+			for (i = 0; i < 64; i++) {
+				//int j;
+				if (get_bits_left(gb) < 8) {
+					v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "insufficient data for custom matrix\n");
+					return -1;
+				}
+				v = get_bits(gb, 8);
+				if (v == 0)
+					break;
+
+				last = v;
+				//j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
+				//s->intra_matrix[j]        = last;
+				//s->chroma_intra_matrix[j] = last;
+			}
+
+			/* replicate last value */
+			//for (; i < 64; i++) {
+				//int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
+				//s->intra_matrix[j]        = last;
+				//s->chroma_intra_matrix[j] = last;
+			//}
+			}
+
+			/* load custom non intra matrix */
+			if (get_bits1(gb)) {
+				int last = 0;
+				for (i = 0; i < 64; i++) {
+					//int j;
+					if (get_bits_left(gb) < 8) {
+						v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "insufficient data for custom matrix\n");
+						return -1;
+					}
+					v = get_bits(gb, 8);
+					if (v == 0)
+						break;
+
+					last = v;
+					//j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
+					//s->inter_matrix[j]        = v;
+					//s->chroma_inter_matrix[j] = v;
+				}
+
+				/* replicate last value */
+				//for (; i < 64; i++) {
+					//int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
+					//s->inter_matrix[j]        = last;
+					//s->chroma_inter_matrix[j] = last;
+				//}
+			}
+
+			// FIXME a bunch of grayscale shape things
+		}
+
+		if (vo_ver_id != 1)
+			s->quarter_sample = get_bits1(gb);
+		else
+			s->quarter_sample = 0;
+
+		if (get_bits_left(gb) < 4) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "VOL Header truncated\n");
+			return -1;
+		}
+
+		if (!get_bits1(gb)) {
+			int pos               = get_bits_count(gb);
+			int estimation_method = get_bits(gb, 2);
+			if (estimation_method < 2) {
+				if (!get_bits1(gb)) {
+					ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* opaque */
+					ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* transparent */
+					ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* intra_cae */
+					ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* inter_cae */
+					ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* no_update */
+					ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* upsampling */
+				}
+				if (!get_bits1(gb)) {
+					ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* intra_blocks */
+					ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* inter_blocks */
+					ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* inter4v_blocks */
+					ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* not coded blocks */
+				}
+				if (!check_marker(gb, "in complexity estimation part 1")) {
+					skip_bits_long(gb, pos - get_bits_count(gb));
+					goto no_cplx_est;
+				}
+				if (!get_bits1(gb)) {
+					ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* dct_coeffs */
+					ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* dct_lines */
+					ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* vlc_syms */
+					ctx->cplx_estimation_trash_i += 4 * get_bits1(gb);  /* vlc_bits */
+				}
+				if (!get_bits1(gb)) {
+					ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* apm */
+					ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* npm */
+					ctx->cplx_estimation_trash_b += 8 * get_bits1(gb);  /* interpolate_mc_q */
+					ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* forwback_mc_q */
+					ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* halfpel2 */
+					ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* halfpel4 */
+				}
+				if (!check_marker(gb, "in complexity estimation part 2")) {
+					skip_bits_long(gb, pos - get_bits_count(gb));
+					goto no_cplx_est;
+				}
+				if (estimation_method == 1) {
+					ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* sadct */
+					ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* qpel */
+				}
+			} else
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid Complexity estimation method %d\n",
+				estimation_method);
+		} else {
+
+no_cplx_est:
+			ctx->cplx_estimation_trash_i =
+			ctx->cplx_estimation_trash_p =
+			ctx->cplx_estimation_trash_b = 0;
+		}
+
+		ctx->resync_marker = !get_bits1(gb); /* resync_marker_disabled */
+
+		s->data_partitioning = get_bits1(gb);
+		if (s->data_partitioning)
+			ctx->rvlc = get_bits1(gb);
+
+		if (vo_ver_id != 1) {
+			ctx->new_pred = get_bits1(gb);
+		if (ctx->new_pred) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "new pred not supported\n");
+			skip_bits(gb, 2); /* requested upstream message type */
+			skip_bits1(gb);   /* newpred segment type */
+		}
+		if (get_bits1(gb)) // reduced_res_vop
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "reduced resolution VOP not supported\n");
+		} else {
+			ctx->new_pred = 0;
+		}
+
+		ctx->scalability = get_bits1(gb);
+
+		if (ctx->scalability) {
+			struct get_bits_context bak = *gb;
+			int h_sampling_factor_n;
+			int h_sampling_factor_m;
+			int v_sampling_factor_n;
+			int v_sampling_factor_m;
+
+			skip_bits1(gb);    // hierarchy_type
+			skip_bits(gb, 4);  /* ref_layer_id */
+			skip_bits1(gb);    /* ref_layer_sampling_dir */
+			h_sampling_factor_n = get_bits(gb, 5);
+			h_sampling_factor_m = get_bits(gb, 5);
+			v_sampling_factor_n = get_bits(gb, 5);
+			v_sampling_factor_m = get_bits(gb, 5);
+			ctx->enhancement_type = get_bits1(gb);
+
+			if (h_sampling_factor_n == 0 || h_sampling_factor_m == 0 ||
+				v_sampling_factor_n == 0 || v_sampling_factor_m == 0) {
+				/* illegal scalability header (VERY broken encoder),
+				* trying to workaround */
+				ctx->scalability = 0;
+				*gb            = bak;
+			} else
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "scalability not supported\n");
+
+			// bin shape stuff FIXME
+		}
+	}
+
+	if (1) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "tb %d/%d, tincrbits:%d, qp_prec:%d, ps:%d, low_delay:%d  %s%s%s%s\n",
+			ctx->framerate.den, ctx->framerate.num,
+			ctx->time_increment_bits,
+			s->quant_precision,
+			s->progressive_sequence,
+			s->low_delay,
+			ctx->scalability ? "scalability " :"" , s->quarter_sample ? "qpel " : "",
+			s->data_partitioning ? "partition " : "", ctx->rvlc ? "rvlc " : "");
+	}
+
+	return 0;
+}
+
+
+/**
+ * Decode the user data stuff in the header.
+ * Also initializes divx/xvid/lavc_version/build.
+ */
+static int decode_user_data(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+	struct MpegEncContext *s = &ctx->m;
+	char buf[256];
+	int i;
+	int e;
+	int ver = 0, build = 0, ver2 = 0, ver3 = 0;
+	char last;
+
+	for (i = 0; i < 255 && get_bits_count(gb) < gb->size_in_bits; i++) {
+		if (show_bits(gb, 23) == 0)
+		break;
+		buf[i] = get_bits(gb, 8);
+	}
+	buf[i] = 0;
+
+	/* divx detection */
+	e = sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last);
+	if (e < 2)
+		e = sscanf(buf, "DivX%db%d%c", &ver, &build, &last);
+	if (e >= 2) {
+		ctx->divx_version = ver;
+		ctx->divx_build   = build;
+		s->divx_packed  = e == 3 && last == 'p';
+	}
+
+	/* libavcodec detection */
+	e = sscanf(buf, "FFmpe%*[^b]b%d", &build) + 3;
+	if (e != 4)
+		e = sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build);
+	if (e != 4) {
+		e = sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3) + 1;
+		if (e > 1) {
+			if (ver > 0xFFU || ver2 > 0xFFU || ver3 > 0xFFU) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Unknown Lavc version string encountered, %d.%d.%d; "
+					"clamping sub-version values to 8-bits.\n",
+					ver, ver2, ver3);
+			}
+			build = ((ver & 0xFF) << 16) + ((ver2 & 0xFF) << 8) + (ver3 & 0xFF);
+		}
+	}
+	if (e != 4) {
+		if (strcmp(buf, "ffmpeg") == 0)
+			ctx->lavc_build = 4600;
+	}
+	if (e == 4)
+		ctx->lavc_build = build;
+
+	/* Xvid detection */
+	e = sscanf(buf, "XviD%d", &build);
+	if (e == 1)
+		ctx->xvid_build = build;
+
+	return 0;
+}
+
+
+static int mpeg4_decode_gop_header(struct MpegEncContext *s, struct get_bits_context *gb)
+{
+	int hours, minutes, seconds;
+
+	if (!show_bits(gb, 23)) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "GOP header invalid\n");
+		return -1;
+	}
+
+	hours   = get_bits(gb, 5);
+	minutes = get_bits(gb, 6);
+	check_marker(gb, "in gop_header");
+	seconds = get_bits(gb, 6);
+
+	s->time_base = seconds + 60*(minutes + 60*hours);
+
+	skip_bits1(gb);
+	skip_bits1(gb);
+
+	return 0;
+}
+
+
+static int mpeg4_decode_profile_level(struct MpegEncContext *s, struct get_bits_context *gb, int *profile, int *level)
+{
+
+	*profile = get_bits(gb, 4);
+	*level   = get_bits(gb, 4);
+
+	// for Simple profile, level 0
+	if (*profile == 0 && *level == 8) {
+		*level = 0;
+	}
+
+	return 0;
+}
+
+
+static int decode_studiovisualobject(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+	struct MpegEncContext *s = &ctx->m;
+	int visual_object_type;
+
+	skip_bits(gb, 4); /* visual_object_verid */
+	visual_object_type = get_bits(gb, 4);
+	if (visual_object_type != VOT_VIDEO_ID) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "VO type %u", visual_object_type);
+		return -1;
+	}
+
+	next_start_code_studio(gb);
+	extension_and_user_data(s, gb, 1);
+
+	return 0;
+}
+
+
+static int mpeg4_decode_visual_object(struct MpegEncContext *s, struct get_bits_context *gb)
+{
+	int visual_object_type;
+	int is_visual_object_identifier = get_bits1(gb);
+
+	if (is_visual_object_identifier) {
+		skip_bits(gb, 4+3);
+	}
+	visual_object_type = get_bits(gb, 4);
+
+	if (visual_object_type == VOT_VIDEO_ID ||
+	visual_object_type == VOT_STILL_TEXTURE_ID) {
+		int video_signal_type = get_bits1(gb);
+		if (video_signal_type) {
+			int video_range, color_description;
+			skip_bits(gb, 3); // video_format
+			video_range = get_bits1(gb);
+			color_description = get_bits1(gb);
+
+			s->ctx->color_range = video_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+
+			if (color_description) {
+				s->ctx->color_primaries = get_bits(gb, 8);
+				s->ctx->color_trc       = get_bits(gb, 8);
+				s->ctx->colorspace      = get_bits(gb, 8);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void decode_smpte_tc(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+	skip_bits(gb, 16); /* Time_code[63..48] */
+	check_marker(gb, "after Time_code[63..48]");
+	skip_bits(gb, 16); /* Time_code[47..32] */
+	check_marker(gb, "after Time_code[47..32]");
+	skip_bits(gb, 16); /* Time_code[31..16] */
+	check_marker(gb, "after Time_code[31..16]");
+	skip_bits(gb, 16); /* Time_code[15..0] */
+	check_marker(gb, "after Time_code[15..0]");
+	skip_bits(gb, 4); /* reserved_bits */
+}
+
+static void reset_studio_dc_predictors(struct MpegEncContext *s)
+{
+	/* Reset DC Predictors */
+	s->last_dc[0] =
+	s->last_dc[1] =
+	s->last_dc[2] = 1 << (s->ctx->bits_per_raw_sample + s->dct_precision + s->intra_dc_precision - 1);
+}
+
+/**
+ * Decode the next studio vop header.
+ * @return <0 if something went wrong
+ */
+static int decode_studio_vop_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+	struct MpegEncContext *s = &ctx->m;
+
+	if (get_bits_left(gb) <= 32)
+		return 0;
+
+	//s->decode_mb = mpeg4_decode_studio_mb;
+
+	decode_smpte_tc(ctx, gb);
+
+	skip_bits(gb, 10); /* temporal_reference */
+	skip_bits(gb, 2); /* vop_structure */
+	s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* vop_coding_type */
+	if (get_bits1(gb)) { /* vop_coded */
+		skip_bits1(gb); /* top_field_first */
+		skip_bits1(gb); /* repeat_first_field */
+		s->progressive_frame = get_bits1(gb) ^ 1; /* progressive_frame */
+	}
+
+	if (s->pict_type == AV_PICTURE_TYPE_I) {
+		if (get_bits1(gb))
+			reset_studio_dc_predictors(s);
+	}
+
+	if (ctx->shape != BIN_ONLY_SHAPE) {
+		s->alternate_scan = get_bits1(gb);
+		s->frame_pred_frame_dct = get_bits1(gb);
+		s->dct_precision = get_bits(gb, 2);
+		s->intra_dc_precision = get_bits(gb, 2);
+		s->q_scale_type = get_bits1(gb);
+	}
+
+	//if (s->alternate_scan) {    }
+
+	//mpeg4_load_default_matrices(s);
+
+	next_start_code_studio(gb);
+	extension_and_user_data(s, gb, 4);
+
+	return 0;
+}
+
+static int decode_new_pred(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+	int len = FFMIN(ctx->time_increment_bits + 3, 15);
+
+	get_bits(gb, len);
+	if (get_bits1(gb))
+		get_bits(gb, len);
+	check_marker(gb, "after new_pred");
+
+	return 0;
+}
+
+static int decode_vop_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+	struct MpegEncContext *s = &ctx->m;
+	int time_incr, time_increment;
+	int64_t pts;
+
+	s->mcsel       = 0;
+	s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I;        /* pict type: I = 0 , P = 1 */
+	if (s->pict_type == AV_PICTURE_TYPE_B && s->low_delay &&
+		ctx->vol_control_parameters == 0) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "low_delay flag set incorrectly, clearing it\n");
+		s->low_delay = 0;
+	}
+
+	s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B;
+	/*if (s->partitioned_frame)
+		s->decode_mb = mpeg4_decode_partitioned_mb;
+	else
+		s->decode_mb = mpeg4_decode_mb;*/
+
+	time_incr = 0;
+	while (get_bits1(gb) != 0)
+		time_incr++;
+
+	check_marker(gb, "before time_increment");
+
+	if (ctx->time_increment_bits == 0 ||
+		!(show_bits(gb, ctx->time_increment_bits + 1) & 1)) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "time_increment_bits %d is invalid in relation to the current bitstream, this is likely caused by a missing VOL header\n", ctx->time_increment_bits);
+
+		for (ctx->time_increment_bits = 1;
+			ctx->time_increment_bits < 16;
+			ctx->time_increment_bits++) {
+			if (s->pict_type == AV_PICTURE_TYPE_P ||
+				(s->pict_type == AV_PICTURE_TYPE_S &&
+				ctx->vol_sprite_usage == GMC_SPRITE)) {
+				if ((show_bits(gb, ctx->time_increment_bits + 6) & 0x37) == 0x30)
+					break;
+			} else if ((show_bits(gb, ctx->time_increment_bits + 5) & 0x1F) == 0x18)
+				break;
+		}
+
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "time_increment_bits set to %d bits, based on bitstream analysis\n", ctx->time_increment_bits);
+		if (ctx->framerate.num && 4*ctx->framerate.num < 1<<ctx->time_increment_bits) {
+			ctx->framerate.num = 1<<ctx->time_increment_bits;
+			//ctx->time_base = av_inv_q(av_mul_q(ctx->framerate, (AVRational){ctx->ticks_per_frame, 1}));
+		}
+	}
+
+	if (IS_3IV1)
+		time_increment = get_bits1(gb);        // FIXME investigate further
+	else
+		time_increment = get_bits(gb, ctx->time_increment_bits);
+
+	if (s->pict_type != AV_PICTURE_TYPE_B) {
+		s->last_time_base = s->time_base;
+		s->time_base     += time_incr;
+		s->time = s->time_base * (int64_t)ctx->framerate.num + time_increment;
+		//if (s->workaround_bugs & FF_BUG_UMP4) { }
+		s->pp_time         = s->time - s->last_non_b_time;
+		s->last_non_b_time = s->time;
+	} else {
+		s->time    = (s->last_time_base + time_incr) * (int64_t)ctx->framerate.num + time_increment;
+		s->pb_time = s->pp_time - (s->last_non_b_time - s->time);
+		if (s->pp_time <= s->pb_time ||
+			s->pp_time <= s->pp_time - s->pb_time ||
+			s->pp_time <= 0) {
+			/* messed up order, maybe after seeking? skipping current B-frame */
+			return FRAME_SKIPPED;
+		}
+		//ff_mpeg4_init_direct_mv(s);
+
+			if (ctx->t_frame == 0)
+		ctx->t_frame = s->pb_time;
+		if (ctx->t_frame == 0)
+			ctx->t_frame = 1;  // 1/0 protection
+		s->pp_field_time = (ROUNDED_DIV(s->last_non_b_time, ctx->t_frame) -
+		ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2;
+		s->pb_field_time = (ROUNDED_DIV(s->time, ctx->t_frame) -
+		ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2;
+		if (s->pp_field_time <= s->pb_field_time || s->pb_field_time <= 1) {
+			s->pb_field_time = 2;
+			s->pp_field_time = 4;
+			if (!s->progressive_sequence)
+				return FRAME_SKIPPED;
+		}
+	}
+
+	if (ctx->framerate.den)
+		pts = ROUNDED_DIV(s->time, ctx->framerate.den);
+	else
+		pts = AV_NOPTS_VALUE;
+	v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "MPEG4 PTS: %lld\n", pts);
+
+	check_marker(gb, "before vop_coded");
+
+	/* vop coded */
+	if (get_bits1(gb) != 1) {
+		if (1)
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "vop not coded\n");
+		return FRAME_SKIPPED;
+	}
+	if (ctx->new_pred)
+		decode_new_pred(ctx, gb);
+
+	if (ctx->shape != BIN_ONLY_SHAPE &&
+		(s->pict_type == AV_PICTURE_TYPE_P ||
+		(s->pict_type == AV_PICTURE_TYPE_S &&
+		ctx->vol_sprite_usage == GMC_SPRITE))) {
+		/* rounding type for motion estimation */
+		s->no_rounding = get_bits1(gb);
+	} else {
+		s->no_rounding = 0;
+	}
+	// FIXME reduced res stuff
+
+	if (ctx->shape != RECT_SHAPE) {
+		if (ctx->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) {
+			skip_bits(gb, 13);  /* width */
+			check_marker(gb, "after width");
+			skip_bits(gb, 13);  /* height */
+			check_marker(gb, "after height");
+			skip_bits(gb, 13);  /* hor_spat_ref */
+			check_marker(gb, "after hor_spat_ref");
+			skip_bits(gb, 13);  /* ver_spat_ref */
+		}
+		skip_bits1(gb);         /* change_CR_disable */
+
+		if (get_bits1(gb) != 0)
+			skip_bits(gb, 8);   /* constant_alpha_value */
+	}
+
+	// FIXME complexity estimation stuff
+
+	if (ctx->shape != BIN_ONLY_SHAPE) {
+		skip_bits_long(gb, ctx->cplx_estimation_trash_i);
+		if (s->pict_type != AV_PICTURE_TYPE_I)
+			skip_bits_long(gb, ctx->cplx_estimation_trash_p);
+		if (s->pict_type == AV_PICTURE_TYPE_B)
+			skip_bits_long(gb, ctx->cplx_estimation_trash_b);
+
+		if (get_bits_left(gb) < 3) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Header truncated\n");
+			return -1;
+		}
+		ctx->intra_dc_threshold = ff_mpeg4_dc_threshold[get_bits(gb, 3)];
+		if (!s->progressive_sequence) {
+			s->top_field_first = get_bits1(gb);
+			s->alternate_scan  = get_bits1(gb);
+		} else
+			s->alternate_scan = 0;
+	}
+
+	/*if (s->alternate_scan) { } */
+
+	if (s->pict_type == AV_PICTURE_TYPE_S) {
+		if((ctx->vol_sprite_usage == STATIC_SPRITE ||
+			ctx->vol_sprite_usage == GMC_SPRITE)) {
+			//if (mpeg4_decode_sprite_trajectory(ctx, gb) < 0)
+				//return -1;
+			if (ctx->sprite_brightness_change)
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "sprite_brightness_change not supported\n");
+			if (ctx->vol_sprite_usage == STATIC_SPRITE)
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "static sprite not supported\n");
+		} else {
+			memset(s->sprite_offset, 0, sizeof(s->sprite_offset));
+			memset(s->sprite_delta, 0, sizeof(s->sprite_delta));
+		}
+	}
+
+	if (ctx->shape != BIN_ONLY_SHAPE) {
+		s->chroma_qscale = s->qscale = get_bits(gb, s->quant_precision);
+		if (s->qscale == 0) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Error, header damaged or not MPEG-4 header (qscale=0)\n");
+			return -1;  // makes no sense to continue, as there is nothing left from the image then
+		}
+
+		if (s->pict_type != AV_PICTURE_TYPE_I) {
+			s->f_code = get_bits(gb, 3);        /* fcode_for */
+			if (s->f_code == 0) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Error, header damaged or not MPEG-4 header (f_code=0)\n");
+					s->f_code = 1;
+				return -1;  // makes no sense to continue, as there is nothing left from the image then
+			}
+		} else
+			s->f_code = 1;
+
+		if (s->pict_type == AV_PICTURE_TYPE_B) {
+			s->b_code = get_bits(gb, 3);
+			if (s->b_code == 0) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Error, header damaged or not MPEG4 header (b_code=0)\n");
+					s->b_code=1;
+				return -1; // makes no sense to continue, as the MV decoding will break very quickly
+			}
+		} else
+			s->b_code = 1;
+
+		if (1) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d time:%ld tincr:%d\n",
+				s->qscale, s->f_code, s->b_code,
+				s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")),
+				gb->size_in_bits,s->progressive_sequence, s->alternate_scan,
+				s->top_field_first, s->quarter_sample ? "q" : "h",
+				s->data_partitioning, ctx->resync_marker,
+				ctx->num_sprite_warping_points, s->sprite_warping_accuracy,
+				1 - s->no_rounding, s->vo_type,
+				ctx->vol_control_parameters ? " VOLC" : " ", ctx->intra_dc_threshold,
+				ctx->cplx_estimation_trash_i, ctx->cplx_estimation_trash_p,
+				ctx->cplx_estimation_trash_b,
+				s->time,
+				time_increment);
+		}
+
+		if (!ctx->scalability) {
+			if (ctx->shape != RECT_SHAPE && s->pict_type != AV_PICTURE_TYPE_I)
+				skip_bits1(gb);  // vop shape coding type
+		} else {
+			if (ctx->enhancement_type) {
+				int load_backward_shape = get_bits1(gb);
+				if (load_backward_shape)
+					v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "load backward shape isn't supported\n");
+			}
+			skip_bits(gb, 2);  // ref_select_code
+		}
+	}
+	/* detect buggy encoders which don't set the low_delay flag
+	* (divx4/xvid/opendivx). Note we cannot detect divx5 without B-frames
+	* easily (although it's buggy too) */
+	if (s->vo_type == 0 && ctx->vol_control_parameters == 0 &&
+		ctx->divx_version == -1 && s->picture_number == 0) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n");
+			s->low_delay = 1;
+	}
+
+	s->picture_number++;  // better than pic number==0 always ;)
+
+	// FIXME add short header support
+	//s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table;
+	//s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table;
+
+	return 0;
+}
+
+/**
+ * Decode MPEG-4 headers.
+ * @return <0 if no VOP found (or a damaged one)
+ *         FRAME_SKIPPED if a not coded VOP is found
+ *         0 if a VOP is found
+ */
+int ff_mpeg4_decode_picture_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+	struct MpegEncContext *s = &ctx->m;
+
+	unsigned startcode, v;
+	int ret;
+	int vol = 0;
+	int bits_per_raw_sample = 0;
+
+	s->ctx = ctx;
+
+	/* search next start code */
+	align_get_bits(gb);
+
+	// If we have not switched to studio profile then we also did not switch bps,
+	// which means something else (like a previous instance) set bps externally,
+	// which would be inconsistent with the current state, thus reset it
+	if (!s->studio_profile && bits_per_raw_sample != 8)
+		bits_per_raw_sample = 0;
+
+	if (show_bits(gb, 24) == 0x575630) {
+		skip_bits(gb, 24);
+		if (get_bits(gb, 8) == 0xF0)
+			goto end;
+	}
+
+	startcode = 0xff;
+	for (;;) {
+		if (get_bits_count(gb) >= gb->size_in_bits) {
+			if (gb->size_in_bits == 8) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "frame skip %d\n", gb->size_in_bits);
+				return FRAME_SKIPPED;  // divx bug
+			} else
+				return -1;  // end of stream
+		}
+
+		/* use the bits after the test */
+		v = get_bits(gb, 8);
+		startcode = ((startcode << 8) | v) & 0xffffffff;
+
+		if ((startcode & 0xFFFFFF00) != 0x100)
+			continue;  // no startcode
+
+		if (1) { //debug
+			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "startcode: %3X \n", startcode);
+			if (startcode <= 0x11F)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Video Object Start\n");
+			else if (startcode <= 0x12F)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Video Object Layer Start\n");
+			else if (startcode <= 0x13F)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Reserved\n");
+			else if (startcode <= 0x15F)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "FGS bp start\n");
+			else if (startcode <= 0x1AF)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Reserved\n");
+			else if (startcode == 0x1B0)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Visual Object Seq Start\n");
+			else if (startcode == 0x1B1)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Visual Object Seq End\n");
+			else if (startcode == 0x1B2)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "User Data\n");
+			else if (startcode == 0x1B3)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Group of VOP start\n");
+			else if (startcode == 0x1B4)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Video Session Error\n");
+			else if (startcode == 0x1B5)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Visual Object Start\n");
+			else if (startcode == 0x1B6)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Video Object Plane start\n");
+			else if (startcode == 0x1B7)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "slice start\n");
+			else if (startcode == 0x1B8)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "extension start\n");
+			else if (startcode == 0x1B9)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "fgs start\n");
+			else if (startcode == 0x1BA)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "FBA Object start\n");
+			else if (startcode == 0x1BB)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "FBA Object Plane start\n");
+			else if (startcode == 0x1BC)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Mesh Object start\n");
+			else if (startcode == 0x1BD)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Mesh Object Plane start\n");
+			else if (startcode == 0x1BE)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Still Texture Object start\n");
+			else if (startcode == 0x1BF)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Texture Spatial Layer start\n");
+			else if (startcode == 0x1C0)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Texture SNR Layer start\n");
+			else if (startcode == 0x1C1)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Texture Tile start\n");
+			else if (startcode == 0x1C2)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "Texture Shape Layer start\n");
+			else if (startcode == 0x1C3)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "stuffing start\n");
+			else if (startcode <= 0x1C5)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "reserved\n");
+			else if (startcode <= 0x1FF)
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "System start\n");
+		}
+
+		if (startcode >= 0x120 && startcode <= 0x12F) {
+			if (vol) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Ignoring multiple VOL headers\n");
+				continue;
+			}
+			vol++;
+			if ((ret = decode_vol_header(ctx, gb)) < 0)
+				return ret;
+		} else if (startcode == USER_DATA_STARTCODE) {
+			decode_user_data(ctx, gb);
+		} else if (startcode == GOP_STARTCODE) {
+			mpeg4_decode_gop_header(s, gb);
+		} else if (startcode == VOS_STARTCODE) {
+		int profile, level;
+		mpeg4_decode_profile_level(s, gb, &profile, &level);
+		if (profile == FF_PROFILE_MPEG4_SIMPLE_STUDIO &&
+			(level > 0 && level < 9)) {
+				s->studio_profile = 1;
+				next_start_code_studio(gb);
+				extension_and_user_data(s, gb, 0);
+			} else if (s->studio_profile) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Mixes studio and non studio profile\n");
+				return -1;
+			}
+			ctx->profile = profile;
+			ctx->level   = level;
+		} else if (startcode == VISUAL_OBJ_STARTCODE) {
+			if (s->studio_profile) {
+				if ((ret = decode_studiovisualobject(ctx, gb)) < 0)
+					return ret;
+			} else
+			mpeg4_decode_visual_object(s, gb);
+		} else if (startcode == VOP_STARTCODE) {
+			break;
+		}
+
+		align_get_bits(gb);
+		startcode = 0xff;
+	}
+
+end:
+	if (s->studio_profile) {
+		if (!bits_per_raw_sample) {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Missing VOL header\n");
+			return -1;
+		}
+		return decode_studio_vop_header(ctx, gb);
+	} else
+		return decode_vop_header(ctx, gb);
+}
+
+int mpeg4_decode_extradata_ps(u8 *buf, int size, struct mpeg4_param_sets *ps)
+{
+	int ret = 0;
+	struct get_bits_context gb;
+
+	ps->head_parsed = false;
+
+	init_get_bits8(&gb, buf, size);
+
+	ret = ff_mpeg4_decode_picture_header(&ps->dec_ps, &gb);
+	if (ret < -1) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Failed to parse extradata\n");
+		return ret;
+	}
+
+	if (ps->dec_ps.m.width && ps->dec_ps.m.height)
+		ps->head_parsed = true;
+
+	return 0;
+}
+
diff --git a/drivers/amvdec_ports/decoder/aml_mpeg4_parser.h b/drivers/amvdec_ports/decoder/aml_mpeg4_parser.h
new file mode 100644
index 0000000..3e5bf62
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/aml_mpeg4_parser.h
@@ -0,0 +1,275 @@
+/*
+ * drivers/amvdec_ports/decoder/aml_mpeg4_parser.h
+ *
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+#ifndef AVCODEC_MPEG4VIDEO_H
+#define AVCODEC_MPEG4VIDEO_H
+
+#include "../aml_vcodec_drv.h"
+#include "../utils/common.h"
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+#include "../utils/pixfmt.h"
+#endif
+
+//mpeg4 profile
+#define FF_PROFILE_MPEG4_SIMPLE                     0
+#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE            1
+#define FF_PROFILE_MPEG4_CORE                       2
+#define FF_PROFILE_MPEG4_MAIN                       3
+#define FF_PROFILE_MPEG4_N_BIT                      4
+#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE           5
+#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION      6
+#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE     7
+#define FF_PROFILE_MPEG4_HYBRID                     8
+#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME         9
+#define FF_PROFILE_MPEG4_CORE_SCALABLE             10
+#define FF_PROFILE_MPEG4_ADVANCED_CODING           11
+#define FF_PROFILE_MPEG4_ADVANCED_CORE             12
+#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
+#define FF_PROFILE_MPEG4_SIMPLE_STUDIO             14
+#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE           15
+
+// shapes
+#define RECT_SHAPE       0
+#define BIN_SHAPE        1
+#define BIN_ONLY_SHAPE   2
+#define GRAY_SHAPE       3
+
+#define SIMPLE_VO_TYPE           1
+#define CORE_VO_TYPE             3
+#define MAIN_VO_TYPE             4
+#define NBIT_VO_TYPE             5
+#define ARTS_VO_TYPE            10
+#define ACE_VO_TYPE             12
+#define SIMPLE_STUDIO_VO_TYPE   14
+#define CORE_STUDIO_VO_TYPE     15
+#define ADV_SIMPLE_VO_TYPE      17
+
+#define VOT_VIDEO_ID 1
+#define VOT_STILL_TEXTURE_ID 2
+
+#define FF_PROFILE_UNKNOWN -99
+#define FF_PROFILE_RESERVED -100
+
+// aspect_ratio_info
+#define EXTENDED_PAR 15
+
+//vol_sprite_usage / sprite_enable
+#define STATIC_SPRITE 1
+#define GMC_SPRITE 2
+
+#define MOTION_MARKER 0x1F001
+#define DC_MARKER     0x6B001
+
+#define VOS_STARTCODE        0x1B0
+#define USER_DATA_STARTCODE  0x1B2
+#define GOP_STARTCODE        0x1B3
+#define VISUAL_OBJ_STARTCODE 0x1B5
+#define VOP_STARTCODE        0x1B6
+#define SLICE_STARTCODE      0x1B7
+#define EXT_STARTCODE        0x1B8
+
+#define QUANT_MATRIX_EXT_ID  0x3
+
+/* smaller packets likely don't contain a real frame */
+#define MAX_NVOP_SIZE 19
+
+#define IS_3IV1 0
+
+#define CHROMA_420 1
+#define CHROMA_422 2
+#define CHROMA_444 3
+
+#define FF_ASPECT_EXTENDED 15
+
+#define AV_NOPTS_VALUE          (LONG_MIN)
+
+/**
+ * Return value for header parsers if frame is not coded.
+ * */
+#define FRAME_SKIPPED 100
+
+enum AVPictureType {
+    AV_PICTURE_TYPE_NONE = 0, ///< Undefined
+    AV_PICTURE_TYPE_I,     ///< Intra
+    AV_PICTURE_TYPE_P,     ///< Predicted
+    AV_PICTURE_TYPE_B,     ///< Bi-dir predicted
+    AV_PICTURE_TYPE_S,     ///< S(GMC)-VOP MPEG-4
+    AV_PICTURE_TYPE_SI,    ///< Switching Intra
+    AV_PICTURE_TYPE_SP,    ///< Switching Predicted
+    AV_PICTURE_TYPE_BI,    ///< BI type
+};
+
+struct VLC {
+	int bits;
+	short (*table)[2]; ///< code, bits
+	int table_size, table_allocated;
+};
+
+/**
+ * MpegEncContext.
+ */
+struct MpegEncContext {
+	struct mpeg4_dec_param *ctx;
+
+	/* the following parameters must be initialized before encoding */
+	int width, height;///< picture size. must be a multiple of 16
+	int codec_tag;             ///< internal codec_tag upper case converted from avctx codec_tag
+	int picture_number;       //FIXME remove, unclear definition
+
+	/** matrix transmitted in the bitstream */
+	u16 intra_matrix[64];
+	u16 chroma_intra_matrix[64];
+	u16 inter_matrix[64];
+	u16 chroma_inter_matrix[64];
+
+	/* MPEG-4 specific */
+	int studio_profile;
+	int time_base;                  ///< time in seconds of last I,P,S Frame
+	int quant_precision;
+	int quarter_sample;              ///< 1->qpel, 0->half pel ME/MC
+	int aspect_ratio_info; //FIXME remove
+	int sprite_warping_accuracy;
+	int data_partitioning;           ///< data partitioning flag from header
+	int low_delay;                   ///< no reordering needed / has no B-frames
+	int vo_type;
+	int mpeg_quant;
+
+	/* divx specific, used to workaround (many) bugs in divx5 */
+	int divx_packed;
+
+	/* MPEG-2-specific - I wished not to have to support this mess. */
+	int progressive_sequence;
+
+	int progressive_frame;
+	int interlaced_dct;
+
+	int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replication)
+	const u8 *y_dc_scale_table;     ///< qscale -> y_dc_scale table
+	const u8 *c_dc_scale_table;     ///< qscale -> c_dc_scale table
+	int qscale;		    ///< QP
+	int chroma_qscale;	    ///< chroma QP
+	int pict_type;		    ///< AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
+	int f_code;		    ///< forward MV resolution
+	int b_code;		    ///< backward MV resolution for B-frames (MPEG-4)
+	int no_rounding;  /**< apply no rounding to motion compensation (MPEG-4, msmpeg4, ...)
+	    for B-frames rounding mode is always 0 */
+	int last_time_base;
+	long time;		    ///< time of current frame
+	long last_non_b_time;
+	u16 pp_time;		    ///< time distance between the last 2 p,s,i frames
+	u16 pb_time;		    ///< time distance between the last b and p,s,i frame
+	u16 pp_field_time;
+	u16 pb_field_time;	    ///< like above, just for interlaced
+	int real_sprite_warping_points;
+	int sprite_offset[2][2];	     ///< sprite offset[isChroma][isMVY]
+	int sprite_delta[2][2];	     ///< sprite_delta [isY][isMVY]
+	int mcsel;
+	int partitioned_frame;	     ///< is current frame partitioned
+	int top_field_first;
+	int alternate_scan;
+	int last_dc[3];                ///< last DC values for MPEG-1
+	int dct_precision;
+	int intra_dc_precision;
+	int frame_pred_frame_dct;
+	int q_scale_type;
+	int context_reinit;
+	int chroma_format;
+};
+
+struct mpeg4_dec_param {
+	struct MpegEncContext m;
+
+	/// number of bits to represent the fractional part of time
+	int time_increment_bits;
+	int shape;
+	int vol_sprite_usage;
+	int sprite_brightness_change;
+	int num_sprite_warping_points;
+	/// sprite trajectory points
+	u16 sprite_traj[4][2];
+	/// sprite shift [isChroma]
+	int sprite_shift[2];
+
+	// reversible vlc
+	int rvlc;
+	/// could this stream contain resync markers
+	int resync_marker;
+	/// time distance of first I -> B, used for interlaced B-frames
+	int t_frame;
+
+	int new_pred;
+	int enhancement_type;
+	int scalability;
+	int use_intra_dc_vlc;
+
+	/// QP above which the ac VLC should be used for intra dc
+	int intra_dc_threshold;
+
+	/* bug workarounds */
+	int divx_version;
+	int divx_build;
+	int xvid_build;
+	int lavc_build;
+
+	/// flag for having shown the warning about invalid Divx B-frames
+	int showed_packed_warning;
+	/** does the stream contain the low_delay flag,
+	*  used to work around buggy encoders. */
+	int vol_control_parameters;
+	int cplx_estimation_trash_i;
+	int cplx_estimation_trash_p;
+	int cplx_estimation_trash_b;
+
+	struct VLC studio_intra_tab[12];
+	struct VLC studio_luma_dc;
+	struct VLC studio_chroma_dc;
+
+	int rgb;
+
+	struct AVRational time_base;
+	int ticks_per_frame;
+	struct AVRational sample_aspect_ratio;
+	enum AVColorPrimaries color_primaries;
+	enum AVColorTransferCharacteristic color_trc;
+	enum AVColorSpace colorspace;
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+	enum AVPixelFormat pix_fmt;
+	enum AVColorRange color_range;
+	enum AVChromaLocation chroma_sample_location;
+#endif
+	int err_recognition;
+	int idct_algo;
+	int bits_per_raw_sample;
+	int profile;
+	int level;
+	struct AVRational framerate;
+	int flags;
+};
+
+struct mpeg4_param_sets {
+	bool head_parsed;
+	/* currently active parameter sets */
+	struct mpeg4_dec_param dec_ps;
+};
+
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+int mpeg4_decode_extradata_ps(u8 *buf, int size, struct mpeg4_param_sets *ps);
+#else
+inline int mpeg4_decode_extradata_ps(u8 *buf, int size, struct mpeg4_param_sets *ps) { return -1; }
+#endif
+
+#endif
+
diff --git a/drivers/amvdec_ports/decoder/aml_vp9_parser.c b/drivers/amvdec_ports/decoder/aml_vp9_parser.c
new file mode 100644
index 0000000..b027b8a
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/aml_vp9_parser.c
@@ -0,0 +1,318 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "aml_vp9_parser.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+#include "../utils/golomb.h"
+#include "../utils/common.h"
+#include "utils.h"
+
+#define VP9_SYNCCODE 0x498342
+
+static int read_colorspace_details(struct VP9Context *s, int profile)
+{
+	static const enum AVColorSpace colorspaces[8] = {
+		AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_BT470BG, AVCOL_SPC_BT709, AVCOL_SPC_SMPTE170M,
+		AVCOL_SPC_SMPTE240M, AVCOL_SPC_BT2020_NCL, AVCOL_SPC_RESERVED, AVCOL_SPC_RGB,
+	};
+
+	enum AVColorSpace colorspace;
+	int color_range;
+	int bits = profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
+
+	s->bpp_index = bits;
+	s->s.h.bpp = 8 + bits * 2;
+	s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
+	colorspace = colorspaces[get_bits(&s->gb, 3)];
+	if (colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
+		if (profile & 1) {
+			if (get_bits1(&s->gb)) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Reserved bit set in RGB\n");
+				return -1;
+			}
+		} else {
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "RGB not supported in profile %d\n", profile);
+			return -1;
+		}
+	} else {
+		static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
+			{ { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P },
+			{ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV420P } },
+			{ { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10 },
+			{ AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV420P10 } },
+			{ { AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12 },
+			{ AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV420P12 } }};
+		color_range = get_bits1(&s->gb) ? 2 : 1;
+		if (profile & 1) {
+			s->ss_h = get_bits1(&s->gb);
+			s->ss_v = get_bits1(&s->gb);
+			s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
+			if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "YUV 4:2:0 not supported in profile %d\n", profile);
+				return -1;
+			} else if (get_bits1(&s->gb)) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Profile %d color details reserved bit set\n", profile);
+				return -1;
+			}
+		} else {
+			s->ss_h = s->ss_v = 1;
+			s->pix_fmt = pix_fmt_for_ss[bits][1][1];
+		}
+	}
+
+	return 0;
+}
+
+int decode_frame_header(const u8 *data, int size, struct VP9Context *s, int *ref)
+{
+	int ret, last_invisible, profile;
+
+	/* general header */
+	if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Failed to initialize bitstream reader\n");
+		return ret;
+	}
+
+	if (get_bits(&s->gb, 2) != 0x2) { // frame marker
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid frame marker\n");
+		return -1;
+	}
+
+	profile  = get_bits1(&s->gb);
+	profile |= get_bits1(&s->gb) << 1;
+	if (profile == 3)
+		profile += get_bits1(&s->gb);
+
+	if (profile > 3) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Profile %d is not yet supported\n", profile);
+		return -1;
+	}
+
+	s->s.h.profile = profile;
+	if (get_bits1(&s->gb)) {
+		*ref = get_bits(&s->gb, 3);
+		return 0;
+	}
+
+	s->last_keyframe  = s->s.h.keyframe;
+	s->s.h.keyframe   = !get_bits1(&s->gb);
+
+	last_invisible   = s->s.h.invisible;
+	s->s.h.invisible = !get_bits1(&s->gb);
+	s->s.h.errorres  = get_bits1(&s->gb);
+	s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
+
+	if (s->s.h.keyframe) {
+		if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
+			v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid sync code\n");
+			return -1;
+		}
+		if ((ret = read_colorspace_details(s,profile)) < 0)
+			return ret;
+		// for profile 1, here follows the subsampling bits
+		s->s.h.refreshrefmask = 0xff;
+		s->width = get_bits(&s->gb, 16) + 1;
+		s->height = get_bits(&s->gb, 16) + 1;
+		if (get_bits1(&s->gb)) { // has scaling
+			s->render_width = get_bits(&s->gb, 16) + 1;
+			s->render_height = get_bits(&s->gb, 16) + 1;
+		} else {
+			s->render_width = s->width;
+			s->render_height = s->height;
+		}
+		/*pr_info("keyframe res: (%d x %d), render size: (%d x %d)\n",
+			s->width, s->height, s->render_width, s->render_height);*/
+	} else {
+		s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
+		s->s.h.resetctx  = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
+		if (s->s.h.intraonly) {
+			if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
+				v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid sync code\n");
+				return -1;
+			}
+			if (profile >= 1) {
+				if ((ret = read_colorspace_details(s, profile)) < 0)
+					return ret;
+			} else {
+				s->ss_h = s->ss_v = 1;
+				s->s.h.bpp = 8;
+				s->bpp_index = 0;
+				s->bytesperpixel = 1;
+				s->pix_fmt = AV_PIX_FMT_YUV420P;
+			}
+			s->s.h.refreshrefmask = get_bits(&s->gb, 8);
+			s->width = get_bits(&s->gb, 16) + 1;
+			s->height = get_bits(&s->gb, 16) + 1;
+			if (get_bits1(&s->gb)) { // has scaling
+				s->render_width = get_bits(&s->gb, 16) + 1;
+				s->render_height = get_bits(&s->gb, 16) + 1;
+			} else {
+				s->render_width = s->width;
+				s->render_height = s->height;
+			}
+			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "intra res: (%d x %d), render size: (%d x %d)\n",
+				s->width, s->height, s->render_width, s->render_height);
+		} else {
+			s->s.h.refreshrefmask = get_bits(&s->gb, 8);
+			s->s.h.refidx[0]      = get_bits(&s->gb, 3);
+			s->s.h.signbias[0]    = get_bits1(&s->gb) && !s->s.h.errorres;
+			s->s.h.refidx[1]      = get_bits(&s->gb, 3);
+			s->s.h.signbias[1]    = get_bits1(&s->gb) && !s->s.h.errorres;
+			s->s.h.refidx[2]      = get_bits(&s->gb, 3);
+			s->s.h.signbias[2]    = get_bits1(&s->gb) && !s->s.h.errorres;
+
+			/*refresh_frame_flags;
+			for (i = 0; i < REFS_PER_FRAME; ++i) {
+				frame_refs[i];
+				ref_frame_sign_biases[i];
+			}
+			frame_size_from_refs();
+			high_precision_mv;
+			interp_filter();*/
+
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int vp9_superframe_split_filter(struct vp9_superframe_split *s)
+{
+	int i, j, ret, marker;
+	bool is_superframe = false;
+	int *prefix = (int *)s->data;
+
+	if (!s->data)
+		return -1;
+
+	#define AML_PREFIX ('V' << 24 | 'L' << 16 | 'M' << 8 | 'A')
+	if (prefix[3] == AML_PREFIX) {
+		s->prefix_size = 16;
+		/*pr_info("the frame data already has the header added\n");*/
+	}
+
+	marker = s->data[s->data_size - 1];
+	if ((marker & 0xe0) == 0xc0) {
+		int length_size = 1 + ((marker >> 3) & 0x3);
+		int   nb_frames = 1 + (marker & 0x7);
+		int    idx_size = 2 + nb_frames * length_size;
+
+		if (s->data_size >= idx_size &&
+			s->data[s->data_size - idx_size] == marker) {
+			s64 total_size = 0;
+			int idx = s->data_size + 1 - idx_size;
+
+			for (i = 0; i < nb_frames; i++) {
+				int frame_size = 0;
+				for (j = 0; j < length_size; j++)
+					frame_size |= s->data[idx++] << (j * 8);
+
+				total_size += frame_size;
+				if (frame_size < 0 ||
+					total_size > s->data_size - idx_size) {
+					v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "Invalid frame size in a sframe: %d\n",
+						frame_size);
+					ret = -EINVAL;
+					goto fail;
+				}
+				s->sizes[i] = frame_size;
+			}
+
+			s->nb_frames         = nb_frames;
+			s->size              = total_size;
+			s->next_frame        = 0;
+			s->next_frame_offset = 0;
+			is_superframe        = true;
+		}
+	}else {
+		s->nb_frames = 1;
+		s->sizes[0]  = s->data_size;
+		s->size      = s->data_size;
+	}
+
+	/*pr_info("sframe: %d, frames: %d, IN: %x, OUT: %x\n",
+		is_superframe, s->nb_frames,
+		s->data_size, s->size);*/
+
+	/* parse uncompressed header. */
+	if (is_superframe) {
+		/* bitstream profile. */
+		/* frame type. (intra or inter) */
+		/* colorspace descriptor */
+		/* ... */
+
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "the frame is a superframe.\n");
+	}
+
+	/*pr_err("in: %x, %d, out: %x, sizes %d,%d,%d,%d,%d,%d,%d,%d\n",
+		s->data_size,
+		s->nb_frames,
+		s->size,
+		s->sizes[0],
+		s->sizes[1],
+		s->sizes[2],
+		s->sizes[3],
+		s->sizes[4],
+		s->sizes[5],
+		s->sizes[6],
+		s->sizes[7]);*/
+
+	return 0;
+fail:
+	return ret;
+}
+
+int vp9_decode_extradata_ps(u8 *data, int size, struct vp9_param_sets *ps)
+{
+	int i, ref = -1, ret = 0;
+	struct vp9_superframe_split s = {0};
+
+	/*parse superframe.*/
+	s.data = data;
+	s.data_size = size;
+	ret = vp9_superframe_split_filter(&s);
+	if (ret) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR, "parse frames failed.\n");
+		return ret;
+	}
+
+	for (i = 0; i < s.nb_frames; i++) {
+		u32 len = s.sizes[i] - s.prefix_size;
+		u8 *buf = s.data + s.next_frame_offset + s.prefix_size;
+
+		ret = decode_frame_header(buf, len, &ps->ctx, &ref);
+		if (!ret) {
+			ps->head_parsed = ref < 0 ? true : false;
+			return 0;
+		}
+
+		s.next_frame_offset = len + s.prefix_size;
+	}
+
+	return ret;
+}
+
diff --git a/drivers/amvdec_ports/decoder/aml_vp9_parser.h b/drivers/amvdec_ports/decoder/aml_vp9_parser.h
new file mode 100644
index 0000000..ddeddec
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/aml_vp9_parser.h
@@ -0,0 +1,184 @@
+/*
+ * drivers/amvdec_ports/decoder/aml_vp9_parser.h
+ *
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef AML_VP9_PARSER_H
+#define AML_VP9_PARSER_H
+
+#include "../aml_vcodec_drv.h"
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+#include "../utils/pixfmt.h"
+#include "../utils/get_bits.h"
+#endif
+
+#define MAX_SEGMENT	8
+
+struct VP9BitstreamHeader {
+	// bitstream header
+	u8 profile;
+	u8 bpp;
+	u8 keyframe;
+	u8 invisible;
+	u8 errorres;
+	u8 intraonly;
+	u8 resetctx;
+	u8 refreshrefmask;
+	u8 highprecisionmvs;
+	u8 allowcompinter;
+	u8 refreshctx;
+	u8 parallelmode;
+	u8 framectxid;
+	u8 use_last_frame_mvs;
+	u8 refidx[3];
+	u8 signbias[3];
+	u8 fixcompref;
+	u8 varcompref[2];
+	struct {
+		u8 level;
+		char sharpness;
+	} filter;
+	struct {
+		u8 enabled;
+		u8 updated;
+		char mode[2];
+		char ref[4];
+	} lf_delta;
+	u8 yac_qi;
+	char ydc_qdelta, uvdc_qdelta, uvac_qdelta;
+	u8 lossless;
+	struct {
+		u8 enabled;
+		u8 temporal;
+		u8 absolute_vals;
+		u8 update_map;
+		u8 prob[7];
+		u8 pred_prob[3];
+		struct {
+			u8 q_enabled;
+			u8 lf_enabled;
+			u8 ref_enabled;
+			u8 skip_enabled;
+			u8 ref_val;
+			int16_t q_val;
+			char lf_val;
+			int16_t qmul[2][2];
+			u8 lflvl[4][2];
+		} feat[MAX_SEGMENT];
+	} segmentation;
+	struct {
+		u32 log2_tile_cols, log2_tile_rows;
+		u32 tile_cols, tile_rows;
+	} tiling;
+
+	int uncompressed_header_size;
+	int compressed_header_size;
+};
+
+struct VP9SharedContext {
+	struct VP9BitstreamHeader h;
+
+	//struct ThreadFrame refs[8];
+#define CUR_FRAME 0
+#define REF_FRAME_MVPAIR 1
+#define REF_FRAME_SEGMAP 2
+	//struct VP9Frame frames[3];
+};
+
+struct VP9Context {
+	struct VP9SharedContext s;
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+	struct get_bits_context gb;
+#endif
+	int pass, active_tile_cols;
+
+	u8 ss_h, ss_v;
+	u8 last_bpp, bpp_index, bytesperpixel;
+	u8 last_keyframe;
+	// sb_cols/rows, rows/cols and last_fmt are used for allocating all internal
+	// arrays, and are thus per-thread. w/h and gf_fmt are synced between threads
+	// and are therefore per-stream. pix_fmt represents the value in the header
+	// of the currently processed frame.
+	int width;
+	int height;
+
+	int render_width;
+	int render_height;
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+	enum AVPixelFormat pix_fmt, last_fmt, gf_fmt;
+#endif
+	u32 sb_cols, sb_rows, rows, cols;
+
+	struct {
+		u8 lim_lut[64];
+		u8 mblim_lut[64];
+	} filter_lut;
+	struct {
+		u8 coef[4][2][2][6][6][3];
+	} prob_ctx[4];
+	struct {
+		u8 coef[4][2][2][6][6][11];
+	} prob;
+
+	// contextual (above) cache
+	u8 *above_partition_ctx;
+	u8 *above_mode_ctx;
+	// FIXME maybe merge some of the below in a flags field?
+	u8 *above_y_nnz_ctx;
+	u8 *above_uv_nnz_ctx[2];
+	u8 *above_skip_ctx; // 1bit
+	u8 *above_txfm_ctx; // 2bit
+	u8 *above_segpred_ctx; // 1bit
+	u8 *above_intra_ctx; // 1bit
+	u8 *above_comp_ctx; // 1bit
+	u8 *above_ref_ctx; // 2bit
+	u8 *above_filter_ctx;
+
+	// whole-frame cache
+	u8 *intra_pred_data[3];
+
+	// block reconstruction intermediates
+	int block_alloc_using_2pass;
+	uint16_t mvscale[3][2];
+	u8 mvstep[3][2];
+};
+
+struct vp9_superframe_split {
+	/*in data*/
+	u8 *data;
+	u32 data_size;
+
+	/*out data*/
+	int nb_frames;
+	int size;
+	int next_frame;
+	u32 next_frame_offset;
+	int prefix_size;
+	int sizes[8];
+};
+
+struct vp9_param_sets {
+	bool head_parsed;
+	struct VP9Context ctx;
+};
+
+#ifdef CONFIG_AMLOGIC_MEDIA_V4L_SOFTWARE_PARSER
+int vp9_superframe_split_filter(struct vp9_superframe_split *s);
+int vp9_decode_extradata_ps(u8 *data, int size, struct vp9_param_sets *ps);
+#else
+inline int vp9_decode_extradata_ps(u8 *data, int size, struct vp9_param_sets *ps) { return -1; }
+#endif
+
+#endif //AML_VP9_PARSER_H
diff --git a/drivers/amvdec_ports/decoder/utils.h b/drivers/amvdec_ports/decoder/utils.h
new file mode 100644
index 0000000..26b1552
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/utils.h
@@ -0,0 +1,31 @@
+/*
+ * drivers/amlogic/media_modules/amvdec_ports/decoder/utils.h
+ *
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _UTILS_H
+#define _UTILS_H
+
+/* Generic helpers.  Every macro argument is fully parenthesized so the
+ * macros expand correctly for compound arguments such as MAX(a | b, c). */
+#define MAX(a, b)  (((a) > (b)) ? (a) : (b))
+#define MIN(a, b)  (((a) < (b)) ? (a) : (b))
+#define CLAMP(x, low, high) \
+	(((x) > (high)) ? (high) : (((x) < (low)) ? (low) : (x)))
+/* Test bit @n of @x.  Fix: the original expansion used bare x/n
+ * ((x & (1 << n)) == (1 << n)), which mis-parses for arguments with
+ * lower-precedence operators, e.g. BITAT(a | b, n) or BITAT(x, n + 1). */
+#define BITAT(x, n) (((x) & (1 << (n))) == (1 << (n)))
+
+/* NOTE(review): these typedefs shadow the kernel's <linux/types.h>
+ * fixed-width types; kept unchanged since other files may depend on them. */
+typedef unsigned char uint8_t;
+typedef int int32_t;
+typedef unsigned int uint32_t;
+
+#endif //_UTILS_H
diff --git a/drivers/amvdec_ports/decoder/vdec_av1_if.c b/drivers/amvdec_ports/decoder/vdec_av1_if.c
new file mode 100644
index 0000000..e8693d4
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/vdec_av1_if.c
@@ -0,0 +1,1329 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <uapi/linux/swab.h>
+#include "../vdec_drv_if.h"
+#include "../aml_vcodec_util.h"
+#include "../aml_vcodec_dec.h"
+#include "../aml_vcodec_drv.h"
+#include "../aml_vcodec_adapt.h"
+#include "../vdec_drv_base.h"
+#include "../aml_vcodec_vfm.h"
+#include "../utils/common.h"
+
+#define KERNEL_ATRACE_TAG KERNEL_ATRACE_TAG_V4L2
+#include <trace/events/meson_atrace.h>
+
+/* Size in bytes of a per-OBU prefix header (see the header built in
+ * parser_frame()). */
+#define PREFIX_SIZE	(16)
+
+/* Capacity of vsi->header_buf, used to cache stream header data. */
+#define HEADER_BUFFER_SIZE			(32 * 1024)
+/* 24-bit sync code tested by monitor_res_change(). */
+#define SYNC_CODE				(0x498342)
+
+/* Defined elsewhere in the driver: nonzero when frames must be re-packed
+ * with a prefix header before being written to the decoder. */
+extern int av1_need_prefix;
+
+/**
+ * struct av1_fb - av1 decode frame buffer information
+ * @vdec_fb_va  : virtual address of struct vdec_fb
+ * @y_fb_dma    : dma address of Y frame buffer (luma)
+ * @c_fb_dma    : dma address of C frame buffer (chroma)
+ * @poc         : picture order count of frame buffer
+ * @reserved    : for 8 bytes alignment
+ */
+struct av1_fb {
+	uint64_t vdec_fb_va;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	int32_t poc;
+	uint32_t reserved;
+};
+
+/**
+ * struct vdec_av1_dec_info - decode information
+ * @dpb_sz		: decoding picture buffer size
+ * @resolution_changed  : resolution change happened
+ * @reserved		: for 8 bytes alignment
+ * @bs_dma		: Input bit-stream buffer dma address
+ * @y_fb_dma		: Y frame buffer dma address
+ * @c_fb_dma		: C frame buffer dma address
+ * @vdec_fb_va		: VDEC frame buffer struct virtual address
+ */
+struct vdec_av1_dec_info {
+	uint32_t dpb_sz;
+	uint32_t resolution_changed;
+	uint32_t reserved;
+	uint64_t bs_dma;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_av1_vsi - shared memory for decode information exchange
+ *                        between VPU and Host.
+ *                        The memory is allocated by VPU then mapping to Host
+ *                        in vpu_dec_init() and freed in vpu_dec_deinit()
+ *                        by VPU.
+ *                        AP-W/R : AP is writer/reader on this item
+ *                        VPU-W/R: VPU is write/reader on this item
+ * @header_buf   : header parsing buffer (AP-W, VPU-R)
+ * @sps_size     : bytes of cached SPS data in @header_buf
+ * @pps_size     : bytes of cached PPS data in @header_buf
+ * @sei_size     : bytes of cached SEI data in @header_buf
+ * @head_offset  : write offset into @header_buf
+ * @dec          : decode information (AP-R, VPU-W)
+ * @pic          : picture information (AP-R, VPU-W)
+ * @cur_pic      : snapshot of @pic used to detect resolution changes
+ * @crop         : crop information (AP-R, VPU-W)
+ * @is_combine   : header/frame data combined flag (semantics not visible
+ *                 in this file - confirm against the parser)
+ * @nalu_pos     : position of the current NAL unit (confirm semantics)
+ */
+struct vdec_av1_vsi {
+	char *header_buf;
+	int sps_size;
+	int pps_size;
+	int sei_size;
+	int head_offset;
+	struct vdec_av1_dec_info dec;
+	struct vdec_pic_info pic;
+	struct vdec_pic_info cur_pic;
+	struct v4l2_rect crop;
+	bool is_combine;
+	int nalu_pos;
+};
+
+/**
+ * struct vdec_av1_inst - av1 decoder instance
+ * @num_nalu : how many nalus be decoded
+ * @ctx      : point to aml_vcodec_ctx
+ * @vdec     : adapter-layer handle used to drive the HW decoder
+ * @vsi      : VPU shared information
+ * @vfm      : video frame manager connection
+ * @parms    : decode parameters latched from ctx->config
+ * @comp     : completed by set_param_write_sync() when ucode finishes
+ *             header parsing; waited on in parse_stream_ucode*()
+ */
+struct vdec_av1_inst {
+	unsigned int num_nalu;
+	struct aml_vcodec_ctx *ctx;
+	struct aml_vdec_adapt vdec;
+	struct vdec_av1_vsi *vsi;
+	struct vcodec_vfm_s vfm;
+	struct aml_dec_params parms;
+	struct completion comp;
+};
+
+/*!\brief OBU types (AV1 spec, section 6.2.2 obu_type). */
+enum OBU_TYPE {
+	OBU_SEQUENCE_HEADER = 1,
+	OBU_TEMPORAL_DELIMITER = 2,
+	OBU_FRAME_HEADER = 3,
+	OBU_TILE_GROUP = 4,
+	OBU_METADATA = 5,
+	OBU_FRAME = 6,
+	OBU_REDUNDANT_FRAME_HEADER = 7,
+	OBU_TILE_LIST = 8,
+	OBU_PADDING = 15,
+};
+
+/*!\brief OBU metadata types (AV1 spec, section 6.7.1 metadata_type). */
+enum OBU_METADATA_TYPE {
+	OBU_METADATA_TYPE_RESERVED_0 = 0,
+	OBU_METADATA_TYPE_HDR_CLL = 1,
+	OBU_METADATA_TYPE_HDR_MDCV = 2,
+	OBU_METADATA_TYPE_SCALABILITY = 3,
+	OBU_METADATA_TYPE_ITUT_T35 = 4,
+	OBU_METADATA_TYPE_TIMECODE = 5,
+};
+
+/* Parsed fields of one OBU header (filled by read_obu_header()). */
+struct ObuHeader {
+	size_t size;  // Size (1 or 2 bytes) of the OBU header (including the
+			// optional OBU extension header) in the bitstream.
+	enum OBU_TYPE type;
+	int has_size_field;	// obu_has_size_field bit
+	int has_extension;	// obu_extension_flag bit
+	// The following fields come from the OBU extension header and therefore are
+	// only used if has_extension is true.
+	int temporal_layer_id;
+	int spatial_layer_id;
+};
+
+/* leb128 values occupy at most 8 bytes (AV1 spec, section 4.10.5). */
+static const size_t kMaximumLeb128Size = 8;
+static const u8 kLeb128ByteMask = 0x7f;  // Binary: 01111111
+
+// Disallow values larger than 32-bits to ensure consistent behavior on 32 and
+// 64 bit targets: value is typically used to determine buffer allocation size
+// when decoded.
+// NOTE(review): ULONG_MAX is a 64-bit bound on 64-bit kernels, so this
+// (and the matching checks in uleb_decode()/read_obu_size()) does not
+// actually enforce a 32-bit limit there; UINT_MAX may have been intended.
+static const u64 kMaximumLeb128Value = ULONG_MAX;
+
+/* Human-readable OBU type names, indexed by enum OBU_TYPE (0..15). */
+char obu_type_name[16][32] = {
+	"UNKNOWN",
+	"OBU_SEQUENCE_HEADER",
+	"OBU_TEMPORAL_DELIMITER",
+	"OBU_FRAME_HEADER",
+	"OBU_TILE_GROUP",
+	"OBU_METADATA",
+	"OBU_FRAME",
+	"OBU_REDUNDANT_FRAME_HEADER",
+	"OBU_TILE_LIST",
+	"UNKNOWN",
+	"UNKNOWN",
+	"UNKNOWN",
+	"UNKNOWN",
+	"UNKNOWN",
+	"UNKNOWN",
+	"OBU_PADDING"
+};
+
+/* Metadata type names, indexed by enum OBU_METADATA_TYPE (0..5). */
+char meta_type_name[6][32] = {
+	"OBU_METADATA_TYPE_RESERVED_0",
+	"OBU_METADATA_TYPE_HDR_CLL",
+	"OBU_METADATA_TYPE_HDR_MDCV",
+	"OBU_METADATA_TYPE_SCALABILITY",
+	"OBU_METADATA_TYPE_ITUT_T35",
+	"OBU_METADATA_TYPE_TIMECODE"
+};
+
+/* Cursor over a byte range for MSB-first bit reads (rb_read_bit()). */
+struct read_bit_buffer {
+	const u8 *bit_buffer;		/* base of the readable range */
+	const u8 *bit_buffer_end;	/* one past the last readable byte */
+	u32 bit_offset;			/* bits consumed so far */
+};
+
+/* A borrowed (non-owning) byte span. */
+struct DataBuffer {
+	const u8 *data;
+	size_t size;
+};
+
+/* Forward declaration: re-packs and writes one frame to the decoder. */
+static int vdec_write_nalu(struct vdec_av1_inst *inst,
+	u8 *buf, u32 size, u64 ts);
+
+/*
+ * get_pic_info() - copy the parsed picture information out of the
+ * shared vsi into @pic and trace the geometry/buffer sizes.
+ */
+static void get_pic_info(struct vdec_av1_inst *inst,
+			 struct vdec_pic_info *pic)
+{
+	*pic = inst->vsi->pic;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"pic(%d, %d), buf(%d, %d)\n",
+		 pic->visible_width, pic->visible_height,
+		 pic->coded_width, pic->coded_height);
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"Y(%d, %d), C(%d, %d)\n",
+		pic->y_bs_sz, pic->y_len_sz,
+		pic->c_bs_sz, pic->c_len_sz);
+}
+
+/* Copy the crop rectangle from the shared vsi into @cr and trace it. */
+static void get_crop_info(struct vdec_av1_inst *inst, struct v4l2_rect *cr)
+{
+	cr->left = inst->vsi->crop.left;
+	cr->top = inst->vsi->crop.top;
+	cr->width = inst->vsi->crop.width;
+	cr->height = inst->vsi->crop.height;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"l=%d, t=%d, w=%d, h=%d\n",
+		 cr->left, cr->top, cr->width, cr->height);
+}
+
+/* Report the decoded-picture-buffer size parsed into the shared vsi. */
+static void get_dpb_size(struct vdec_av1_inst *inst, unsigned int *dpb_sz)
+{
+	*dpb_sz = inst->vsi->dec.dpb_sz;
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, "sz=%d\n", *dpb_sz);
+}
+
+/*
+ * vdec_config_default_parms() - write the default decoder configuration
+ * string ("key:value;" pairs) into @parm.
+ * @parm: destination character buffer.
+ *
+ * Returns the number of bytes written.
+ *
+ * Fix: the original returned "parm - pbuf", i.e. begin minus end - the
+ * negated length.  vdec_parser_parms() computes the same quantity as
+ * "pbuf - ctx->config.buf", confirming the intended operand order.
+ */
+static u32 vdec_config_default_parms(u8 *parm)
+{
+	u8 *pbuf = parm;
+
+	pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;");
+	pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:11;");
+	pbuf += sprintf(pbuf, "av1_double_write_mode:3;");
+	pbuf += sprintf(pbuf, "av1_buf_width:1920;");
+	pbuf += sprintf(pbuf, "av1_buf_height:1088;");
+	pbuf += sprintf(pbuf, "av1_max_pic_w:8192;");
+	pbuf += sprintf(pbuf, "av1_max_pic_h:4608;");
+	pbuf += sprintf(pbuf, "save_buffer_mode:0;");
+	pbuf += sprintf(pbuf, "no_head:0;");
+	pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:0;");
+	pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:0;");
+
+	return pbuf - parm;
+}
+
+/*
+ * vdec_parser_parms() - build the decoder configuration string from the
+ * userspace-supplied ctx->config (or defaults when CFGINFO is absent),
+ * optionally append HDR static metadata, and latch the result into the
+ * instance (inst->vdec.config / inst->parms).
+ */
+static void vdec_parser_parms(struct vdec_av1_inst *inst)
+{
+	struct aml_vcodec_ctx *ctx = inst->ctx;
+
+	if (ctx->config.parm.dec.parms_status &
+		V4L2_CONFIG_PARM_DECODE_CFGINFO) {
+		u8 *pbuf = ctx->config.buf;
+
+		pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;");
+		pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;",
+			ctx->config.parm.dec.cfg.ref_buf_margin);
+		pbuf += sprintf(pbuf, "av1_double_write_mode:%d;",
+			ctx->config.parm.dec.cfg.double_write_mode);
+		pbuf += sprintf(pbuf, "av1_buf_width:%d;",
+			ctx->config.parm.dec.cfg.init_width);
+		pbuf += sprintf(pbuf, "av1_buf_height:%d;",
+			ctx->config.parm.dec.cfg.init_height);
+		pbuf += sprintf(pbuf, "save_buffer_mode:0;");
+		pbuf += sprintf(pbuf, "no_head:0;");
+		pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;",
+			ctx->config.parm.dec.cfg.canvas_mem_mode);
+		pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:%d;",
+			ctx->config.parm.dec.cfg.canvas_mem_endian);
+		pbuf += sprintf(pbuf, "parm_v4l_low_latency_mode:%d;",
+			ctx->config.parm.dec.cfg.low_latency_mode);
+		ctx->config.length = pbuf - ctx->config.buf;
+	} else {
+		/* No config supplied by userspace: use built-in defaults. */
+		ctx->config.parm.dec.cfg.double_write_mode = 16;
+		ctx->config.parm.dec.cfg.ref_buf_margin = 7;
+		ctx->config.length = vdec_config_default_parms(ctx->config.buf);
+	}
+
+	if ((ctx->config.parm.dec.parms_status &
+		V4L2_CONFIG_PARM_DECODE_HDRINFO) &&
+		inst->parms.hdr.color_parms.present_flag) {
+		u8 *pbuf = ctx->config.buf + ctx->config.length;
+
+		pbuf += sprintf(pbuf, "HDRStaticInfo:%d;", 1);
+		pbuf += sprintf(pbuf, "mG.x:%d;",
+			ctx->config.parm.dec.hdr.color_parms.primaries[0][0]);
+		pbuf += sprintf(pbuf, "mG.y:%d;",
+			ctx->config.parm.dec.hdr.color_parms.primaries[0][1]);
+		pbuf += sprintf(pbuf, "mB.x:%d;",
+			ctx->config.parm.dec.hdr.color_parms.primaries[1][0]);
+		pbuf += sprintf(pbuf, "mB.y:%d;",
+			ctx->config.parm.dec.hdr.color_parms.primaries[1][1]);
+		pbuf += sprintf(pbuf, "mR.x:%d;",
+			ctx->config.parm.dec.hdr.color_parms.primaries[2][0]);
+		pbuf += sprintf(pbuf, "mR.y:%d;",
+			ctx->config.parm.dec.hdr.color_parms.primaries[2][1]);
+		pbuf += sprintf(pbuf, "mW.x:%d;",
+			ctx->config.parm.dec.hdr.color_parms.white_point[0]);
+		pbuf += sprintf(pbuf, "mW.y:%d;",
+			ctx->config.parm.dec.hdr.color_parms.white_point[1]);
+		/* NOTE(review): only luminance[0] is scaled by 1000 -
+		 * presumably a units conversion for max display luminance;
+		 * confirm against the consumer of this string. */
+		pbuf += sprintf(pbuf, "mMaxDL:%d;",
+			ctx->config.parm.dec.hdr.color_parms.luminance[0] * 1000);
+		pbuf += sprintf(pbuf, "mMinDL:%d;",
+			ctx->config.parm.dec.hdr.color_parms.luminance[1]);
+		pbuf += sprintf(pbuf, "mMaxCLL:%d;",
+			ctx->config.parm.dec.hdr.color_parms.content_light_level.max_content);
+		pbuf += sprintf(pbuf, "mMaxFALL:%d;",
+			ctx->config.parm.dec.hdr.color_parms.content_light_level.max_pic_average);
+		ctx->config.length	= pbuf - ctx->config.buf;
+		inst->parms.hdr		= ctx->config.parm.dec.hdr;
+		inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_HDRINFO;
+	}
+
+	inst->vdec.config	= ctx->config;
+	inst->parms.cfg		= ctx->config.parm.dec.cfg;
+	inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO;
+}
+
+/*
+ * vdec_av1_init() - allocate and initialize an AV1 decoder instance.
+ * @ctx:    owning v4l2 codec context.
+ * @h_vdec: out; receives the instance handle (0 on failure).
+ *
+ * Sets up the adapter layer, the vfm connection, the shared vsi and its
+ * header cache, then starts the decoder.  Returns 0 or a negative errno.
+ */
+static int vdec_av1_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+	struct vdec_av1_inst *inst = NULL;
+	int ret = -1;
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->vdec.video_type	= VFORMAT_AV1;
+	inst->vdec.filp		= ctx->dev->filp;
+	inst->vdec.ctx		= ctx;
+	inst->ctx		= ctx;
+
+	vdec_parser_parms(inst);
+
+	/* set play mode.*/
+	if (ctx->is_drm_mode)
+		inst->vdec.port.flag |= PORT_FLAG_DRM;
+
+	/* to enable av1 hw.*/
+	inst->vdec.port.type	= PORT_TYPE_HEVC;
+
+	/* init vfm */
+	inst->vfm.ctx		= ctx;
+	inst->vfm.ada_ctx	= &inst->vdec;
+	ret = vcodec_vfm_init(&inst->vfm);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"init vfm failed.\n");
+		goto err;
+	}
+
+	/* probe info from the stream */
+	inst->vsi = kzalloc(sizeof(struct vdec_av1_vsi), GFP_KERNEL);
+	if (!inst->vsi) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* alloc the header buffer to be used cache sps or spp etc.*/
+	inst->vsi->header_buf = kzalloc(HEADER_BUFFER_SIZE, GFP_KERNEL);
+	if (!inst->vsi->header_buf) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	init_completion(&inst->comp);
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"av1 Instance >> %lx\n", (ulong) inst);
+
+	ctx->ada_ctx	= &inst->vdec;
+	*h_vdec		= (unsigned long)inst;
+
+	/* init decoder. */
+	ret = video_decoder_init(&inst->vdec);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"vdec_av1 init err=%d\n", ret);
+		goto err;
+	}
+
+	//dump_init();
+
+	return 0;
+err:
+	/* NOTE(review): vcodec_vfm_release() is also reached when
+	 * vcodec_vfm_init() itself failed - confirm it tolerates that.
+	 * The kfree(NULL)-style guards below are redundant but harmless. */
+	if (inst)
+		vcodec_vfm_release(&inst->vfm);
+	if (inst && inst->vsi && inst->vsi->header_buf)
+		kfree(inst->vsi->header_buf);
+	if (inst && inst->vsi)
+		kfree(inst->vsi);
+	if (inst)
+		kfree(inst);
+	*h_vdec = 0;
+
+	return ret;
+}
+
+/*
+ * parse_stream_ucode() - feed header data to the decoder and let the
+ * ucode parse it.  Waits (up to 1s) for set_param_write_sync() to signal
+ * completion; succeeds only if the ucode reported a nonzero dpb size.
+ */
+static int parse_stream_ucode(struct vdec_av1_inst *inst,
+			      u8 *buf, u32 size, u64 timestamp)
+{
+	int ret = 0;
+
+	ret = vdec_write_nalu(inst, buf, size, timestamp);
+	if (ret < 0) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"write data failed. size: %d, err: %d\n", size, ret);
+		return ret;
+	}
+
+	/* wait ucode parse ending. */
+	wait_for_completion_timeout(&inst->comp,
+		msecs_to_jiffies(1000));
+
+	return inst->vsi->dec.dpb_sz ? 0 : -1;
+}
+
+/*
+ * parse_stream_ucode_dma() - same as parse_stream_ucode() but the input
+ * arrives as a dma address; @handle identifies the source buffer so it
+ * can be released via vdec_vframe_input_free.
+ */
+static int parse_stream_ucode_dma(struct vdec_av1_inst *inst,
+	ulong buf, u32 size, u64 timestamp, u32 handle)
+{
+	int ret = 0;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+
+	ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle,
+		vdec_vframe_input_free, inst->ctx);
+	if (ret < 0) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"write frame data failed. err: %d\n", ret);
+		return ret;
+	}
+
+	/* wait ucode parse ending. */
+	wait_for_completion_timeout(&inst->comp,
+		msecs_to_jiffies(1000));
+
+	return inst->vsi->dec.dpb_sz ? 0 : -1;
+}
+
+/* CPU-side header parsing is not implemented for AV1; always fails. */
+static int parse_stream_cpu(struct vdec_av1_inst *inst, u8 *buf, u32 size)
+{
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+		"can not suppport parse stream by cpu.\n");
+
+	return -1;
+}
+
+/*
+ * vdec_av1_probe() - probe stream information (resolution, dpb size)
+ * from the first buffer.  Routes to the ucode or CPU parser depending on
+ * drm mode, buffer model and param_sets_from_ucode, then snapshots the
+ * parsed picture info into cur_pic for later resolution-change checks.
+ */
+static int vdec_av1_probe(unsigned long h_vdec,
+	struct aml_vcodec_mem *bs, void *out)
+{
+	struct vdec_av1_inst *inst =
+		(struct vdec_av1_inst *)h_vdec;
+	u8 *buf = (u8 *)bs->vaddr;
+	u32 size = bs->size;
+	int ret = 0;
+
+	if (inst->ctx->is_drm_mode) {
+		if (bs->model == VB2_MEMORY_MMAP) {
+			struct aml_video_stream *s =
+				(struct aml_video_stream *) buf;
+
+			if ((s->magic != AML_VIDEO_MAGIC) &&
+				(s->type != V4L_STREAM_TYPE_MATEDATA))
+				return -1;
+
+			if (inst->ctx->param_sets_from_ucode) {
+				ret = parse_stream_ucode(inst, s->data,
+					s->len, bs->timestamp);
+			} else {
+				ret = parse_stream_cpu(inst, s->data, s->len);
+			}
+		} else if (bs->model == VB2_MEMORY_DMABUF ||
+			bs->model == VB2_MEMORY_USERPTR) {
+			ret = parse_stream_ucode_dma(inst, bs->addr, size,
+				bs->timestamp, BUFF_IDX(bs, bs->index));
+		}
+	} else {
+		if (inst->ctx->param_sets_from_ucode) {
+			ret = parse_stream_ucode(inst, buf, size, bs->timestamp);
+		} else {
+			ret = parse_stream_cpu(inst, buf, size);
+		}
+	}
+
+	/* Remember the probed geometry to detect later resolution changes. */
+	inst->vsi->cur_pic = inst->vsi->pic;
+
+	return ret;
+}
+
+/*
+ * vdec_av1_deinit() - stop the decoder and free the instance.
+ * The vsi/instance teardown is done under ctx->slock so readers of
+ * ctx->drv_handle cannot observe a half-freed instance; kfree() is safe
+ * in atomic context.
+ */
+static void vdec_av1_deinit(unsigned long h_vdec)
+{
+	ulong flags;
+	struct vdec_av1_inst *inst = (struct vdec_av1_inst *)h_vdec;
+	struct aml_vcodec_ctx *ctx = inst->ctx;
+
+	video_decoder_release(&inst->vdec);
+
+	vcodec_vfm_release(&inst->vfm);
+
+	//dump_deinit();
+
+	spin_lock_irqsave(&ctx->slock, flags);
+	if (inst->vsi && inst->vsi->header_buf)
+		kfree(inst->vsi->header_buf);
+
+	if (inst->vsi)
+		kfree(inst->vsi);
+
+	kfree(inst);
+
+	ctx->drv_handle = 0;
+	spin_unlock_irqrestore(&ctx->slock, flags);
+}
+
+/* Fetch a free frame buffer from the capture queue. */
+static int vdec_av1_get_fb(struct vdec_av1_inst *inst, struct vdec_v4l2_buffer **out)
+{
+	return get_fb_from_queue(inst->ctx, out);
+}
+
+/*
+ * vdec_av1_get_vf() - pop the next decoded vframe from the vfm path and
+ * return its associated v4l2 buffer marked for display.  *out is NULL
+ * when no frame is available.
+ */
+static void vdec_av1_get_vf(struct vdec_av1_inst *inst, struct vdec_v4l2_buffer **out)
+{
+	struct vframe_s *vf = NULL;
+	struct vdec_v4l2_buffer *fb = NULL;
+
+	vf = peek_video_frame(&inst->vfm);
+	if (!vf) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"there is no vframe.\n");
+		*out = NULL;
+		return;
+	}
+
+	vf = get_video_frame(&inst->vfm);
+	if (!vf) {
+		/* NOTE(review): message typo, likely meant "invalid". */
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"the vframe is avalid.\n");
+		*out = NULL;
+		return;
+	}
+
+	atomic_set(&vf->use_cnt, 1);
+
+	fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
+	fb->vf_handle = (unsigned long)vf;
+	fb->status = FB_ST_DISPLAY;
+
+	*out = fb;
+}
+
+// Returns 1 when OBU type is valid, and 0 otherwise.
+static int valid_obu_type(int obu_type)
+{
+	int valid_type = 0;
+
+	switch (obu_type) {
+	case OBU_SEQUENCE_HEADER:
+	case OBU_TEMPORAL_DELIMITER:
+	case OBU_FRAME_HEADER:
+	case OBU_TILE_GROUP:
+	case OBU_METADATA:
+	case OBU_FRAME:
+	case OBU_REDUNDANT_FRAME_HEADER:
+	case OBU_TILE_LIST:
+	case OBU_PADDING:
+		valid_type = 1;
+		break;
+	default:
+		/* Reserved type values (9..14 and 0) are rejected. */
+		break;
+	}
+
+	return valid_type;
+}
+
+/* Number of bytes needed to leb128-encode @value (7 payload bits/byte). */
+size_t uleb_size_in_bytes(u64 value)
+{
+	size_t size = 0;
+
+	do {
+		++size;
+	} while ((value >>= 7) != 0);
+
+	return size;
+}
+
+/*
+ * uleb_decode() - decode an unsigned leb128 value from @buffer.
+ * @buffer:    input bytes.
+ * @available: bytes readable at @buffer.
+ * @value:     out; decoded value.
+ * @length:    optional out; bytes consumed.
+ *
+ * Returns 0 on success, -1 on bad pointers, truncated input, or an
+ * encoding longer than kMaximumLeb128Size bytes.
+ */
+int uleb_decode(const u8 *buffer, size_t available,
+	u64 *value, size_t *length)
+{
+	int i;
+
+	if (buffer && value) {
+		*value = 0;
+
+		for (i = 0; i < kMaximumLeb128Size && i < available; ++i) {
+			const u8 decoded_byte = *(buffer + i) & kLeb128ByteMask;
+
+			*value |= ((u64)decoded_byte) << (i * 7);
+			if ((*(buffer + i) >> 7) == 0) {
+				if (length) {
+					*length = i + 1;
+				}
+
+				// Fail on values larger than 32-bits to ensure consistent behavior on
+				// 32 and 64 bit targets: value is typically used to determine buffer
+				// allocation size.
+				// NOTE(review): on 64-bit kernels ULONG_MAX
+				// equals U64_MAX, so this check never fires
+				// there - UINT_MAX may have been intended.
+				if (*value > ULONG_MAX)
+					return -1;
+
+				return 0;
+			}
+		}
+	}
+
+	// If we get here, either the buffer/value pointers were invalid,
+	// or we ran over the available space
+	return -1;
+}
+
+/*
+ * uleb_encode() - encode @value as unsigned leb128.
+ * @value:       value to encode.
+ * @available:   capacity of @coded_value in bytes.
+ * @coded_value: out; encoded bytes.
+ * @coded_size:  out; number of bytes written.
+ *
+ * Returns 0 on success, -1 on bad pointers or insufficient space.
+ */
+int uleb_encode(u64 value, size_t available,
+	u8 *coded_value, size_t *coded_size)
+{
+	int i;
+	const size_t leb_size = uleb_size_in_bytes(value);
+
+	if (leb_size > kMaximumLeb128Size ||
+		leb_size > available || !coded_value || !coded_size) {
+		return -1;
+	}
+
+	for (i = 0; i < leb_size; ++i) {
+		u8 byte = value & 0x7f;
+
+		value >>= 7;
+		if (value != 0) byte |= 0x80;  // Signal that more bytes follow.
+
+		*(coded_value + i) = byte;
+	}
+
+	*coded_size = leb_size;
+
+	return 0;
+}
+
+/*
+ * rb_read_bit() - read the next bit, MSB-first, advancing the cursor.
+ * Reads past bit_buffer_end return 0 without advancing.
+ */
+static int rb_read_bit(struct read_bit_buffer *rb)
+{
+	const u32 off = rb->bit_offset;
+	const u32 p = off >> 3;			/* byte index */
+	const int q = 7 - (int)(off & 0x7);	/* bit position within byte */
+
+	if (rb->bit_buffer + p < rb->bit_buffer_end) {
+		const int bit = (rb->bit_buffer[p] >> q) & 1;
+
+		rb->bit_offset = off + 1;
+		return bit;
+	} else {
+		return 0;
+	}
+}
+
+/* Read @bits bits MSB-first and return them as an integer. */
+static int rb_read_literal(struct read_bit_buffer *rb, int bits)
+{
+	int value = 0, bit;
+
+	for (bit = bits - 1; bit >= 0; bit--)
+		value |= rb_read_bit(rb) << bit;
+
+	return value;
+}
+
+/*
+ * read_obu_size() - decode the leb128 obu_size field at @data.
+ * @obu_size:          out; decoded size.
+ * @length_field_size: out; bytes the size field occupied.
+ *
+ * Returns 0 on success, -1 on decode failure.  (The overflow guard
+ * mirrors the ULONG_MAX caveat noted at uleb_decode().)
+ */
+static int read_obu_size(const u8 *data,
+	size_t bytes_available,
+	size_t *const obu_size,
+	size_t *const length_field_size)
+{
+	u64 u_obu_size = 0;
+
+	if (uleb_decode(data, bytes_available, &u_obu_size, length_field_size) != 0) {
+		return -1;
+	}
+
+	if (u_obu_size > ULONG_MAX)
+		return -1;
+
+	*obu_size = (size_t) u_obu_size;
+
+	return 0;
+}
+
+// Parses OBU header and stores values in 'header'.
+// Consumes 1 byte (2 with the extension header) from 'rb'.  Returns 0 on
+// success, -1 on truncated input, a set forbidden/reserved bit, an
+// invalid type, or a missing size field in a non-annexb stream.
+static int read_obu_header(struct read_bit_buffer *rb,
+	int is_annexb, struct ObuHeader *header)
+{
+	int bit_buffer_byte_length;
+
+	if (!rb || !header)
+		return -1;
+
+	bit_buffer_byte_length = rb->bit_buffer_end - rb->bit_buffer;
+
+	if (bit_buffer_byte_length < 1)
+		return -1;
+
+	header->size = 1;
+
+	if (rb_read_bit(rb) != 0) {
+		// Forbidden bit. Must not be set.
+		return -1;
+	}
+
+	header->type = (enum OBU_TYPE) rb_read_literal(rb, 4);
+	if (!valid_obu_type(header->type))
+		return -1;
+
+	header->has_extension = rb_read_bit(rb);
+	header->has_size_field = rb_read_bit(rb);
+
+	if (!header->has_size_field && !is_annexb) {
+		// section 5 obu streams must have obu_size field set.
+		return -1;
+	}
+
+	if (rb_read_bit(rb) != 0) {
+		// obu_reserved_1bit must be set to 0.
+		return -1;
+	}
+
+	if (header->has_extension) {
+		if (bit_buffer_byte_length == 1)
+			return -1;
+
+		header->size += 1;
+		header->temporal_layer_id = rb_read_literal(rb, 3);
+		header->spatial_layer_id = rb_read_literal(rb, 2);
+		if (rb_read_literal(rb, 3) != 0) {
+			// extension_header_reserved_3bits must be set to 0.
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * read_obu_header_and_size() - parse one OBU header plus its size field.
+ * @data:            start of the OBU.
+ * @bytes_available: bytes readable at @data.
+ * @is_annexb:       1 if the leb128 size precedes the header (annex B).
+ * @obu_header:      out; parsed header fields.
+ * @payload_size:    out; OBU payload bytes after the header/size fields.
+ * @bytes_read:      out; total header + size-field bytes consumed.
+ *
+ * Returns 0 on success, -1 on any parse error.
+ */
+int read_obu_header_and_size(const u8 *data,
+	size_t bytes_available,
+	int is_annexb,
+	struct ObuHeader *obu_header,
+	size_t *const payload_size,
+	size_t *const bytes_read)
+{
+	size_t length_field_size_obu = 0;
+	size_t length_field_size_payload = 0;
+	size_t obu_size = 0;
+	int status = 0;
+	struct read_bit_buffer rb = { data + length_field_size_obu,
+		data + bytes_available, 0};
+
+	if (is_annexb) {
+		// Size field comes before the OBU header, and includes the OBU header
+		status = read_obu_size(data, bytes_available, &obu_size, &length_field_size_obu);
+		if (status != 0)
+			return status;
+	}
+
+	status = read_obu_header(&rb, is_annexb, obu_header);
+	if (status != 0)
+		return status;
+
+	/* !has_size_field is only reachable in annexb mode here:
+	 * read_obu_header() already rejected it for section-5 streams,
+	 * so obu_size below is always the annexb value. */
+	if (!obu_header->has_size_field) {
+		// Derive the payload size from the data we've already read
+		if (obu_size < obu_header->size)
+			return -1;
+
+		*payload_size = obu_size - obu_header->size;
+	} else {
+		// Size field comes after the OBU header, and is just the payload size
+		status = read_obu_size(data + length_field_size_obu + obu_header->size,
+			bytes_available - length_field_size_obu - obu_header->size,
+			payload_size, &length_field_size_payload);
+		if (status != 0)
+			return status;
+	}
+
+	*bytes_read = length_field_size_obu + obu_header->size + length_field_size_payload;
+
+	return 0;
+}
+
+/*
+ * parser_frame() - walk one temporal unit OBU by OBU, prepending a
+ * 16/20-byte "AMLV" prefix header to each OBU as it is copied into
+ * @dst_data, and extract HDR / Dolby Vision metadata from OBU_METADATA.
+ * @is_annexb:  1 for annex-B framing (size before header).
+ * @data:       input bitstream; @data_end is one past its end.
+ * @dst_data:   output buffer for the re-packed stream.
+ * @frame_len:  in/out; incremented by the bytes written to @dst_data.
+ * @meta_buf:   out; receives extracted Dolby Vision RPU bytes.
+ * @meta_len:   out; bytes written to @meta_buf.
+ *
+ * Returns 0 on success, -1 on any malformed OBU sequence.
+ */
+int parser_frame(int is_annexb, u8 *data, const u8 *data_end,
+	u8 *dst_data, u32 *frame_len, u8 *meta_buf, u32 *meta_len)
+{
+	int frame_decoding_finished = 0;
+	u32 obu_size = 0;
+	int seen_frame_header = 0;
+	int next_start_tile = 0;
+	struct DataBuffer obu_size_hdr;
+	u8 header[20] = {0};
+	u8 *p = NULL;
+	u32 rpu_size = 0;
+	struct ObuHeader obu_header;
+
+	memset(&obu_header, 0, sizeof(obu_header));
+
+	// decode frame as a series of OBUs
+	while (!frame_decoding_finished) {
+		//	struct read_bit_buffer rb;
+		size_t payload_size = 0;
+		size_t header_size = 0;
+		size_t bytes_read = 0;
+		const size_t bytes_available = data_end - data;
+		enum OBU_METADATA_TYPE meta_type;
+		int status;
+		u64 type;
+		u32 i;
+
+		if (bytes_available == 0 && !seen_frame_header) {
+			break;
+		}
+
+		status = read_obu_header_and_size(data, bytes_available, is_annexb,
+			&obu_header, &payload_size, &bytes_read);
+		if (status != 0) {
+			return -1;
+		}
+
+		// Record obu size header information.
+		obu_size_hdr.data = data + obu_header.size;
+		obu_size_hdr.size = bytes_read - obu_header.size;
+
+		// Note: read_obu_header_and_size() takes care of checking that this
+		// doesn't cause 'data' to advance past 'data_end'.
+
+		if ((size_t)(data_end - data - bytes_read) < payload_size) {
+			return -1;
+		}
+
+		v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "obu %s len %zu+%zu\n",
+			obu_type_name[obu_header.type],
+			bytes_read, payload_size);
+
+		/* Build the prefix: size fields + "AMLV" magic; section-5
+		 * streams additionally get a 4-byte annexb-style size. */
+		if (!is_annexb) {
+			obu_size = bytes_read + payload_size + 4;
+			header_size = 20;
+		} else {
+			obu_size = bytes_read + payload_size;
+			header_size = 16;
+		}
+
+		header[0] = ((obu_size + 4) >> 24) & 0xff;
+		header[1] = ((obu_size + 4) >> 16) & 0xff;
+		header[2] = ((obu_size + 4) >> 8) & 0xff;
+		header[3] = ((obu_size + 4) >> 0) & 0xff;
+		header[4] = header[0] ^ 0xff;
+		header[5] = header[1] ^ 0xff;
+		header[6] = header[2] ^ 0xff;
+		header[7] = header[3] ^ 0xff;
+		header[8] = 0;
+		header[9] = 0;
+		header[10] = 0;
+		header[11] = 1;
+		header[12] = 'A';
+		header[13] = 'M';
+		header[14] = 'L';
+		header[15] = 'V';
+
+		// put new size to here as annexb
+		header[16] = (obu_size & 0xff) | 0x80;
+		header[17] = ((obu_size >> 7) & 0xff) | 0x80;
+		header[18] = ((obu_size >> 14) & 0xff) | 0x80;
+		header[19] = ((obu_size >> 21) & 0xff) | 0x00;
+
+		memcpy(dst_data, header, header_size);
+		dst_data += header_size;
+		memcpy(dst_data, data, bytes_read + payload_size);
+		dst_data += (bytes_read + payload_size);
+
+		data += bytes_read;
+		*frame_len += (header_size + bytes_read + payload_size);
+
+		switch (obu_header.type) {
+		case OBU_TEMPORAL_DELIMITER:
+			seen_frame_header = 0;
+			next_start_tile = 0;
+			break;
+		case OBU_SEQUENCE_HEADER:
+			// The sequence header should not change in the middle of a frame.
+			if (seen_frame_header) {
+				return -1;
+			}
+			break;
+		case OBU_FRAME_HEADER:
+			if (data_end == data + payload_size) {
+				frame_decoding_finished = 1;
+			} else {
+				seen_frame_header = 1;
+			}
+			break;
+		case OBU_REDUNDANT_FRAME_HEADER:
+		case OBU_FRAME:
+			if (obu_header.type == OBU_REDUNDANT_FRAME_HEADER) {
+				if (!seen_frame_header) {
+					return -1;
+				}
+			} else {
+				// OBU_FRAME_HEADER or OBU_FRAME.
+				if (seen_frame_header) {
+					return -1;
+				}
+			}
+			if (obu_header.type == OBU_FRAME) {
+				if (data_end == data + payload_size) {
+					frame_decoding_finished = 1;
+					seen_frame_header = 0;
+				}
+			}
+			break;
+		case OBU_TILE_GROUP:
+			if (!seen_frame_header) {
+				return -1;
+			}
+			if (data + payload_size == data_end)
+				frame_decoding_finished = 1;
+			if (frame_decoding_finished)
+				seen_frame_header = 0;
+			break;
+		case OBU_METADATA:
+			/* metadata_type is itself leb128-coded at the start
+			 * of the payload. */
+			uleb_decode(data, 8, &type, &bytes_read);
+			if (type < 6)
+				meta_type = type;
+			else
+				meta_type = 0;
+			p = data + bytes_read;
+			/* NOTE(review): the unclamped 'type' indexes the
+			 * 6-entry meta_type_name[] here; a bitstream value
+			 * >= 6 reads out of bounds - the clamped 'meta_type'
+			 * should be used instead. */
+			v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+				"meta type %s %zu+%zu\n",
+				meta_type_name[type],
+				bytes_read,
+				payload_size - bytes_read);
+
+			if (meta_type == OBU_METADATA_TYPE_ITUT_T35) {
+#if 0 /* for dumping original obu payload */
+				for (i = 0; i < payload_size - bytes_read; i++) {
+					pr_info("%02x ", p[i]);
+					if (i % 16 == 15)
+						pr_info("\n");
+				}
+				if (i % 16 != 0)
+					pr_info("\n");
+#endif
+				if ((p[0] == 0xb5) /* country code */
+					&& ((p[1] == 0x00) && (p[2] == 0x3b)) /* terminal_provider_code */
+					&& ((p[3] == 0x00) && (p[4] == 0x00) && (p[5] == 0x08) && (p[6] == 0x00))) { /* terminal_provider_oriented_code */
+					v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+						"dolbyvison rpu\n");
+					meta_buf[0] = meta_buf[1] = meta_buf[2] = 0;
+					meta_buf[3] = 0x01;
+					meta_buf[4] = 0x19;
+
+					/* Un-pack the bit-shifted RPU payload;
+					 * two layouts depending on p[11] bit4. */
+					if (p[11] & 0x10) {
+						rpu_size = 0x100;
+						rpu_size |= (p[11] & 0x0f) << 4;
+						rpu_size |= (p[12] >> 4) & 0x0f;
+						if (p[12] & 0x08) {
+							v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+								"meta rpu in obu exceed 512 bytes\n");
+							break;
+						}
+						for (i = 0; i < rpu_size; i++) {
+							meta_buf[5 + i] = (p[12 + i] & 0x07) << 5;
+							meta_buf[5 + i] |= (p[13 + i] >> 3) & 0x1f;
+						}
+						rpu_size += 5;
+					} else {
+						rpu_size = (p[10] & 0x1f) << 3;
+						rpu_size |= (p[11] >> 5) & 0x07;
+						for (i = 0; i < rpu_size; i++) {
+							meta_buf[5 + i] = (p[11 + i] & 0x0f) << 4;
+							meta_buf[5 + i] |= (p[12 + i] >> 4) & 0x0f;
+						}
+						rpu_size += 5;
+					}
+					*meta_len = rpu_size;
+				}
+			} else if (meta_type == OBU_METADATA_TYPE_HDR_CLL) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "hdr10 cll:\n");
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "max_cll = %x\n", (p[0] << 8) | p[1]);
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "max_fall = %x\n", (p[2] << 8) | p[3]);
+			} else if (meta_type == OBU_METADATA_TYPE_HDR_MDCV) {
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "hdr10 primaries[r,g,b] = \n");
+				for (i = 0; i < 3; i++) {
+					v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, " %x, %x\n",
+						(p[i * 4] << 8) | p[i * 4 + 1],
+						(p[i * 4 + 2] << 8) | p[i * 4 + 3]);
+				}
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+					"white point = %x, %x\n", (p[12] << 8) | p[13], (p[14] << 8) | p[15]);
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+					"maxl = %x\n", (p[16] << 24) | (p[17] << 16) | (p[18] << 8) | p[19]);
+				v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+					"minl = %x\n", (p[20] << 24) | (p[21] << 16) | (p[22] << 8) | p[23]);
+			}
+			break;
+		case OBU_TILE_LIST:
+			break;
+		case OBU_PADDING:
+			break;
+		default:
+			// Skip unrecognized OBUs
+			break;
+		}
+
+		data += payload_size;
+	}
+
+	return 0;
+}
+
+/*
+ * vdec_write_nalu() - write one frame to the decoder, optionally
+ * re-packing it with per-OBU prefix headers (parser_frame()) when
+ * av1_need_prefix is set.  Returns bytes written or a negative error.
+ */
+static int vdec_write_nalu(struct vdec_av1_inst *inst,
+	u8 *buf, u32 size, u64 ts)
+{
+	int ret = 0;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+	u8 *data = NULL;
+	u32 length = 0;
+	bool need_prefix = av1_need_prefix;
+
+	if (need_prefix) {
+		u8 meta_buffer[1024] = {0};
+		u32 meta_size = 0;
+		u8 *src = buf;
+
+		/* 0x1000 of headroom covers the added per-OBU prefixes. */
+		data = vzalloc(size + 0x1000);
+		if (!data)
+			return -ENOMEM;
+
+		parser_frame(0, src, src + size, data, &length, meta_buffer, &meta_size);
+
+		if (length)
+			ret = vdec_vframe_write(vdec, data, length, ts);
+		else
+			ret = -1;
+
+		vfree(data);
+	} else {
+		ret = vdec_vframe_write(vdec, buf, size, ts);
+	}
+
+	return ret;
+}
+
+/*
+ * monitor_res_change() - re-parse a frame that carries the sync code and
+ * report true if the coded resolution differs from the last snapshot.
+ * NOTE(review): the sync code is read at p[1..3] when a prefix is needed
+ * and at p[17..19] (past the 16-byte prefix header) otherwise - confirm
+ * the two offsets are not inverted.  With parse_stream_cpu() always
+ * failing for AV1, this currently never reports a change.
+ */
+static bool monitor_res_change(struct vdec_av1_inst *inst, u8 *buf, u32 size)
+{
+	int ret = -1;
+	u8 *p = buf;
+	int len = size;
+	u32 synccode = av1_need_prefix ?
+		((p[1] << 16) | (p[2] << 8) | p[3]) :
+		((p[17] << 16) | (p[18] << 8) | p[19]);
+
+	if (synccode == SYNC_CODE) {
+		ret = parse_stream_cpu(inst, p, len);
+		if (!ret && (inst->vsi->cur_pic.coded_width !=
+			inst->vsi->pic.coded_width ||
+			inst->vsi->cur_pic.coded_height !=
+			inst->vsi->pic.coded_height)) {
+			inst->vsi->cur_pic = inst->vsi->pic;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * vdec_av1_decode() - submit one output buffer to the decoder.
+ * @h_vdec:  instance handle.
+ * @bs:      bitstream buffer (NULL is rejected).
+ * @res_chg: out; set true when a resolution change was detected and the
+ *           buffer was not consumed.
+ *
+ * Returns bytes written, 0 on resolution change, -EAGAIN when the input
+ * fifo is full, or a negative error.
+ */
+static int vdec_av1_decode(unsigned long h_vdec,
+			   struct aml_vcodec_mem *bs, bool *res_chg)
+{
+	struct vdec_av1_inst *inst = (struct vdec_av1_inst *)h_vdec;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+	u8 *buf;
+	u32 size;
+	int ret = -1;
+
+	if (bs == NULL)
+		return -1;
+
+	buf = (u8 *) bs->vaddr;
+	size = bs->size;
+
+	if (vdec_input_full(vdec)) {
+		ATRACE_COUNTER("vdec_input_full", 0);
+		return -EAGAIN;
+	}
+
+	if (inst->ctx->is_drm_mode) {
+		if (bs->model == VB2_MEMORY_MMAP) {
+			struct aml_video_stream *s =
+				(struct aml_video_stream *) buf;
+
+			if (s->magic != AML_VIDEO_MAGIC)
+				return -1;
+
+			if (!inst->ctx->param_sets_from_ucode &&
+				(s->type == V4L_STREAM_TYPE_MATEDATA)) {
+				if ((*res_chg = monitor_res_change(inst,
+					s->data, s->len)))
+				return 0;
+			}
+
+			ret = vdec_vframe_write(vdec,
+				s->data,
+				s->len,
+				bs->timestamp);
+		} else if (bs->model == VB2_MEMORY_DMABUF ||
+			bs->model == VB2_MEMORY_USERPTR) {
+			ret = vdec_vframe_write_with_dma(vdec,
+				bs->addr, size, bs->timestamp,
+				BUFF_IDX(bs, bs->index),
+				vdec_vframe_input_free, inst->ctx);
+		}
+	} else {
+		/*checked whether the resolution changes.*/
+		if ((!inst->ctx->param_sets_from_ucode) &&
+			(*res_chg = monitor_res_change(inst, buf, size)))
+			return 0;
+
+		ret = vdec_write_nalu(inst, buf, size, bs->timestamp);
+	}
+	ATRACE_COUNTER("v4l2_decode_write", ret);
+
+	return ret;
+}
+
+/*
+ * get_param_config_info() - copy the parameter groups whose status bits
+ * are set from the instance into @parms and merge the status mask.
+ * NOTE(review): this function uses space+tab indentation unlike the rest
+ * of the file; left byte-identical here.
+ */
+ static void get_param_config_info(struct vdec_av1_inst *inst,
+	struct aml_dec_params *parms)
+ {
+	 if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_CFGINFO)
+		 parms->cfg = inst->parms.cfg;
+	 if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_PSINFO)
+		 parms->ps = inst->parms.ps;
+	 if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_HDRINFO)
+		 parms->hdr = inst->parms.hdr;
+	 if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_CNTINFO)
+		 parms->cnt = inst->parms.cnt;
+
+	 parms->parms_status |= inst->parms.parms_status;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"parms status: %u\n", parms->parms_status);
+ }
+
+/*
+ * vdec_av1_get_param() - dispatch a GET_PARAM_* query to its handler.
+ *
+ * @out points to a type-specific destination (frame buffer pointer,
+ * pic info, dpb size, crop rect or aml_dec_params) chosen by @type.
+ *
+ * Return: 0 on success, -EINVAL for an unknown type, -1 for a NULL inst.
+ */
+static int vdec_av1_get_param(unsigned long h_vdec,
+			       enum vdec_get_param_type type, void *out)
+{
+	int ret = 0;
+	struct vdec_av1_inst *inst = (struct vdec_av1_inst *)h_vdec;
+
+	if (!inst) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"the av1 inst of dec is invalid.\n");
+		return -1;
+	}
+
+	switch (type) {
+	case GET_PARAM_DISP_FRAME_BUFFER:
+		vdec_av1_get_vf(inst, out);
+		break;
+
+	case GET_PARAM_FREE_FRAME_BUFFER:
+		ret = vdec_av1_get_fb(inst, out);
+		break;
+
+	case GET_PARAM_PIC_INFO:
+		get_pic_info(inst, out);
+		break;
+
+	case GET_PARAM_DPB_SIZE:
+		get_dpb_size(inst, out);
+		break;
+
+	case GET_PARAM_CROP_INFO:
+		get_crop_info(inst, out);
+		break;
+
+	case GET_PARAM_CONFIG_INFO:
+		get_param_config_info(inst, out);
+		break;
+
+	default:
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"invalid get parameter type=%d\n", type);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* Wake any thread blocked in parse_stream_ucode*() waiting on inst->comp. */
+static void set_param_write_sync(struct vdec_av1_inst *inst)
+{
+	complete(&inst->comp);
+}
+
+/*
+ * set_param_ps_info() - latch the picture/sequence info reported by the
+ * ucode parser into the shared vsi, then wake the probe thread that is
+ * blocked on inst->comp (see parse_stream_ucode*).
+ */
+static void set_param_ps_info(struct vdec_av1_inst *inst,
+	struct aml_vdec_ps_infos *ps)
+{
+	struct vdec_pic_info *pic = &inst->vsi->pic;
+	struct vdec_av1_dec_info *dec = &inst->vsi->dec;
+	struct v4l2_rect *rect = &inst->vsi->crop;
+
+	/* fill visible area size that be used for EGL. */
+	pic->visible_width	= ps->visible_width;
+	pic->visible_height	= ps->visible_height;
+
+	/* calc visible ares. */
+	rect->left		= 0;
+	rect->top		= 0;
+	rect->width		= pic->visible_width;
+	rect->height		= pic->visible_height;
+
+	/* config canvas size that be used for decoder. */
+	pic->coded_width	= ps->coded_width;
+	pic->coded_height	= ps->coded_height;
+
+	/* NV12-style sizing: luma = w*h, chroma = half of that. */
+	pic->y_len_sz		= pic->coded_width * pic->coded_height;
+	pic->c_len_sz		= pic->y_len_sz >> 1;
+
+	/* calc DPB size */
+	dec->dpb_sz		= ps->dpb_size;
+
+	inst->parms.ps 	= *ps;
+	inst->parms.parms_status |=
+		V4L2_CONFIG_PARM_DECODE_PSINFO;
+
+	/*wake up*/
+	complete(&inst->comp);
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"Parse from ucode, crop(%d x %d), coded(%d x %d) dpb: %d\n",
+		ps->visible_width, ps->visible_height,
+		ps->coded_width, ps->coded_height,
+		ps->dpb_size);
+}
+
+/*
+ * set_param_hdr_info() - store HDR metadata and notify user space.
+ *
+ * NOTE(review): this gates on the HDRINFO bit being ALREADY set, the
+ * opposite of the h264 counterpart which uses !(status & HDRINFO) to
+ * fire the event only once on first arrival. Possibly a missing '!'
+ * here — confirm intended behavior before changing.
+ */
+static void set_param_hdr_info(struct vdec_av1_inst *inst,
+	struct aml_vdec_hdr_infos *hdr)
+{
+	if ((inst->parms.parms_status &
+		V4L2_CONFIG_PARM_DECODE_HDRINFO)) {
+		inst->parms.hdr = *hdr;
+		inst->parms.parms_status |=
+			V4L2_CONFIG_PARM_DECODE_HDRINFO;
+		aml_vdec_dispatch_event(inst->ctx,
+			V4L2_EVENT_SRC_CH_HDRINFO);
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+			"av1 set HDR infos\n");
+	}
+}
+
+/*
+ * set_param_post_event() - forward a decoder event to the v4l2 event
+ * queue and log it. (Body was indented one level too deep; normalized,
+ * logic unchanged.)
+ */
+static void set_param_post_event(struct vdec_av1_inst *inst, u32 *event)
+{
+	aml_vdec_dispatch_event(inst->ctx, *event);
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"av1 post event: %d\n", *event);
+}
+
+/*
+ * vdec_av1_set_param() - dispatch a SET_PARAM_* notification (usually
+ * coming from the ucode/ISR side) to its handler.
+ *
+ * Return: 0 on success, -EINVAL for an unknown type, -1 for a NULL inst.
+ */
+static int vdec_av1_set_param(unsigned long h_vdec,
+	enum vdec_set_param_type type, void *in)
+{
+	int ret = 0;
+	struct vdec_av1_inst *inst = (struct vdec_av1_inst *)h_vdec;
+
+	if (!inst) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"the av1 inst of dec is invalid.\n");
+		return -1;
+	}
+
+	switch (type) {
+	case SET_PARAM_WRITE_FRAME_SYNC:
+		set_param_write_sync(inst);
+		break;
+
+	case SET_PARAM_PS_INFO:
+		set_param_ps_info(inst, in);
+		break;
+
+	case SET_PARAM_HDR_INFO:
+		set_param_hdr_info(inst, in);
+		break;
+
+	case SET_PARAM_POST_EVENT:
+		set_param_post_event(inst, in);
+		break;
+	default:
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"invalid set parameter type=%d\n", type);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* AV1 implementation of the common decoder ops table. */
+static struct vdec_common_if vdec_av1_if = {
+	.init		= vdec_av1_init,
+	.probe		= vdec_av1_probe,
+	.decode		= vdec_av1_decode,
+	.get_param	= vdec_av1_get_param,
+	.set_param	= vdec_av1_set_param,
+	.deinit		= vdec_av1_deinit,
+};
+
+struct vdec_common_if *get_av1_dec_comm_if(void);
+
+/* Accessor used by vdec_drv_if to pick the AV1 ops table. */
+struct vdec_common_if *get_av1_dec_comm_if(void)
+{
+	return &vdec_av1_if;
+}
+
diff --git a/drivers/amvdec_ports/decoder/vdec_h264_if.c b/drivers/amvdec_ports/decoder/vdec_h264_if.c
new file mode 100644
index 0000000..d13fc34
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/vdec_h264_if.c
@@ -0,0 +1,1126 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <uapi/linux/swab.h>
+
+#include "../vdec_drv_if.h"
+#include "../aml_vcodec_util.h"
+#include "../aml_vcodec_dec.h"
+#include "../aml_vcodec_drv.h"
+#include "../aml_vcodec_adapt.h"
+#include "../vdec_drv_base.h"
+#include "../aml_vcodec_vfm.h"
+#include "aml_h264_parser.h"
+#include "../utils/common.h"
+
+/* h264 NALU type */
+#define NAL_NON_IDR_SLICE			0x01
+#define NAL_IDR_SLICE				0x05
+#define NAL_H264_SEI				0x06
+#define NAL_H264_SPS				0x07
+#define NAL_H264_PPS				0x08
+#define NAL_H264_AUD				0x09
+
+/* low 5 bits of the NAL header byte carry the NAL unit type. */
+#define AVC_NAL_TYPE(value)				((value) & 0x1F)
+
+#define BUF_PREDICTION_SZ			(64 * 1024)//(32 * 1024)
+
+/* macroblock edge length in pixels. */
+#define MB_UNIT_LEN				16
+
+/* motion vector size (bytes) for every macro block */
+#define HW_MB_STORE_SZ				64
+
+#define H264_MAX_FB_NUM				17
+#define HDR_PARSING_BUF_SZ			1024
+
+/* size of the SPS/PPS/SEI cache in vdec_h264_vsi (see vdec_write_nalu). */
+#define HEADER_BUFFER_SIZE			(128 * 1024)
+
+/**
+ * struct h264_fb - h264 decode frame buffer information
+ * @vdec_fb_va  : virtual address of struct vdec_fb
+ * @y_fb_dma    : dma address of Y frame buffer (luma)
+ * @c_fb_dma    : dma address of C frame buffer (chroma)
+ * @poc         : picture order count of frame buffer
+ * @reserved    : for 8 bytes alignment
+ */
+struct h264_fb {
+	uint64_t vdec_fb_va;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	int32_t poc;
+	uint32_t reserved;
+};
+
+/**
+ * struct h264_ring_fb_list - ring frame buffer list
+ * @fb_list   : frame buffer array
+ * @read_idx  : read index
+ * @write_idx : write index
+ * @count     : buffer count in list
+ * @reserved  : for 8 bytes alignment
+ */
+struct h264_ring_fb_list {
+	struct h264_fb fb_list[H264_MAX_FB_NUM];
+	unsigned int read_idx;
+	unsigned int write_idx;
+	unsigned int count;
+	unsigned int reserved;
+};
+
+/**
+ * struct vdec_h264_dec_info - decode information
+ * @dpb_sz		: decoding picture buffer size
+ * @realloc_mv_buf	: flag to notify driver to re-allocate mv buffer
+ * @reserved		: for 8 bytes alignment
+ * @bs_dma		: Input bit-stream buffer dma address
+ * @y_fb_dma		: Y frame buffer dma address
+ * @c_fb_dma		: C frame buffer dma address
+ * @vdec_fb_va		: VDEC frame buffer struct virtual address
+ */
+struct vdec_h264_dec_info {
+	uint32_t dpb_sz;
+	uint32_t realloc_mv_buf;
+	uint32_t reserved;
+	uint64_t bs_dma;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_h264_vsi - shared memory for decode information exchange
+ *                        between VPU and Host.
+ *                        The memory is allocated by VPU then mapping to Host
+ *                        in vpu_dec_init() and freed in vpu_dec_deinit()
+ *                        by VPU.
+ *                        AP-W/R : AP is writer/reader on this item
+ *                        VPU-W/R: VPU is write/reader on this item
+ * @hdr_buf      : scratch buffer for HDR metadata parsing
+ * @header_buf   : cache for SPS/PPS/SEI NALs (HEADER_BUFFER_SIZE bytes),
+ *                 prepended to the next slice in vdec_write_nalu()
+ * @sps_size     : bytes of cached SPS in @header_buf
+ * @pps_size     : bytes of cached PPS in @header_buf
+ * @sei_size     : bytes of cached SEI in @header_buf
+ * @head_offset  : current write offset into @header_buf
+ * @dec          : decode information (AP-R, VPU-W)
+ * @pic          : picture information (AP-R, VPU-W)
+ * @cur_pic      : last-seen picture info, used to detect res change
+ * @crop         : crop information (AP-R, VPU-W)
+ * @is_combine   : input chunk carries csd (SPS/PPS) plus slice together
+ * @nalu_pos     : offset of the first NAL payload in the chunk
+ */
+struct vdec_h264_vsi {
+	unsigned char hdr_buf[HDR_PARSING_BUF_SZ];
+	char *header_buf;
+	int sps_size;
+	int pps_size;
+	int sei_size;
+	int head_offset;
+	struct vdec_h264_dec_info dec;
+	struct vdec_pic_info pic;
+	struct vdec_pic_info cur_pic;
+	struct v4l2_rect crop;
+	bool is_combine;
+	int nalu_pos;
+};
+
+/**
+ * struct vdec_h264_inst - h264 decoder instance
+ * @num_nalu : how many nalus be decoded
+ * @ctx      : point to aml_vcodec_ctx
+ * @pred_buf : HW working predication buffer
+ * @mv_buf   : HW working motion vector buffer
+ * @vdec     : adapter to the amlogic stream-decoder core
+ * @vsi      : VPU shared information
+ * @vfm      : vframe manager glue (vframe provider/receiver pair)
+ * @parms    : parameters collected from config and stream parsing
+ * @comp     : completion used to wait for ucode PS-info parsing
+ */
+struct vdec_h264_inst {
+	unsigned int num_nalu;
+	struct aml_vcodec_ctx *ctx;
+	struct aml_vcodec_mem pred_buf;
+	struct aml_vcodec_mem mv_buf[H264_MAX_FB_NUM];
+	struct aml_vdec_adapt vdec;
+	struct vdec_h264_vsi *vsi;
+	struct vcodec_vfm_s vfm;
+	struct aml_dec_params parms;
+	struct completion comp;
+};
+
+/* Compiled-out developer aids: raw YUV dump to a file and a UV byte-swap
+ * helper. Kept for bring-up debugging; never built into the driver.
+ */
+#if 0
+#define DUMP_FILE_NAME "/data/dump/dump.tmp"
+static struct file *filp;
+static loff_t file_pos;
+
+void dump_write(const char __user *buf, size_t count)
+{
+	mm_segment_t old_fs;
+
+	if (!filp)
+		return;
+
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	if (count != vfs_write(filp, buf, count, &file_pos))
+		pr_err("Failed to write file\n");
+
+	set_fs(old_fs);
+}
+
+void dump_init(void)
+{
+	filp = filp_open(DUMP_FILE_NAME, O_CREAT | O_RDWR, 0644);
+	if (IS_ERR(filp)) {
+		pr_err("open dump file failed\n");
+		filp = NULL;
+	}
+}
+
+void dump_deinit(void)
+{
+	if (filp) {
+		filp_close(filp, current->files);
+		filp = NULL;
+		file_pos = 0;
+	}
+}
+
+void swap_uv(void *uv, int size)
+{
+	int i;
+	__u16 *p = uv;
+
+	size /= 2;
+
+	for (i = 0; i < size; i++, p++)
+		*p = __swab16(*p);
+}
+#endif
+
+/* Copy the current picture geometry out of the shared vsi and log it. */
+static void get_pic_info(struct vdec_h264_inst *inst,
+			 struct vdec_pic_info *pic)
+{
+	struct vdec_pic_info *src = &inst->vsi->pic;
+
+	*pic = *src;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"pic(%d, %d), buf(%d, %d)\n",
+		 pic->visible_width, pic->visible_height,
+		 pic->coded_width, pic->coded_height);
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"Y(%d, %d), C(%d, %d)\n", pic->y_bs_sz,
+		 pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+/* Report the visible-area rectangle kept in the shared vsi. */
+static void get_crop_info(struct vdec_h264_inst *inst, struct v4l2_rect *cr)
+{
+	const struct v4l2_rect *crop = &inst->vsi->crop;
+
+	cr->left	= crop->left;
+	cr->top		= crop->top;
+	cr->width	= crop->width;
+	cr->height	= crop->height;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"l=%d, t=%d, w=%d, h=%d\n",
+		 cr->left, cr->top, cr->width, cr->height);
+}
+
+/* Report the decoded-picture-buffer size computed during stream probe. */
+static void get_dpb_size(struct vdec_h264_inst *inst, unsigned int *dpb_sz)
+{
+	*dpb_sz = inst->vsi->dec.dpb_sz;
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, "sz=%d\n", *dpb_sz);
+}
+
+/*
+ * skip_aud_data() - advance *data past a leading Access Unit Delimiter
+ * NAL (type 0x9 followed by 0xf0), shrinking *size accordingly.
+ *
+ * NOTE(review): reads (*data)[i] and (*data)[i+1] without checking them
+ * against *size; assumes the caller's buffer extends past the start
+ * code — confirm for truncated inputs.
+ */
+static void skip_aud_data(u8 **data, u32 *size)
+{
+	int i;
+
+	i = find_start_code(*data, *size);
+	if (i > 0 && (*data)[i++] == 0x9 && (*data)[i++] == 0xf0) {
+		*size -= i;
+		*data += i;
+	}
+}
+
+/*
+ * vdec_config_default_parms() - fill @parm with the default decoder
+ * configure string, used when user space supplied no CFGINFO.
+ *
+ * Return: number of bytes written into @parm.
+ *
+ * Fix: the original returned "parm - pbuf"; pbuf advances past parm as
+ * the string is written, so that difference is negative and, as a u32,
+ * a huge bogus length stored into ctx->config.length by the caller.
+ */
+static u32 vdec_config_default_parms(u8 *parm)
+{
+	u8 *pbuf = parm;
+
+	pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;");
+	pbuf += sprintf(pbuf, "mh264_double_write_mode:16;");
+	pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:7;");
+	pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:0;");
+	pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:0;");
+
+	return pbuf - parm;
+}
+
+/*
+ * vdec_parser_parms() - build the "key:value;" configure string passed to
+ * the decoder core, either from user-supplied CFGINFO or from defaults,
+ * and mirror the chosen cfg into inst->parms.
+ */
+static void vdec_parser_parms(struct vdec_h264_inst *inst)
+{
+	struct aml_vcodec_ctx *ctx = inst->ctx;
+
+	if (ctx->config.parm.dec.parms_status &
+		V4L2_CONFIG_PARM_DECODE_CFGINFO) {
+		u8 *pbuf = ctx->config.buf;
+
+		pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;");
+		pbuf += sprintf(pbuf, "mh264_double_write_mode:%d;",
+			ctx->config.parm.dec.cfg.double_write_mode);
+		pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;",
+			ctx->config.parm.dec.cfg.ref_buf_margin);
+		pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;",
+			ctx->config.parm.dec.cfg.canvas_mem_mode);
+		pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:%d;",
+			ctx->config.parm.dec.cfg.canvas_mem_endian);
+		pbuf += sprintf(pbuf, "parm_v4l_low_latency_mode:%d;",
+			ctx->config.parm.dec.cfg.low_latency_mode);
+		ctx->config.length = pbuf - ctx->config.buf;
+	} else {
+		/* no user config: record the defaults we are about to emit. */
+		ctx->config.parm.dec.cfg.double_write_mode = 16;
+		ctx->config.parm.dec.cfg.ref_buf_margin = 7;
+		ctx->config.length = vdec_config_default_parms(ctx->config.buf);
+	}
+
+	inst->vdec.config	= ctx->config;
+	inst->parms.cfg		= ctx->config.parm.dec.cfg;
+	inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO;
+}
+
+/*
+ * vdec_h264_init() - allocate and wire up an h264 decoder instance.
+ *
+ * Sets up the vdec adapter, vframe manager, shared vsi and the SPS/PPS
+ * header cache, then hands the instance back through @h_vdec.
+ *
+ * Return: 0 on success, -ENOMEM or the failing sub-init's error code;
+ * on failure everything allocated so far is torn down and *h_vdec is 0.
+ */
+static int vdec_h264_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+	struct vdec_h264_inst *inst = NULL;
+	int ret = -1;
+	bool dec_init = false;
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->vdec.video_type	= VFORMAT_H264;
+	inst->vdec.dev		= ctx->dev->vpu_plat_dev;
+	inst->vdec.filp		= ctx->dev->filp;
+	inst->vdec.ctx		= ctx;
+	inst->ctx		= ctx;
+
+	vdec_parser_parms(inst);
+
+	/* set play mode.*/
+	if (ctx->is_drm_mode)
+		inst->vdec.port.flag |= PORT_FLAG_DRM;
+
+	/* init vfm */
+	inst->vfm.ctx		= ctx;
+	inst->vfm.ada_ctx	= &inst->vdec;
+	ret = vcodec_vfm_init(&inst->vfm);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"init vfm failed.\n");
+		goto err;
+	}
+
+	ret = video_decoder_init(&inst->vdec);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"vdec_h264 init err=%d\n", ret);
+		goto err;
+	}
+	/* from here the error path must also release the decoder core. */
+	dec_init = true;
+
+	/* probe info from the stream */
+	inst->vsi = kzalloc(sizeof(struct vdec_h264_vsi), GFP_KERNEL);
+	if (!inst->vsi) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* alloc the header buffer to be used cache sps or spp etc.*/
+	inst->vsi->header_buf = kzalloc(HEADER_BUFFER_SIZE, GFP_KERNEL);
+	if (!inst->vsi->header_buf) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	init_completion(&inst->comp);
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"H264 Instance >> %lx", (ulong) inst);
+
+	ctx->ada_ctx	= &inst->vdec;
+	*h_vdec		= (unsigned long)inst;
+
+	//dump_init();
+
+	return 0;
+err:
+	if (dec_init)
+		video_decoder_release(&inst->vdec);
+	if (inst)
+		vcodec_vfm_release(&inst->vfm);
+	if (inst && inst->vsi && inst->vsi->header_buf)
+		kfree(inst->vsi->header_buf);
+	if (inst && inst->vsi)
+		kfree(inst->vsi);
+	if (inst)
+		kfree(inst);
+	*h_vdec = 0;
+
+	return ret;
+}
+
+/* Compiled-out: table-based reference buffer count derived from the H.264
+ * level's MaxDPB byte budget divided by the picture size. Superseded by
+ * sps->num_reorder_frames in fill_vdec_params(); kept for reference.
+ */
+#if 0
+static int refer_buffer_num(int level_idc, int max_poc_cnt,
+	int mb_width, int mb_height)
+{
+	int size;
+	int pic_size = mb_width * mb_height * 384;
+
+	switch (level_idc) {
+	case 9:
+		size = 152064;
+		break;
+	case 10:
+		size = 152064;
+		break;
+	case 11:
+		size = 345600;
+		break;
+	case 12:
+		size = 912384;
+		break;
+	case 13:
+		size = 912384;
+		break;
+	case 20:
+		size = 912384;
+		break;
+	case 21:
+		size = 1824768;
+		break;
+	case 22:
+		size = 3110400;
+		break;
+	case 30:
+		size = 3110400;
+		break;
+	case 31:
+		size = 6912000;
+		break;
+	case 32:
+		size = 7864320;
+		break;
+	case 40:
+		size = 12582912;
+		break;
+	case 41:
+		size = 12582912;
+		break;
+	case 42:
+		size = 13369344;
+		break;
+	case 50:
+		size = 42393600;
+		break;
+	case 51:
+	case 52:
+	default:
+		size = 70778880;
+		break;
+	}
+
+	size /= pic_size;
+	size = size + 1; /* need more buffers */
+
+	if (size > max_poc_cnt)
+		size = max_poc_cnt;
+
+	return size;
+}
+#endif
+
+/*
+ * vdec_config_dw_mode() - grow the canvas geometry when double-write
+ * mode appends a scaled copy of the frame next to the full-size one.
+ *
+ * Mode 0x1 adds a half-width copy; mode 0x2 adds a half-width,
+ * half-height copy. Any other mode leaves the geometry untouched.
+ */
+static void vdec_config_dw_mode(struct vdec_pic_info *pic, int dw_mode)
+{
+	if (dw_mode != 0x1 && dw_mode != 0x2)
+		return;
+
+	pic->coded_width += pic->coded_width >> 1;
+	if (dw_mode == 0x2)
+		pic->coded_height += pic->coded_height >> 1;
+
+	/* NV12 sizing: luma plane w*h, chroma plane half of that. */
+	pic->y_len_sz = pic->coded_width * pic->coded_height;
+	pic->c_len_sz = pic->y_len_sz >> 1;
+}
+
+/*
+ * fill_vdec_params() - derive picture geometry and DPB size from a
+ * host-parsed SPS and publish them into the shared vsi and inst->parms.
+ */
+static void fill_vdec_params(struct vdec_h264_inst *inst, struct h264_SPS_t *sps)
+{
+	struct vdec_pic_info *pic = &inst->vsi->pic;
+	struct vdec_h264_dec_info *dec = &inst->vsi->dec;
+	struct v4l2_rect *rect = &inst->vsi->crop;
+	int dw = inst->parms.cfg.double_write_mode;
+	int margin = inst->parms.cfg.ref_buf_margin;
+	u32 mb_w, mb_h, width, height;
+
+	mb_w = sps->mb_width;
+	mb_h = sps->mb_height;
+
+	/* macroblocks are 16x16 pixels. */
+	width  = mb_w << 4;
+	height = mb_h << 4;
+
+	/* visible area = coded area minus the SPS cropping window. */
+	width  -= (sps->crop_left + sps->crop_right);
+	height -= (sps->crop_top + sps->crop_bottom);
+
+	/* fill visible area size that be used for EGL. */
+	pic->visible_width	= width;
+	pic->visible_height	= height;
+
+	/* calc visible ares. */
+	rect->left		= 0;
+	rect->top		= 0;
+	rect->width		= pic->visible_width;
+	rect->height		= pic->visible_height;
+
+	/* config canvas size that be used for decoder. */
+	pic->coded_width	= ALIGN(mb_w, 4) << 4;
+	pic->coded_height	= ALIGN(mb_h, 4) << 4;
+	pic->y_len_sz		= pic->coded_width * pic->coded_height;
+	pic->c_len_sz		= pic->y_len_sz >> 1;
+	pic->profile_idc	= sps->profile_idc;
+	pic->ref_frame_count= sps->ref_frame_count;
+	/* calc DPB size */
+	dec->dpb_sz		= sps->num_reorder_frames + margin;
+
+	inst->parms.ps.visible_width	= pic->visible_width;
+	inst->parms.ps.visible_height	= pic->visible_height;
+	inst->parms.ps.coded_width	= pic->coded_width;
+	inst->parms.ps.coded_height	= pic->coded_height;
+	inst->parms.ps.profile		= sps->profile_idc;
+	inst->parms.ps.mb_width		= sps->mb_width;
+	inst->parms.ps.mb_height	= sps->mb_height;
+	inst->parms.ps.ref_frames	= sps->ref_frame_count;
+	inst->parms.ps.reorder_frames	= sps->num_reorder_frames;
+	inst->parms.ps.dpb_size		= dec->dpb_sz;
+	inst->parms.parms_status	|= V4L2_CONFIG_PARM_DECODE_PSINFO;
+
+	vdec_config_dw_mode(pic, dw);
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_BUFMGR,
+		"The stream infos, dw: %d, coded:(%d x %d), visible:(%d x %d), DPB: %d, margin: %d\n",
+		dw, pic->coded_width, pic->coded_height,
+		pic->visible_width, pic->visible_height,
+		dec->dpb_sz - margin, margin);
+}
+
+/*
+ * check_frame_combine() - detect whether the chunk holds more than one
+ * NAL (e.g. csd + slice combined). Returns true if a second start code
+ * is found; *pos is left at the offset just past the last start code
+ * seen before the answer was decided.
+ *
+ * NOTE(review): find_start_code() is probed on a fixed 7-byte window as
+ * p slides; near the end of the buffer that window may extend past
+ * buf+size — confirm find_start_code tolerates this.
+ */
+static bool check_frame_combine(u8 *buf, u32 size, int *pos)
+{
+	bool combine = false;
+	int i = 0, j = 0, cnt = 0;
+	u8 *p = buf;
+
+	for (i = 4; i < size; i++) {
+		j = find_start_code(p, 7);
+		if (j > 0) {
+			/* a second start code means combined data. */
+			if (++cnt > 1) {
+				combine = true;
+				break;
+			}
+
+			*pos = p - buf + j;
+			p += j;
+			i += j;
+		}
+		p++;
+	}
+
+	//pr_info("nal pos: %d, is_combine: %d\n",*pos, *is_combine);
+	return combine;
+}
+
+/*
+ * vdec_search_startcode() - scan roughly the first @range bytes of @buf
+ * for an AVC start code and return the offset of the byte that follows
+ * it, or -1 when none is found.
+ */
+static int vdec_search_startcode(u8 *buf, u32 range)
+{
+	u8 *cur = buf;
+	int i, hit;
+
+	for (i = 4; i < range; i++, cur++) {
+		hit = find_start_code(cur, 7);
+		if (hit > 0)
+			return (cur - buf) + hit;
+	}
+
+	return -1;
+}
+
+/*
+ * parse_stream_ucode() - feed the chunk to the decoder core and let the
+ * ucode parse the parameter sets; blocks (up to 1s) on inst->comp until
+ * set_param_ps_info()/set_param_write_sync() signals completion.
+ *
+ * Return: 0 when the ucode produced a non-zero DPB size, -1 on timeout
+ * or unparsed stream, or the write error.
+ */
+static int parse_stream_ucode(struct vdec_h264_inst *inst,
+			      u8 *buf, u32 size, u64 timestamp)
+{
+	int ret = 0;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+
+	ret = vdec_vframe_write(vdec, buf, size, timestamp);
+	if (ret < 0) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"write frame data failed. err: %d\n", ret);
+		return ret;
+	}
+
+	/* wait ucode parse ending. */
+	wait_for_completion_timeout(&inst->comp,
+		msecs_to_jiffies(1000));
+
+	return inst->vsi->dec.dpb_sz ? 0 : -1;
+}
+
+/*
+ * parse_stream_ucode_dma() - DMA-buffer variant of parse_stream_ucode():
+ * hands the dma address (secure/dmabuf path) to the core, then waits for
+ * the ucode to report the parameter sets.
+ */
+static int parse_stream_ucode_dma(struct vdec_h264_inst *inst,
+	ulong buf, u32 size, u64 timestamp, u32 handle)
+{
+	int ret = 0;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+
+	ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle,
+		vdec_vframe_input_free, inst->ctx);
+	if (ret < 0) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"write frame data failed. err: %d\n", ret);
+		return ret;
+	}
+
+	/* wait ucode parse ending. */
+	wait_for_completion_timeout(&inst->comp,
+		msecs_to_jiffies(1000));
+
+	return inst->vsi->dec.dpb_sz ? 0 : -1;
+}
+
+/*
+ * parse_stream_cpu() - host-side parameter-set parsing: record whether
+ * the chunk is combined (csd + slice), then extract the SPS and fill the
+ * vdec params from it.
+ *
+ * Return: 0 when an SPS was parsed, -1/-ENOMEM/parser error otherwise.
+ */
+static int parse_stream_cpu(struct vdec_h264_inst *inst, u8 *buf, u32 size)
+{
+	int ret = 0;
+	struct h264_param_sets *ps;
+	int nal_idx = 0;
+	bool is_combine = false;
+
+	is_combine = check_frame_combine(buf, size, &nal_idx);
+	if (nal_idx < 0)
+		return -1;
+
+	/* if the st compose from csd + slice that is the combine data. */
+	inst->vsi->is_combine = is_combine;
+	inst->vsi->nalu_pos = nal_idx;
+
+	ps = vzalloc(sizeof(struct h264_param_sets));
+	if (ps == NULL)
+		return -ENOMEM;
+
+	ret = h264_decode_extradata_ps(buf, size, ps);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"parse extra data failed. err: %d\n", ret);
+		goto out;
+	}
+
+	if (ps->sps_parsed)
+		fill_vdec_params(inst, &ps->sps);
+
+	ret = ps->sps_parsed ? 0 : -1;
+out:
+	vfree(ps);
+
+	return ret;
+}
+
+/*
+ * vdec_h264_probe() - extract stream parameters from the first chunk,
+ * either on the host CPU or via the decoder ucode, and snapshot the
+ * resulting picture info into cur_pic for later res-change detection.
+ */
+static int vdec_h264_probe(unsigned long h_vdec,
+	struct aml_vcodec_mem *bs, void *out)
+{
+	struct vdec_h264_inst *inst =
+		(struct vdec_h264_inst *)h_vdec;
+	u8 *buf = (u8 *) bs->vaddr;
+	u32 size = bs->size;
+	int ret = 0;
+
+	if (inst->ctx->is_drm_mode) {
+		if (bs->model == VB2_MEMORY_MMAP) {
+			struct aml_video_stream *s =
+				(struct aml_video_stream *) buf;
+
+			if ((s->magic != AML_VIDEO_MAGIC) &&
+				(s->type != V4L_STREAM_TYPE_MATEDATA))
+				return -1;
+
+			if (inst->ctx->param_sets_from_ucode) {
+				ret = parse_stream_ucode(inst, s->data,
+					s->len, bs->timestamp);
+			} else {
+				/* AUD NALs confuse the host parser: drop them. */
+				skip_aud_data((u8 **)&s->data, &s->len);
+				ret = parse_stream_cpu(inst, s->data, s->len);
+			}
+		} else if (bs->model == VB2_MEMORY_DMABUF ||
+			bs->model == VB2_MEMORY_USERPTR) {
+			ret = parse_stream_ucode_dma(inst, bs->addr, size,
+				bs->timestamp, BUFF_IDX(bs, bs->index));
+		}
+	} else {
+		if (inst->ctx->param_sets_from_ucode) {
+			ret = parse_stream_ucode(inst, buf, size, bs->timestamp);
+		} else {
+			skip_aud_data(&buf, &size);
+			ret = parse_stream_cpu(inst, buf, size);
+		}
+	}
+
+	/* baseline for monitor_res_change(). */
+	inst->vsi->cur_pic = inst->vsi->pic;
+
+	return ret;
+}
+
+/*
+ * vdec_h264_deinit() - tear down the instance: release the decoder core
+ * and vfm glue, then free the vsi under ctx->slock so concurrent users
+ * of drv_handle see either a valid instance or 0, never a freed one.
+ */
+static void vdec_h264_deinit(unsigned long h_vdec)
+{
+	ulong flags;
+	struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+	struct aml_vcodec_ctx *ctx = inst->ctx;
+
+	video_decoder_release(&inst->vdec);
+
+	vcodec_vfm_release(&inst->vfm);
+
+	//dump_deinit();
+
+	spin_lock_irqsave(&ctx->slock, flags);
+	if (inst->vsi && inst->vsi->header_buf)
+		kfree(inst->vsi->header_buf);
+
+	if (inst->vsi)
+		kfree(inst->vsi);
+
+	kfree(inst);
+
+	ctx->drv_handle = 0;
+	spin_unlock_irqrestore(&ctx->slock, flags);
+}
+
+/* Pop a free capture buffer from the ctx queue for the decoder to fill. */
+static int vdec_h264_get_fb(struct vdec_h264_inst *inst, struct vdec_v4l2_buffer **out)
+{
+	return get_fb_from_queue(inst->ctx, out);
+}
+
+/*
+ * vdec_h264_get_vf() - fetch the next displayable vframe from the vfm
+ * path and hand back its v4l2 buffer marked FB_ST_DISPLAY; *out is NULL
+ * when nothing is available.
+ *
+ * Fix: error message typo "avalid" -> "invalid".
+ */
+static void vdec_h264_get_vf(struct vdec_h264_inst *inst, struct vdec_v4l2_buffer **out)
+{
+	struct vframe_s *vf = NULL;
+	struct vdec_v4l2_buffer *fb = NULL;
+
+	/* peek first so an empty queue is reported without consuming. */
+	vf = peek_video_frame(&inst->vfm);
+	if (!vf) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"there is no vframe.\n");
+		*out = NULL;
+		return;
+	}
+
+	vf = get_video_frame(&inst->vfm);
+	if (!vf) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"the vframe is invalid.\n");
+		*out = NULL;
+		return;
+	}
+
+	atomic_set(&vf->use_cnt, 1);
+
+	fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
+	if (fb) {
+		fb->vf_handle = (unsigned long)vf;
+		fb->status = FB_ST_DISPLAY;
+	}
+
+	*out = fb;
+
+	//pr_info("%s, %d\n", __func__, fb->base_y.bytes_used);
+	//dump_write(fb->base_y.vaddr, fb->base_y.bytes_used);
+	//dump_write(fb->base_c.vaddr, fb->base_c.bytes_used);
+
+	/* convert yuv format. */
+	//swap_uv(fb->base_c.vaddr, fb->base_c.size);
+}
+
+/*
+ * vdec_write_nalu() - push one NAL to the decoder core, caching lone
+ * SPS/PPS/SEI NALs in header_buf so they can be prepended to the next
+ * slice (the core wants headers and slice in one write).
+ *
+ * Return: bytes accepted, or negative error (-EILSEQ on header-cache
+ * overflow, -ENOMEM, or the core's write error).
+ */
+static int vdec_write_nalu(struct vdec_h264_inst *inst,
+	u8 *buf, u32 size, u64 ts)
+{
+	int ret = -1;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+	bool is_combine = inst->vsi->is_combine;
+	int nalu_pos;
+	u32 nal_type;
+
+	/*print_hex_debug(buf, size, 32);*/
+
+	nalu_pos = vdec_search_startcode(buf, 16);
+	if (nalu_pos < 0)
+		goto err;
+
+	nal_type = AVC_NAL_TYPE(buf[nalu_pos]);
+	//v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, "NALU type: %d, size: %u\n", nal_type, size);
+
+	/* lone csd NALs are cached, not written, until a slice arrives. */
+	if (nal_type == NAL_H264_SPS && !is_combine) {
+		if (inst->vsi->head_offset + size > HEADER_BUFFER_SIZE) {
+			ret = -EILSEQ;
+			goto err;
+		}
+		inst->vsi->sps_size = size;
+		memcpy(inst->vsi->header_buf + inst->vsi->head_offset, buf, size);
+		inst->vsi->head_offset += inst->vsi->sps_size;
+		ret = size;
+	} else if (nal_type == NAL_H264_PPS && !is_combine) {
+			//buf_sz -= nal_start_idx;
+		if (inst->vsi->head_offset + size > HEADER_BUFFER_SIZE) {
+			ret = -EILSEQ;
+			goto err;
+		}
+		inst->vsi->pps_size = size;
+		memcpy(inst->vsi->header_buf + inst->vsi->head_offset, buf, size);
+		inst->vsi->head_offset += inst->vsi->pps_size;
+		ret = size;
+	} else if (nal_type == NAL_H264_SEI && !is_combine) {
+		if (inst->vsi->head_offset + size > HEADER_BUFFER_SIZE) {
+			ret = -EILSEQ;
+			goto err;
+		}
+		inst->vsi->sei_size = size;
+		memcpy(inst->vsi->header_buf + inst->vsi->head_offset, buf, size);
+		inst->vsi->head_offset += inst->vsi->sei_size;
+		ret = size;
+	} else if (inst->vsi->head_offset == 0) {
+		/* nothing cached: write the NAL straight through. */
+		ret = vdec_vframe_write(vdec, buf, size, ts);
+	} else {
+		/* prepend cached headers to this slice, then reset the cache. */
+		char *write_buf = vmalloc(inst->vsi->head_offset + size);
+		if (!write_buf) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		memcpy(write_buf, inst->vsi->header_buf, inst->vsi->head_offset);
+		memcpy(write_buf + inst->vsi->head_offset, buf, size);
+
+		ret = vdec_vframe_write(vdec, write_buf,
+			inst->vsi->head_offset + size, ts);
+
+		memset(inst->vsi->header_buf, 0, HEADER_BUFFER_SIZE);
+		inst->vsi->head_offset = 0;
+		inst->vsi->sps_size = 0;
+		inst->vsi->pps_size = 0;
+		inst->vsi->sei_size = 0;
+
+		vfree(write_buf);
+	}
+
+	return ret;
+err:
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR, "err(%d)", ret);
+	return ret;
+}
+
+/*
+ * monitor_res_change() - re-parse any SPS found in the chunk and compare
+ * the resulting geometry/profile/ref-count against the last snapshot.
+ *
+ * Return: true (and updates cur_pic) when the stream parameters changed
+ * and the caller must trigger a resolution-change sequence.
+ *
+ * NOTE(review): `len` stays at `size` until the first start code is
+ * found while `p` advances each iteration, so find_start_code(p, len)
+ * can look past buf+size near the end of the buffer — confirm.
+ */
+static bool monitor_res_change(struct vdec_h264_inst *inst, u8 *buf, u32 size)
+{
+	int ret = 0, i = 0, j = 0;
+	u8 *p = buf;
+	int len = size;
+	u32 type;
+
+	for (i = 4; i < size; i++) {
+		j = find_start_code(p, len);
+		if (j > 0) {
+			len = size - (p - buf);
+			type = AVC_NAL_TYPE(p[j]);
+			/* stop at the first non-csd, non-AUD NAL (a slice). */
+			if (type != NAL_H264_AUD &&
+				(type > NAL_H264_PPS || type < NAL_H264_SEI))
+				break;
+
+			if (type == NAL_H264_SPS) {
+				ret = parse_stream_cpu(inst, p, len);
+				if (ret)
+					break;
+			}
+			p += j;
+		}
+		p++;
+	}
+
+	if (!ret && ((inst->vsi->cur_pic.coded_width !=
+		inst->vsi->pic.coded_width ||
+		inst->vsi->cur_pic.coded_height !=
+		inst->vsi->pic.coded_height) ||
+		(inst->vsi->pic.profile_idc !=
+		inst->vsi->cur_pic.profile_idc) ||
+		(inst->vsi->pic.ref_frame_count !=
+		inst->vsi->cur_pic.ref_frame_count))) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, "res change\n");
+		inst->vsi->cur_pic = inst->vsi->pic;
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * vdec_h264_decode() - queue one compressed chunk into the decoder.
+ *
+ * @res_chg is set true (with return 0) when a resolution change was
+ * detected; the chunk must then be resubmitted after reconfiguration.
+ *
+ * Return: >= 0 bytes written, -EAGAIN when the input ring is full,
+ * negative error otherwise.
+ */
+static int vdec_h264_decode(unsigned long h_vdec,
+			    struct aml_vcodec_mem *bs, bool *res_chg)
+{
+	struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+	u8 *buf;
+	u32 size;
+	int ret = -1;
+
+	if (bs == NULL)
+		return -1;
+
+	buf = (u8 *) bs->vaddr;
+	size = bs->size;
+
+	/* input ring has no room: ask the caller to retry later. */
+	if (vdec_input_full(vdec))
+		return -EAGAIN;
+
+	if (inst->ctx->is_drm_mode) {
+		if (bs->model == VB2_MEMORY_MMAP) {
+			struct aml_video_stream *s =
+				(struct aml_video_stream *) buf;
+
+			if (s->magic != AML_VIDEO_MAGIC)
+				return -1;
+
+			if (!inst->ctx->param_sets_from_ucode &&
+				(s->type == V4L_STREAM_TYPE_MATEDATA)) {
+				if ((*res_chg = monitor_res_change(inst,
+					s->data, s->len)))
+					return 0;
+			}
+
+			ret = vdec_vframe_write(vdec,
+				s->data,
+				s->len,
+				bs->timestamp);
+		} else if (bs->model == VB2_MEMORY_DMABUF ||
+			bs->model == VB2_MEMORY_USERPTR) {
+			/* secure path: pass the DMA address straight down. */
+			ret = vdec_vframe_write_with_dma(vdec,
+				bs->addr, size, bs->timestamp,
+				BUFF_IDX(bs, bs->index),
+				vdec_vframe_input_free, inst->ctx);
+		}
+	} else {
+		if (inst->ctx->param_sets_from_ucode) {
+			int nal_idx = 0;
+			/* if the st compose from csd + slice that is the combine data. */
+			inst->vsi->is_combine = check_frame_combine(buf, size, &nal_idx);
+			/*if (nal_idx < 0)
+				return -1;*/
+		} else {
+			/*checked whether the resolution changes.*/
+			if ((*res_chg = monitor_res_change(inst, buf, size))) {
+				return 0;
+			}
+		}
+		ret = vdec_write_nalu(inst, buf, size, bs->timestamp);
+	}
+
+	return ret;
+}
+
+/*
+ * Copy out the decoder parameters gathered so far; only fields flagged
+ * valid in parms_status are written, and the mask is OR-ed into the
+ * output so the caller can tell which fields are meaningful.
+ */
+static void get_param_config_info(struct vdec_h264_inst *inst,
+	struct aml_dec_params *parms)
+{
+	u32 status = inst->parms.parms_status;
+
+	if (status & V4L2_CONFIG_PARM_DECODE_CFGINFO)
+		parms->cfg = inst->parms.cfg;
+	if (status & V4L2_CONFIG_PARM_DECODE_PSINFO)
+		parms->ps = inst->parms.ps;
+	if (status & V4L2_CONFIG_PARM_DECODE_HDRINFO)
+		parms->hdr = inst->parms.hdr;
+	if (status & V4L2_CONFIG_PARM_DECODE_CNTINFO)
+		parms->cnt = inst->parms.cnt;
+
+	parms->parms_status |= status;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"parms status: %u\n", parms->parms_status);
+}
+
+/*
+ * vdec_h264_get_param() - dispatch a GET_PARAM_* query to its handler;
+ * @out points to a type-specific destination chosen by @type.
+ *
+ * Return: 0 on success, -EINVAL for an unknown type, -1 for a NULL inst.
+ */
+static int vdec_h264_get_param(unsigned long h_vdec,
+			       enum vdec_get_param_type type, void *out)
+{
+	int ret = 0;
+	struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+
+	if (!inst) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"the h264 inst of dec is invalid.\n");
+		return -1;
+	}
+
+	switch (type) {
+	case GET_PARAM_DISP_FRAME_BUFFER:
+		vdec_h264_get_vf(inst, out);
+		break;
+
+	case GET_PARAM_FREE_FRAME_BUFFER:
+		ret = vdec_h264_get_fb(inst, out);
+		break;
+
+	case GET_PARAM_PIC_INFO:
+		get_pic_info(inst, out);
+		break;
+
+	case GET_PARAM_DPB_SIZE:
+		get_dpb_size(inst, out);
+		break;
+
+	case GET_PARAM_CROP_INFO:
+		get_crop_info(inst, out);
+		break;
+
+	case GET_PARAM_CONFIG_INFO:
+		get_param_config_info(inst, out);
+		break;
+	default:
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"invalid get parameter type=%d\n", type);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* Wake any thread blocked in parse_stream_ucode*() waiting on inst->comp. */
+static void set_param_write_sync(struct vdec_h264_inst *inst)
+{
+	complete(&inst->comp);
+}
+
+/*
+ * set_param_ps_info() - latch the picture/sequence info reported by the
+ * ucode parser into the shared vsi, apply double-write geometry, then
+ * wake the probe thread blocked on inst->comp.
+ */
+static void set_param_ps_info(struct vdec_h264_inst *inst,
+	struct aml_vdec_ps_infos *ps)
+{
+	struct vdec_pic_info *pic = &inst->vsi->pic;
+	struct vdec_h264_dec_info *dec = &inst->vsi->dec;
+	struct v4l2_rect *rect = &inst->vsi->crop;
+	int dw = inst->parms.cfg.double_write_mode;
+
+	/* fill visible area size that be used for EGL. */
+	pic->visible_width	= ps->visible_width;
+	pic->visible_height	= ps->visible_height;
+
+	/* calc visible ares. */
+	rect->left		= 0;
+	rect->top		= 0;
+	rect->width		= pic->visible_width;
+	rect->height		= pic->visible_height;
+
+	/* config canvas size that be used for decoder. */
+	pic->coded_width	= ps->coded_width;
+	pic->coded_height	= ps->coded_height;
+	pic->y_len_sz		= pic->coded_width * pic->coded_height;
+	pic->c_len_sz		= pic->y_len_sz >> 1;
+	pic->profile_idc	= ps->profile;
+	pic->ref_frame_count	= ps->ref_frames;
+	pic->field		= ps->field;
+	dec->dpb_sz		= ps->dpb_size;
+
+	inst->parms.ps 	= *ps;
+	inst->parms.parms_status |=
+		V4L2_CONFIG_PARM_DECODE_PSINFO;
+
+	vdec_config_dw_mode(pic, dw);
+
+	/*wake up*/
+	complete(&inst->comp);
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"Parse from ucode, visible(%d x %d), coded(%d x %d) dpb: %d, scan: %s\n",
+		ps->visible_width, ps->visible_height,
+		ps->coded_width, ps->coded_height,
+		dec->dpb_sz,
+		ps->field == V4L2_FIELD_NONE ? "P" : "I");
+}
+
+/*
+ * set_param_hdr_info() - latch HDR metadata reported by the codec and,
+ * the first time it arrives, notify user space with a source-change
+ * event. Subsequent calls only refresh the stored metadata.
+ *
+ * Cleanup: the original assigned inst->parms.hdr both unconditionally
+ * and again inside the guarded branch; the inner dead store is removed
+ * (behavior unchanged).
+ */
+static void set_param_hdr_info(struct vdec_h264_inst *inst,
+	struct aml_vdec_hdr_infos *hdr)
+{
+	inst->parms.hdr = *hdr;
+	if (!(inst->parms.parms_status &
+		V4L2_CONFIG_PARM_DECODE_HDRINFO)) {
+		inst->parms.parms_status |=
+			V4L2_CONFIG_PARM_DECODE_HDRINFO;
+		aml_vdec_dispatch_event(inst->ctx,
+			V4L2_EVENT_SRC_CH_HDRINFO);
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+			"H264 set HDR infos\n");
+	}
+}
+
+/* Forward a decoder event to the v4l2 event queue and log it. */
+static void set_param_post_event(struct vdec_h264_inst *inst, u32 *event)
+{
+	aml_vdec_dispatch_event(inst->ctx, *event);
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"H264 post event: %d\n", *event);
+}
+
+/*
+ * vdec_h264_set_param() - dispatch a SET_PARAM_* notification (usually
+ * coming from the ucode/ISR side) to its handler.
+ *
+ * Return: 0 on success, -EINVAL for an unknown type, -1 for a NULL inst.
+ */
+static int vdec_h264_set_param(unsigned long h_vdec,
+	enum vdec_set_param_type type, void *in)
+{
+	int ret = 0;
+	struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+
+	if (!inst) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"the h264 inst of dec is invalid.\n");
+		return -1;
+	}
+
+	switch (type) {
+	case SET_PARAM_WRITE_FRAME_SYNC:
+		set_param_write_sync(inst);
+		break;
+
+	case SET_PARAM_PS_INFO:
+		set_param_ps_info(inst, in);
+		break;
+
+	case SET_PARAM_HDR_INFO:
+		set_param_hdr_info(inst, in);
+		break;
+
+	case SET_PARAM_POST_EVENT:
+		set_param_post_event(inst, in);
+		break;
+	default:
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"invalid set parameter type=%d\n", type);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* H264 implementation of the common decoder ops table. */
+static struct vdec_common_if vdec_h264_if = {
+	.init		= vdec_h264_init,
+	.probe		= vdec_h264_probe,
+	.decode		= vdec_h264_decode,
+	.get_param	= vdec_h264_get_param,
+	.set_param	= vdec_h264_set_param,
+	.deinit		= vdec_h264_deinit,
+};
+
+struct vdec_common_if *get_h264_dec_comm_if(void);
+
+/* Accessor used by vdec_drv_if to pick the H264 ops table. */
+struct vdec_common_if *get_h264_dec_comm_if(void)
+{
+	return &vdec_h264_if;
+}
diff --git a/drivers/amvdec_ports/decoder/vdec_hevc_if.c b/drivers/amvdec_ports/decoder/vdec_hevc_if.c
new file mode 100644
index 0000000..2a41276
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/vdec_hevc_if.c
@@ -0,0 +1,881 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <uapi/linux/swab.h>
+#include "../vdec_drv_if.h"
+#include "../aml_vcodec_util.h"
+#include "../aml_vcodec_dec.h"
+#include "../aml_vcodec_drv.h"
+#include "../aml_vcodec_adapt.h"
+#include "../vdec_drv_base.h"
+#include "../aml_vcodec_vfm.h"
+#include "aml_hevc_parser.h"
+
+#define HEVC_NAL_TYPE(value)				((value >> 1) & 0x3F)
+#define HEADER_BUFFER_SIZE			(32 * 1024)
+
+/**
+ * struct hevc_fb - hevc decode frame buffer information
+ * @vdec_fb_va  : virtual address of struct vdec_fb
+ * @y_fb_dma    : dma address of Y frame buffer (luma)
+ * @c_fb_dma    : dma address of C frame buffer (chroma)
+ * @poc         : picture order count of frame buffer
+ * @reserved    : for 8 bytes alignment
+ *
+ * NOTE(review): not referenced anywhere in this file — presumably part
+ * of a shared layout with firmware; confirm before removing.
+ */
+struct hevc_fb {
+	uint64_t vdec_fb_va;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	int32_t poc;
+	uint32_t reserved;
+};
+
+/**
+ * struct vdec_hevc_dec_info - decode information
+ * @dpb_sz		: decoding picture buffer size
+ * @resolution_changed  : resolution change happened
+ * @reserved		: for 8 bytes alignment
+ * @bs_dma		: Input bit-stream buffer dma address
+ * @y_fb_dma		: Y frame buffer dma address
+ * @c_fb_dma		: C frame buffer dma address
+ * @vdec_fb_va		: VDEC frame buffer struct virtual address
+ */
+struct vdec_hevc_dec_info {
+	uint32_t dpb_sz;
+	uint32_t resolution_changed;
+	uint32_t reserved;
+	uint64_t bs_dma;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_hevc_vsi - decoder instance shared information
+ *
+ * NOTE(review): the previous kernel-doc described ring-list members
+ * (hdr_buf/list_free/list_disp) that do not exist in this struct; the
+ * actual members are documented below.
+ *
+ * @header_buf	: buffer used to cache stream headers (sps/pps/sei)
+ * @sps_size	: bytes of cached SPS data
+ * @pps_size	: bytes of cached PPS data
+ * @sei_size	: bytes of cached SEI data
+ * @head_offset	: current write offset into @header_buf
+ * @dec		: decode information (DPB size, resolution change flag)
+ * @pic		: picture information parsed from the stream
+ * @cur_pic	: last accepted picture info, used for res-change detection
+ * @crop	: crop information
+ * @is_combine	: whether header and frame data are combined — TODO confirm
+ * @nalu_pos	: nalu position within the stream — TODO confirm
+ * @ps		: parameter sets filled by the CPU parsing path
+ */
+struct vdec_hevc_vsi {
+	char *header_buf;
+	int sps_size;
+	int pps_size;
+	int sei_size;
+	int head_offset;
+	struct vdec_hevc_dec_info dec;
+	struct vdec_pic_info pic;
+	struct vdec_pic_info cur_pic;
+	struct v4l2_rect crop;
+	bool is_combine;
+	int nalu_pos;
+	struct h265_param_sets ps;
+};
+
+/**
+ * struct vdec_hevc_inst - hevc decoder instance
+ * @num_nalu : how many nalus be decoded
+ * @ctx      : point to aml_vcodec_ctx
+ * @vdec     : adapter context of the underlying decoder core
+ * @vsi      : VPU shared information
+ * @vfm      : video frame manager context
+ * @parms    : cfg/ps/hdr parameter groups latched for user space queries
+ * @comp     : completion the probe path waits on until ucode parsing ends
+ */
+struct vdec_hevc_inst {
+	unsigned int num_nalu;
+	struct aml_vcodec_ctx *ctx;
+	struct aml_vdec_adapt vdec;
+	struct vdec_hevc_vsi *vsi;
+	struct vcodec_vfm_s vfm;
+	struct aml_dec_params parms;
+	struct completion comp;
+};
+
+/* Copy the parsed picture info out to the caller and trace it. */
+static void get_pic_info(struct vdec_hevc_inst *inst,
+			 struct vdec_pic_info *pic)
+{
+	const struct vdec_pic_info *info = &inst->vsi->pic;
+
+	*pic = *info;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"pic(%d, %d), buf(%d, %d)\n",
+		 pic->visible_width, pic->visible_height,
+		 pic->coded_width, pic->coded_height);
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"Y(%d, %d), C(%d, %d)\n", pic->y_bs_sz,
+		 pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+/* Report the crop rectangle derived from the stream's output window. */
+static void get_crop_info(struct vdec_hevc_inst *inst, struct v4l2_rect *cr)
+{
+	const struct v4l2_rect *crop = &inst->vsi->crop;
+
+	cr->left	= crop->left;
+	cr->top		= crop->top;
+	cr->width	= crop->width;
+	cr->height	= crop->height;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"l=%d, t=%d, w=%d, h=%d\n",
+		 cr->left, cr->top, cr->width, cr->height);
+}
+
+/* Report the decoding picture buffer size chosen for this instance. */
+static void get_dpb_size(struct vdec_hevc_inst *inst, unsigned int *dpb_sz)
+{
+	*dpb_sz = inst->vsi->dec.dpb_sz;
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, "sz=%d\n", *dpb_sz);
+}
+
+/*
+ * Fill @parm with the default HEVC decoder configure string.
+ *
+ * Returns the number of bytes written (excluding the trailing NUL).
+ *
+ * Fix: the original returned "parm - pbuf", i.e. the NEGATED length,
+ * which corrupted ctx->config.length on the default-parameter path.
+ * The MJPEG variant of this helper correctly returns "pbuf - parm".
+ */
+static u32 vdec_config_default_parms(u8 *parm)
+{
+	u8 *pbuf = parm;
+
+	pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;");
+	pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:7;");
+	pbuf += sprintf(pbuf, "hevc_double_write_mode:16;");
+	pbuf += sprintf(pbuf, "hevc_buf_width:4096;");
+	pbuf += sprintf(pbuf, "hevc_buf_height:2304;");
+	pbuf += sprintf(pbuf, "save_buffer_mode:0;");
+	pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:0;");
+	pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:0;");
+
+	return pbuf - parm;
+}
+
+/*
+ * Build the configure string handed to the decoder core.
+ *
+ * When user space supplied CFGINFO, serialize those values into
+ * ctx->config.buf; otherwise fall back to the defaults (and record the
+ * default dw mode/margin back into the cfg struct so later queries see
+ * them). Either way the cfg is latched into inst->parms and the
+ * CFGINFO status bit is set.
+ *
+ * NOTE(review): assumes ctx->config.buf is large enough for all the
+ * sprintf() output — confirm the buffer size at its definition.
+ */
+static void vdec_parser_parms(struct vdec_hevc_inst *inst)
+{
+	struct aml_vcodec_ctx *ctx = inst->ctx;
+
+	if (ctx->config.parm.dec.parms_status &
+		V4L2_CONFIG_PARM_DECODE_CFGINFO) {
+		u8 *pbuf = ctx->config.buf;
+
+		pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;");
+		pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;",
+			ctx->config.parm.dec.cfg.ref_buf_margin);
+		pbuf += sprintf(pbuf, "hevc_double_write_mode:%d;",
+			ctx->config.parm.dec.cfg.double_write_mode);
+		pbuf += sprintf(pbuf, "hevc_buf_width:4096;");
+		pbuf += sprintf(pbuf, "hevc_buf_height:2304;");
+		pbuf += sprintf(pbuf, "save_buffer_mode:0;");
+		pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;",
+			ctx->config.parm.dec.cfg.canvas_mem_mode);
+		pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:%d;",
+			ctx->config.parm.dec.cfg.canvas_mem_endian);
+		pbuf += sprintf(pbuf, "parm_v4l_low_latency_mode:%d;",
+			ctx->config.parm.dec.cfg.low_latency_mode);
+		ctx->config.length = pbuf - ctx->config.buf;
+	} else {
+		/* defaults must match vdec_config_default_parms(). */
+		ctx->config.parm.dec.cfg.double_write_mode = 16;
+		ctx->config.parm.dec.cfg.ref_buf_margin = 7;
+		ctx->config.length = vdec_config_default_parms(ctx->config.buf);
+	}
+
+	inst->vdec.config	= ctx->config;
+	inst->parms.cfg		= ctx->config.parm.dec.cfg;
+	inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO;
+}
+
+/*
+ * Allocate and bring up one HEVC decoder instance.
+ *
+ * On success the opaque handle is stored in *h_vdec and 0 is returned;
+ * on any failure everything allocated so far is unwound, *h_vdec is
+ * zeroed and a negative errno (or -1) is returned.
+ */
+static int vdec_hevc_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+	struct vdec_hevc_inst *inst = NULL;
+	int ret = -1;
+	bool dec_init = false;
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->vdec.video_type	= VFORMAT_HEVC;
+	inst->vdec.dev		= ctx->dev->vpu_plat_dev;
+	inst->vdec.filp		= ctx->dev->filp;
+	inst->vdec.ctx		= ctx;
+	inst->ctx		= ctx;
+
+	/* serialize user/default config before the core is started. */
+	vdec_parser_parms(inst);
+
+	/* set play mode.*/
+	if (ctx->is_drm_mode)
+		inst->vdec.port.flag |= PORT_FLAG_DRM;
+
+	/* to eable hevc hw.*/
+	inst->vdec.port.type = PORT_TYPE_HEVC;
+
+	/* init vfm */
+	inst->vfm.ctx		= ctx;
+	inst->vfm.ada_ctx	= &inst->vdec;
+	ret = vcodec_vfm_init(&inst->vfm);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"init vfm failed.\n");
+		goto err;
+	}
+
+	ret = video_decoder_init(&inst->vdec);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"vdec_hevc init err=%d\n", ret);
+		goto err;
+	}
+	dec_init = true;
+
+	/* probe info from the stream */
+	inst->vsi = kzalloc(sizeof(struct vdec_hevc_vsi), GFP_KERNEL);
+	if (!inst->vsi) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* alloc the header buffer to be used cache sps or spp etc.*/
+	inst->vsi->header_buf = kzalloc(HEADER_BUFFER_SIZE, GFP_KERNEL);
+	if (!inst->vsi->header_buf) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* signaled by SET_PARAM_WRITE_FRAME_SYNC / PS_INFO callbacks. */
+	init_completion(&inst->comp);
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"hevc Instance >> %lx\n", (ulong) inst);
+
+	ctx->ada_ctx	= &inst->vdec;
+	*h_vdec		= (unsigned long)inst;
+
+	//dump_init();
+
+	return 0;
+err:
+	if (dec_init)
+		video_decoder_release(&inst->vdec);
+	/*
+	 * NOTE(review): vcodec_vfm_release() also runs when
+	 * vcodec_vfm_init() itself failed — assumed safe to call on a
+	 * partially-initialized vfm; confirm.
+	 */
+	if (inst)
+		vcodec_vfm_release(&inst->vfm);
+	if (inst && inst->vsi && inst->vsi->header_buf)
+		kfree(inst->vsi->header_buf);
+	if (inst && inst->vsi)
+		kfree(inst->vsi);
+	if (inst)
+		kfree(inst);
+	*h_vdec = 0;
+
+	return ret;
+}
+
+
+/*
+ * Derive the number of reference frame buffers needed from the SPS
+ * temporal-layer-0 DPB constraints, plus slack for multi-instance
+ * operation and EOS flushing.
+ */
+static int refer_buffer_num(struct h265_SPS_t *sps)
+{
+	int used_buf_num = 0;
+	int sps_pic_buf_diff = 0;
+
+	if ((!sps->temporal_layer[0].num_reorder_pics) &&
+		(sps->temporal_layer[0].max_dec_pic_buffering)) {
+		/* the range of sps_num_reorder_pics_0 is in
+		[0, sps_max_dec_pic_buffering_minus1_0] */
+		used_buf_num = sps->temporal_layer[0].max_dec_pic_buffering;
+	} else
+		used_buf_num = sps->temporal_layer[0].num_reorder_pics;
+
+	sps_pic_buf_diff = sps->temporal_layer[0].max_dec_pic_buffering -
+		sps->temporal_layer[0].num_reorder_pics - 1;
+
+	/* large gap between buffering and reorder: one extra buffer. */
+	if (sps_pic_buf_diff >= 4)
+		used_buf_num += 1;
+
+	/*need one more for multi instance, as
+	apply_ref_pic_set() has no chanch to run to
+	to clear referenced flag in some case */
+	used_buf_num++;
+
+	/* for eos add more buffer to flush.*/
+	used_buf_num++;
+
+	return used_buf_num;
+}
+
+/*
+ * Resolve the effective double-write mode.
+ *
+ * The "auto" modes 0x100/0x200/0x300 pick a downscale factor based on
+ * the configured init resolution; any other value is passed through.
+ *
+ * NOTE(review): the @dw_mode parameter is never read — the mode is
+ * taken from inst->parms.cfg.double_write_mode instead. Presumably
+ * kept for signature symmetry with other decoders; confirm.
+ */
+static int vdec_get_dw_mode(struct vdec_hevc_inst *inst, int dw_mode)
+{
+	u32 valid_dw_mode = inst->parms.cfg.double_write_mode;
+	int w = inst->parms.cfg.init_width;
+	int h = inst->parms.cfg.init_height;
+	u32 dw = 0x1; /*1:1*/
+
+	switch (valid_dw_mode) {
+	case 0x100:
+		if (w > 1920 && h > 1088)
+			dw = 0x4; /*1:2*/
+		break;
+	case 0x200:
+		if (w > 1920 && h > 1088)
+			dw = 0x2; /*1:4*/
+		break;
+	case 0x300:
+		if (w > 1280 && h > 720)
+			dw = 0x4; /*1:2*/
+		break;
+	default:
+		dw = valid_dw_mode;
+		break;
+	}
+
+	return dw;
+}
+
+/*
+ * Scale a linear picture dimension according to the effective
+ * double-write mode (afbc-only yields a fixed 64-byte stride).
+ */
+static int vdec_pic_scale(struct vdec_hevc_inst *inst, int length, int dw_mode)
+{
+	int mode = vdec_get_dw_mode(inst, dw_mode);
+
+	if (mode == 0x0)	/* only afbc, output afbc */
+		return 64;
+
+	if (mode == 0x1)	/* afbc and (w x h), output YUV420 */
+		return length;
+
+	/* afbc and (w/4 x h/4), output YUV420 (0x3 also keeps afbc) */
+	if (mode == 0x2 || mode == 0x3)
+		return length >> 2;
+
+	if (mode == 0x4)	/* afbc and (w/2 x h/2), output YUV420 */
+		return length >> 1;
+
+	/* 0x10 and anything else: (w x h), output YUV420-8bit */
+	return length;
+}
+
+/*
+ * Translate a freshly-parsed SPS into picture, crop and DPB settings
+ * for this instance (CPU parsing path), scaled by the double-write
+ * mode, and latch them into inst->parms with the PSINFO status bit.
+ */
+static void fill_vdec_params(struct vdec_hevc_inst *inst, struct h265_SPS_t *sps)
+{
+	struct vdec_pic_info *pic = &inst->vsi->pic;
+	struct vdec_hevc_dec_info *dec = &inst->vsi->dec;
+	struct v4l2_rect *rect = &inst->vsi->crop;
+	int dw = inst->parms.cfg.double_write_mode;
+	int margin = inst->parms.cfg.ref_buf_margin;
+
+	/* fill visible area size that be used for EGL. */
+	pic->visible_width = sps->width - (sps->output_window.left_offset +
+		sps->output_window.right_offset);
+	pic->visible_height = sps->height - (sps->output_window.top_offset +
+		sps->output_window.bottom_offset);
+	pic->visible_width = vdec_pic_scale(inst, pic->visible_width, dw);
+	pic->visible_height = vdec_pic_scale(inst, pic->visible_height, dw);
+
+	/* calc visible ares. */
+	rect->left		= 0;
+	rect->top		= 0;
+	rect->width		= pic->visible_width;
+	rect->height		= pic->visible_height;
+
+	/* config canvas size that be used for decoder. */
+	pic->coded_width	= vdec_pic_scale(inst, ALIGN(sps->width, 32), dw);
+	pic->coded_height	= vdec_pic_scale(inst, ALIGN(sps->height, 32), dw);
+
+	/* NV12-style sizing: chroma plane is half the luma plane. */
+	pic->y_len_sz		= pic->coded_width * pic->coded_height;
+	pic->c_len_sz		= pic->y_len_sz >> 1;
+
+	/* calc DPB size */
+	dec->dpb_sz		= refer_buffer_num(sps) + margin;
+
+	inst->parms.ps.visible_width	= pic->visible_width;
+	inst->parms.ps.visible_height	= pic->visible_height;
+	inst->parms.ps.coded_width	= pic->coded_width;
+	inst->parms.ps.coded_height	= pic->coded_height;
+	inst->parms.ps.dpb_size		= dec->dpb_sz;
+	inst->parms.parms_status	|= V4L2_CONFIG_PARM_DECODE_PSINFO;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_BUFMGR,
+		"The stream infos, dw: %d, coded:(%d x %d), visible:(%d x %d), DPB: %d, margin: %d\n",
+		dw, pic->coded_width, pic->coded_height,
+		pic->visible_width, pic->visible_height,
+		dec->dpb_sz - margin, margin);
+}
+
+/*
+ * Feed header data to the decoder and let the ucode parse it; blocks
+ * (up to 1s) until set_param_ps_info()/set_param_write_sync() signals
+ * inst->comp. Returns 0 once a DPB size was reported, -1 otherwise.
+ *
+ * NOTE(review): the wait_for_completion_timeout() result is ignored —
+ * a timeout is only detected indirectly via dpb_sz staying 0.
+ */
+static int parse_stream_ucode(struct vdec_hevc_inst *inst,
+			      u8 *buf, u32 size, u64 timestamp)
+{
+	int ret = 0;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+
+	ret = vdec_vframe_write(vdec, buf, size, timestamp);
+	if (ret < 0) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"write frame data failed. err: %d\n", ret);
+		return ret;
+	}
+
+	/* wait ucode parse ending. */
+	wait_for_completion_timeout(&inst->comp,
+		msecs_to_jiffies(1000));
+
+	return inst->vsi->dec.dpb_sz ? 0 : -1;
+}
+
+/*
+ * DMA-buffer variant of parse_stream_ucode(): hands the buffer to the
+ * core with a free callback, then waits (up to 1s) for the ucode to
+ * report parameter sets. Returns 0 once a DPB size is known.
+ */
+static int parse_stream_ucode_dma(struct vdec_hevc_inst *inst,
+	ulong buf, u32 size, u64 timestamp, u32 handle)
+{
+	int ret = 0;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+
+	ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle,
+		vdec_vframe_input_free, inst->ctx);
+	if (ret < 0) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"write frame data failed. err: %d\n", ret);
+		return ret;
+	}
+
+	/* wait ucode parse ending. */
+	wait_for_completion_timeout(&inst->comp,
+		msecs_to_jiffies(1000));
+
+	return inst->vsi->dec.dpb_sz ? 0 : -1;
+}
+
+/*
+ * Parse parameter sets from the bitstream on the CPU and, when an SPS
+ * was found, derive the decoder parameters from it.
+ *
+ * Returns 0 when an SPS was parsed and applied, the parser's error
+ * code on parse failure, or -1 when no SPS was present.
+ */
+static int parse_stream_cpu(struct vdec_hevc_inst *inst, u8 *buf, u32 size)
+{
+	struct h265_param_sets *ps;
+	int err;
+
+	ps = vzalloc(sizeof(struct h265_param_sets));
+	if (ps == NULL)
+		return -ENOMEM;
+
+	err = h265_decode_extradata_ps(buf, size, ps);
+	if (err) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"parse extra data failed. err: %d\n", err);
+	} else {
+		if (ps->sps_parsed)
+			fill_vdec_params(inst, &ps->sps);
+		err = ps->sps_parsed ? 0 : -1;
+	}
+
+	vfree(ps);
+
+	return err;
+}
+
+/*
+ * Probe stream headers to establish picture info and DPB size before
+ * decoding starts. Routes to ucode or CPU parsing depending on DRM
+ * mode, buffer model and param_sets_from_ucode, then snapshots the
+ * resulting picture info for later resolution-change detection.
+ */
+static int vdec_hevc_probe(unsigned long h_vdec,
+	struct aml_vcodec_mem *bs, void *out)
+{
+	struct vdec_hevc_inst *inst =
+		(struct vdec_hevc_inst *)h_vdec;
+	u8 *buf = (u8 *)bs->vaddr;
+	u32 size = bs->size;
+	int ret = 0;
+
+	if (inst->ctx->is_drm_mode) {
+		if (bs->model == VB2_MEMORY_MMAP) {
+			struct aml_video_stream *s =
+				(struct aml_video_stream *) buf;
+
+			/*
+			 * NOTE(review): rejects only when BOTH magic and
+			 * type mismatch ('&&'); '||' looks intended —
+			 * confirm against the stream wrapper producer.
+			 */
+			if ((s->magic != AML_VIDEO_MAGIC) &&
+				(s->type != V4L_STREAM_TYPE_MATEDATA))
+				return -1;
+
+			if (inst->ctx->param_sets_from_ucode) {
+				ret = parse_stream_ucode(inst, s->data,
+					s->len, bs->timestamp);
+			} else {
+				ret = parse_stream_cpu(inst, s->data, s->len);
+			}
+		} else if (bs->model == VB2_MEMORY_DMABUF ||
+			bs->model == VB2_MEMORY_USERPTR) {
+			ret = parse_stream_ucode_dma(inst, bs->addr, size,
+				bs->timestamp, BUFF_IDX(bs, bs->index));
+		}
+	} else {
+		if (inst->ctx->param_sets_from_ucode) {
+			ret = parse_stream_ucode(inst, buf, size, bs->timestamp);
+		} else {
+			ret = parse_stream_cpu(inst, buf, size);
+		}
+	}
+
+	/* baseline for monitor_res_change(). */
+	inst->vsi->cur_pic = inst->vsi->pic;
+
+	return ret;
+}
+
+/*
+ * Tear down one HEVC decoder instance: stop the core, release the
+ * frame manager, then free the shared info under ctx->slock so
+ * concurrent users of drv_handle observe a consistent state.
+ */
+static void vdec_hevc_deinit(unsigned long h_vdec)
+{
+	ulong flags;
+	struct vdec_hevc_inst *inst = (struct vdec_hevc_inst *)h_vdec;
+	struct aml_vcodec_ctx *ctx = inst->ctx;
+
+	video_decoder_release(&inst->vdec);
+
+	vcodec_vfm_release(&inst->vfm);
+
+	//dump_deinit();
+
+	/* kfree() is safe in atomic context; drv_handle cleared last. */
+	spin_lock_irqsave(&ctx->slock, flags);
+	if (inst->vsi && inst->vsi->header_buf)
+		kfree(inst->vsi->header_buf);
+
+	if (inst->vsi)
+		kfree(inst->vsi);
+
+	kfree(inst);
+
+	ctx->drv_handle = 0;
+	spin_unlock_irqrestore(&ctx->slock, flags);
+}
+
+/* Thin wrapper: pop a free frame buffer from the context queue. */
+static int vdec_hevc_get_fb(struct vdec_hevc_inst *inst, struct vdec_v4l2_buffer **out)
+{
+	return get_fb_from_queue(inst->ctx, out);
+}
+
+/*
+ * Fetch the next displayable frame from the vframe manager and hand
+ * back the v4l2 buffer it is backed by (*out is NULL when none is
+ * available). Peek-then-get: the peek avoids consuming a frame when
+ * the queue is empty.
+ */
+static void vdec_hevc_get_vf(struct vdec_hevc_inst *inst, struct vdec_v4l2_buffer **out)
+{
+	struct vframe_s *vf = NULL;
+	struct vdec_v4l2_buffer *fb = NULL;
+
+	vf = peek_video_frame(&inst->vfm);
+	if (!vf) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"there is no vframe.\n");
+		*out = NULL;
+		return;
+	}
+
+	vf = get_video_frame(&inst->vfm);
+	if (!vf) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"the vframe is avalid.\n");
+		*out = NULL;
+		return;
+	}
+
+	/* single owner until the buffer is recycled. */
+	atomic_set(&vf->use_cnt, 1);
+
+	fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
+	fb->vf_handle = (unsigned long)vf;
+	fb->status = FB_ST_DISPLAY;
+
+	*out = fb;
+
+	//pr_info("%s, %d\n", __func__, fb->base_y.bytes_used);
+	//dump_write(fb->base_y.vaddr, fb->base_y.bytes_used);
+	//dump_write(fb->base_c.vaddr, fb->base_c.bytes_used);
+
+	/* convert yuv format. */
+	//swap_uv(fb->base_c.vaddr, fb->base_c.size);
+}
+
+/* Push one chunk of bitstream into the decoder input fifo. */
+static int vdec_write_nalu(struct vdec_hevc_inst *inst,
+	u8 *buf, u32 size, u64 ts)
+{
+	return vdec_vframe_write(&inst->vdec, buf, size, ts);
+}
+
+/*
+ * Scan the buffer for parameter-set nal units (VPS..PPS), reparse any
+ * SPS on the CPU, and report true when the coded resolution differs
+ * from the last accepted picture info (which is then re-latched).
+ */
+static bool monitor_res_change(struct vdec_hevc_inst *inst, u8 *buf, u32 size)
+{
+	int ret = 0, i = 0, j = 0;
+	u8 *p = buf;
+	int len = size;
+	u32 type;
+
+	for (i = 4; i < size; i++) {
+		j = find_start_code(p, len);
+		if (j > 0) {
+			/*
+			 * NOTE(review): len is recomputed from p BEFORE p is
+			 * advanced past this start code — assumed intentional
+			 * (remaining bytes from the current scan position);
+			 * confirm against find_start_code()'s contract.
+			 */
+			len = size - (p - buf);
+			type = HEVC_NAL_TYPE(p[j]);
+			/* stop at the first non-parameter-set, non-AUD nal. */
+			if (type != HEVC_NAL_AUD &&
+				(type > HEVC_NAL_PPS || type < HEVC_NAL_VPS))
+				break;
+
+			if (type == HEVC_NAL_SPS) {
+				ret = parse_stream_cpu(inst, p, len);
+				if (ret)
+					break;
+			}
+			p += j;
+		}
+		p++;
+	}
+
+	/* a successful reparse with new dimensions means res change. */
+	if (!ret && (inst->vsi->cur_pic.coded_width !=
+		inst->vsi->pic.coded_width ||
+		inst->vsi->cur_pic.coded_height !=
+		inst->vsi->pic.coded_height)) {
+		inst->vsi->cur_pic = inst->vsi->pic;
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Submit one bitstream chunk for decoding.
+ *
+ * Sets *res_chg and returns 0 (without consuming the data) when a
+ * resolution change is detected on the CPU-parse path; returns
+ * -EAGAIN when the decoder input fifo is full, -1 on invalid input,
+ * otherwise the write result.
+ */
+static int vdec_hevc_decode(unsigned long h_vdec,
+			    struct aml_vcodec_mem *bs, bool *res_chg)
+{
+	struct vdec_hevc_inst *inst = (struct vdec_hevc_inst *)h_vdec;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+	u8 *buf;
+	u32 size;
+	int ret = -1;
+
+	if (bs == NULL)
+		return -1;
+
+	buf = (u8 *) bs->vaddr;
+	size = bs->size;
+
+	/* backpressure: let the caller retry later. */
+	if (vdec_input_full(vdec))
+		return -EAGAIN;
+
+	if (inst->ctx->is_drm_mode) {
+		if (bs->model == VB2_MEMORY_MMAP) {
+			struct aml_video_stream *s =
+				(struct aml_video_stream *) buf;
+
+			if (s->magic != AML_VIDEO_MAGIC)
+				return -1;
+
+			/* metadata frames may carry new parameter sets. */
+			if (!inst->ctx->param_sets_from_ucode &&
+				(s->type == V4L_STREAM_TYPE_MATEDATA)) {
+				if ((*res_chg = monitor_res_change(inst,
+					s->data, s->len)))
+					return 0;
+			}
+
+			ret = vdec_vframe_write(vdec,
+				s->data,
+				s->len,
+				bs->timestamp);
+		} else if (bs->model == VB2_MEMORY_DMABUF ||
+			bs->model == VB2_MEMORY_USERPTR) {
+			ret = vdec_vframe_write_with_dma(vdec,
+				bs->addr, size, bs->timestamp,
+				BUFF_IDX(bs, bs->index),
+				vdec_vframe_input_free, inst->ctx);
+		}
+	} else {
+		if (!inst->ctx->param_sets_from_ucode) {
+			/*checked whether the resolution changes.*/
+			if ((*res_chg = monitor_res_change(inst, buf, size)))
+				return 0;
+		}
+		ret = vdec_write_nalu(inst, buf, size, bs->timestamp);
+	}
+
+	return ret;
+}
+
+/*
+ * Copy back to the caller every parameter group this instance has
+ * already latched (cfg/ps/hdr/cnt) and merge the status bits.
+ * (Also normalizes the stray space-indentation of the original.)
+ */
+static void get_param_config_info(struct vdec_hevc_inst *inst,
+	struct aml_dec_params *parms)
+{
+	u32 status = inst->parms.parms_status;
+
+	if (status & V4L2_CONFIG_PARM_DECODE_CFGINFO)
+		parms->cfg = inst->parms.cfg;
+	if (status & V4L2_CONFIG_PARM_DECODE_PSINFO)
+		parms->ps = inst->parms.ps;
+	if (status & V4L2_CONFIG_PARM_DECODE_HDRINFO)
+		parms->hdr = inst->parms.hdr;
+	if (status & V4L2_CONFIG_PARM_DECODE_CNTINFO)
+		parms->cnt = inst->parms.cnt;
+
+	parms->parms_status |= status;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"parms status: %u\n", parms->parms_status);
+}
+
+/*
+ * Dispatch a GET_PARAM request to the matching accessor.
+ *
+ * Returns 0 on success, -1 for a NULL instance handle and -EINVAL for
+ * an unknown parameter type.
+ */
+static int vdec_hevc_get_param(unsigned long h_vdec,
+			       enum vdec_get_param_type type, void *out)
+{
+	struct vdec_hevc_inst *inst = (struct vdec_hevc_inst *)h_vdec;
+
+	if (!inst) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"the hevc inst of dec is invalid.\n");
+		return -1;
+	}
+
+	switch (type) {
+	case GET_PARAM_DISP_FRAME_BUFFER:
+		vdec_hevc_get_vf(inst, out);
+		return 0;
+	case GET_PARAM_FREE_FRAME_BUFFER:
+		return vdec_hevc_get_fb(inst, out);
+	case GET_PARAM_PIC_INFO:
+		get_pic_info(inst, out);
+		return 0;
+	case GET_PARAM_DPB_SIZE:
+		get_dpb_size(inst, out);
+		return 0;
+	case GET_PARAM_CROP_INFO:
+		get_crop_info(inst, out);
+		return 0;
+	case GET_PARAM_CONFIG_INFO:
+		get_param_config_info(inst, out);
+		return 0;
+	default:
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"invalid get parameter type=%d\n", type);
+		return -EINVAL;
+	}
+}
+
+/* Wake the probe path blocked in parse_stream_ucode{,_dma}(). */
+static void set_param_write_sync(struct vdec_hevc_inst *inst)
+{
+	complete(&inst->comp);
+}
+
+/*
+ * Accept parameter-set info reported by the ucode (ucode-parse path):
+ * populate picture/crop/DPB state, latch it into inst->parms, and wake
+ * the probe thread waiting on inst->comp.
+ */
+static void set_param_ps_info(struct vdec_hevc_inst *inst,
+	struct aml_vdec_ps_infos *ps)
+{
+	struct vdec_pic_info *pic = &inst->vsi->pic;
+	struct vdec_hevc_dec_info *dec = &inst->vsi->dec;
+	struct v4l2_rect *rect = &inst->vsi->crop;
+
+	/* fill visible area size that be used for EGL. */
+	pic->visible_width	= ps->visible_width;
+	pic->visible_height	= ps->visible_height;
+
+	/* calc visible ares. */
+	rect->left		= 0;
+	rect->top		= 0;
+	rect->width		= pic->visible_width;
+	rect->height		= pic->visible_height;
+
+	/* config canvas size that be used for decoder. */
+
+	pic->coded_width 	= ps->coded_width;
+	pic->coded_height 	= ps->coded_height;
+	pic->y_len_sz		= pic->coded_width * pic->coded_height;
+	pic->c_len_sz		= pic->y_len_sz >> 1;
+
+	dec->dpb_sz		= ps->dpb_size;
+	pic->field		= ps->field;
+
+	inst->parms.ps		= *ps;
+	inst->parms.parms_status |=
+		V4L2_CONFIG_PARM_DECODE_PSINFO;
+
+	/*wake up*/
+	complete(&inst->comp);
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"Parse from ucode, crop(%d x %d), coded(%d x %d) dpb: %d, scan: %s\n",
+		pic->visible_width, pic->visible_height,
+		pic->coded_width, pic->coded_height,
+		dec->dpb_sz,
+		pic->field == V4L2_FIELD_NONE ? "P" : "I");
+}
+
+/*
+ * Latch HDR metadata once per instance and notify user space via the
+ * SRC_CH_HDRINFO event; subsequent reports are ignored.
+ */
+static void set_param_hdr_info(struct vdec_hevc_inst *inst,
+	struct aml_vdec_hdr_infos *hdr)
+{
+	if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_HDRINFO)
+		return;
+
+	inst->parms.hdr = *hdr;
+	inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_HDRINFO;
+	aml_vdec_dispatch_event(inst->ctx, V4L2_EVENT_SRC_CH_HDRINFO);
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"H265 set HDR infos\n");
+}
+
+/*
+ * Forward a decoder-originated event code (*event) to the v4l2 event
+ * queue of this context and trace it.
+ */
+static void set_param_post_event(struct vdec_hevc_inst *inst, u32 *event)
+{
+	aml_vdec_dispatch_event(inst->ctx, *event);
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"H265 post event: %d\n", *event);
+}
+
+/*
+ * Dispatch a SET_PARAM request (decoder->host callbacks) to the
+ * matching handler. Returns 0 on success, -1 for a NULL instance
+ * handle and -EINVAL for an unknown parameter type.
+ */
+static int vdec_hevc_set_param(unsigned long h_vdec,
+	enum vdec_set_param_type type, void *in)
+{
+	int ret = 0;
+	struct vdec_hevc_inst *inst = (struct vdec_hevc_inst *)h_vdec;
+
+	if (!inst) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"the hevc inst of dec is invalid.\n");
+		return -1;
+	}
+
+	switch (type) {
+	case SET_PARAM_WRITE_FRAME_SYNC:
+		set_param_write_sync(inst);
+		break;
+
+	case SET_PARAM_PS_INFO:
+		set_param_ps_info(inst, in);
+		break;
+
+	case SET_PARAM_HDR_INFO:
+		set_param_hdr_info(inst, in);
+		break;
+
+	case SET_PARAM_POST_EVENT:
+		set_param_post_event(inst, in);
+		break;
+	default:
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"invalid set parameter type=%d\n", type);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* HEVC decoder ops table consumed by the vdec_drv_if dispatch layer. */
+static struct vdec_common_if vdec_hevc_if = {
+	.init		= vdec_hevc_init,
+	.probe		= vdec_hevc_probe,
+	.decode		= vdec_hevc_decode,
+	.get_param	= vdec_hevc_get_param,
+	.set_param	= vdec_hevc_set_param,
+	.deinit		= vdec_hevc_deinit,
+};
+
+/* Forward declaration avoids a missing-prototype warning. */
+struct vdec_common_if *get_hevc_dec_comm_if(void);
+
+/* Exported accessor: hands the HEVC ops table to the ports layer. */
+struct vdec_common_if *get_hevc_dec_comm_if(void)
+{
+	return &vdec_hevc_if;
+}
diff --git a/drivers/amvdec_ports/decoder/vdec_mjpeg_if.c b/drivers/amvdec_ports/decoder/vdec_mjpeg_if.c
new file mode 100644
index 0000000..f9816cc
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/vdec_mjpeg_if.c
@@ -0,0 +1,668 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <uapi/linux/swab.h>
+#include "../vdec_drv_if.h"
+#include "../aml_vcodec_util.h"
+#include "../aml_vcodec_dec.h"
+#include "../aml_vcodec_adapt.h"
+#include "../vdec_drv_base.h"
+#include "../aml_vcodec_vfm.h"
+#include "aml_mjpeg_parser.h"
+#include <media/v4l2-mem2mem.h>
+
+#define NAL_TYPE(value)				((value) & 0x1F)
+#define HEADER_BUFFER_SIZE			(32 * 1024)
+
+/**
+ * struct mjpeg_fb - mjpeg decode frame buffer information
+ * @vdec_fb_va  : virtual address of struct vdec_fb
+ * @y_fb_dma    : dma address of Y frame buffer (luma)
+ * @c_fb_dma    : dma address of C frame buffer (chroma)
+ * @poc         : picture order count of frame buffer
+ * @reserved    : for 8 bytes alignment
+ *
+ * NOTE(review): not referenced anywhere visible in this file —
+ * presumably a shared layout with firmware; confirm before removing.
+ */
+struct mjpeg_fb {
+	uint64_t vdec_fb_va;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	int32_t poc;
+	uint32_t reserved;
+};
+
+/**
+ * struct vdec_mjpeg_dec_info - decode information
+ * @dpb_sz		: decoding picture buffer size
+ * @resolution_changed  : resolution change happened
+ * @reserved		: for 8 bytes alignment
+ * @bs_dma		: Input bit-stream buffer dma address
+ * @y_fb_dma		: Y frame buffer dma address
+ * @c_fb_dma		: C frame buffer dma address
+ * @vdec_fb_va		: VDEC frame buffer struct virtual address
+ */
+struct vdec_mjpeg_dec_info {
+	uint32_t dpb_sz;
+	uint32_t resolution_changed;
+	uint32_t reserved;
+	uint64_t bs_dma;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_mjpeg_vsi - decoder instance shared information
+ *
+ * NOTE(review): the previous kernel-doc described ring-list members
+ * (hdr_buf/list_free/list_disp) that do not exist in this struct; the
+ * actual members are documented below.
+ *
+ * @header_buf	: buffer used to cache stream headers
+ * @sps_size	: bytes of cached SPS data — TODO confirm relevance for mjpeg
+ * @pps_size	: bytes of cached PPS data — TODO confirm relevance for mjpeg
+ * @sei_size	: bytes of cached SEI data — TODO confirm relevance for mjpeg
+ * @head_offset	: current write offset into @header_buf
+ * @dec		: decode information (DPB size etc.)
+ * @pic		: picture information parsed from the stream
+ * @cur_pic	: last accepted picture info
+ * @crop	: crop information
+ * @is_combine	: whether header and frame data are combined — TODO confirm
+ * @nalu_pos	: position within the stream — TODO confirm
+ */
+struct vdec_mjpeg_vsi {
+	char *header_buf;
+	int sps_size;
+	int pps_size;
+	int sei_size;
+	int head_offset;
+	struct vdec_mjpeg_dec_info dec;
+	struct vdec_pic_info pic;
+	struct vdec_pic_info cur_pic;
+	struct v4l2_rect crop;
+	bool is_combine;
+	int nalu_pos;
+	//struct mjpeg_param_sets ps;
+};
+
+/**
+ * struct vdec_mjpeg_inst - mjpeg decoder instance
+ * @num_nalu : how many nalus be decoded
+ * @ctx      : point to aml_vcodec_ctx
+ * @vdec     : adapter context of the underlying decoder core
+ * @vsi      : VPU shared information
+ * @vfm      : video frame manager context
+ * @parms    : cfg parameter group latched for user space queries
+ * @comp     : completion the probe path waits on until ucode parsing ends
+ */
+struct vdec_mjpeg_inst {
+	unsigned int num_nalu;
+	struct aml_vcodec_ctx *ctx;
+	struct aml_vdec_adapt vdec;
+	struct vdec_mjpeg_vsi *vsi;
+	struct vcodec_vfm_s vfm;
+	struct aml_dec_params parms;
+	struct completion comp;
+};
+
+/* Hand the current picture info to the caller and log it for debug. */
+static void get_pic_info(struct vdec_mjpeg_inst *inst,
+			 struct vdec_pic_info *pic)
+{
+	const struct vdec_pic_info *src = &inst->vsi->pic;
+
+	*pic = *src;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"pic(%d, %d), buf(%d, %d)\n",
+		 pic->visible_width, pic->visible_height,
+		 pic->coded_width, pic->coded_height);
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"Y(%d, %d), C(%d, %d)\n",
+		pic->y_bs_sz, pic->y_len_sz,
+		pic->c_bs_sz, pic->c_len_sz);
+}
+
+/* Report the crop rectangle of the current stream. */
+static void get_crop_info(struct vdec_mjpeg_inst *inst, struct v4l2_rect *cr)
+{
+	const struct v4l2_rect *src = &inst->vsi->crop;
+
+	cr->left	= src->left;
+	cr->top		= src->top;
+	cr->width	= src->width;
+	cr->height	= src->height;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"l=%d, t=%d, w=%d, h=%d\n",
+		 cr->left, cr->top, cr->width, cr->height);
+}
+
+/* Report the decoding picture buffer size chosen for this instance. */
+static void get_dpb_size(struct vdec_mjpeg_inst *inst, unsigned int *dpb_sz)
+{
+	unsigned int sz = inst->vsi->dec.dpb_sz;
+
+	*dpb_sz = sz;
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"sz=%d\n", sz);
+}
+
+/*
+ * Fill @parm with the default MJPEG decoder configure string and
+ * return the number of bytes written (excluding the trailing NUL).
+ */
+static u32 vdec_config_default_parms(u8 *parm)
+{
+	u8 *pos = parm;
+
+	pos += sprintf(pos, "parm_v4l_codec_enable:1;");
+	pos += sprintf(pos, "parm_v4l_canvas_mem_mode:0;");
+	pos += sprintf(pos, "parm_v4l_buffer_margin:0;");
+
+	return pos - parm;
+}
+
+/*
+ * Build the configure string handed to the decoder core: serialize
+ * user-supplied CFGINFO values into ctx->config.buf, or fall back to
+ * the defaults, then latch the cfg into inst->parms.
+ */
+static void vdec_parser_parms(struct vdec_mjpeg_inst *inst)
+{
+	struct aml_vcodec_ctx *ctx = inst->ctx;
+
+	if (ctx->config.parm.dec.parms_status &
+		V4L2_CONFIG_PARM_DECODE_CFGINFO) {
+		u8 *pbuf = ctx->config.buf;
+
+		pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;");
+		pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;",
+			ctx->config.parm.dec.cfg.canvas_mem_mode);
+		pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;",
+			ctx->config.parm.dec.cfg.ref_buf_margin);
+		pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:%d;",
+			ctx->config.parm.dec.cfg.canvas_mem_endian);
+		ctx->config.length = pbuf - ctx->config.buf;
+	} else {
+		ctx->config.length = vdec_config_default_parms(ctx->config.buf);
+	}
+
+	inst->vdec.config	= ctx->config;
+	inst->parms.cfg		= ctx->config.parm.dec.cfg;
+	inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO;
+}
+
+
+/*
+ * Allocate and bring up one MJPEG decoder instance.
+ *
+ * On success the opaque handle is stored in *h_vdec and 0 is returned;
+ * on any failure everything allocated so far is unwound, *h_vdec is
+ * zeroed and a negative errno (or -1) is returned.
+ */
+static int vdec_mjpeg_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+	struct vdec_mjpeg_inst *inst = NULL;
+	int ret = -1;
+	bool dec_init = false;
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->vdec.video_type	= VFORMAT_MJPEG;
+	inst->vdec.dev		= ctx->dev->vpu_plat_dev;
+	inst->vdec.filp		= ctx->dev->filp;
+	/*
+	 * NOTE(review): dead store — vdec_parser_parms() below rewrites
+	 * inst->vdec.config after updating ctx->config; the HEVC variant
+	 * of this init does not have this early assignment.
+	 */
+	inst->vdec.config	= ctx->config;
+	inst->vdec.ctx		= ctx;
+	inst->ctx		= ctx;
+
+	vdec_parser_parms(inst);
+	/* set play mode.*/
+	if (ctx->is_drm_mode)
+		inst->vdec.port.flag |= PORT_FLAG_DRM;
+
+	/* to eable mjpeg hw.*/
+	inst->vdec.port.type = PORT_TYPE_VIDEO;
+
+	/* init vfm */
+	inst->vfm.ctx	= ctx;
+	inst->vfm.ada_ctx = &inst->vdec;
+	ret = vcodec_vfm_init(&inst->vfm);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"init vfm failed.\n");
+		goto err;
+	}
+
+	ret = video_decoder_init(&inst->vdec);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"vdec_mjpeg init err=%d\n", ret);
+		goto err;
+	}
+	dec_init = true;
+
+	/* probe info from the stream */
+	inst->vsi = kzalloc(sizeof(struct vdec_mjpeg_vsi), GFP_KERNEL);
+	if (!inst->vsi) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* alloc the header buffer to be used cache sps or spp etc.*/
+	inst->vsi->header_buf = kzalloc(HEADER_BUFFER_SIZE, GFP_KERNEL);
+	if (!inst->vsi->header_buf) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"mjpeg Instance >> %lx\n", (ulong) inst);
+
+	/* signaled by the ucode parameter-set callbacks. */
+	init_completion(&inst->comp);
+	ctx->ada_ctx	= &inst->vdec;
+	*h_vdec		= (unsigned long)inst;
+
+	//dump_init();
+
+	return 0;
+
+err:
+	if (dec_init)
+		video_decoder_release(&inst->vdec);
+	if (inst)
+		vcodec_vfm_release(&inst->vfm);
+	if (inst && inst->vsi && inst->vsi->header_buf)
+		kfree(inst->vsi->header_buf);
+	if (inst && inst->vsi)
+		kfree(inst->vsi);
+	if (inst)
+		kfree(inst);
+	*h_vdec = 0;
+
+	return ret;
+}
+
+#if 0
+/* Disabled stub kept for reference: fixed DPB size (never compiled). */
+static int refer_buffer_num(int level_idc, int poc_cnt,
+	int mb_width, int mb_height)
+{
+	return 20;
+}
+#endif
+
+/*
+ * Translate parsed MJPEG frame dimensions into picture, crop and DPB
+ * settings (64-aligned canvas, fixed 8-buffer DPB).
+ */
+static void fill_vdec_params(struct vdec_mjpeg_inst *inst,
+	struct MJpegDecodeContext *ps)
+{
+	struct vdec_pic_info *pic = &inst->vsi->pic;
+	struct vdec_mjpeg_dec_info *dec = &inst->vsi->dec;
+	struct v4l2_rect *rect = &inst->vsi->crop;
+
+	/* fill visible area size that be used for EGL. */
+	pic->visible_width	= ps->width;
+	pic->visible_height	= ps->height;
+
+	/* calc visible ares. */
+	rect->left		= 0;
+	rect->top		= 0;
+	rect->width		= pic->visible_width;
+	rect->height		= pic->visible_height;
+
+	/* config canvas size that be used for decoder. */
+	pic->coded_width	= ALIGN(ps->width, 64);
+	pic->coded_height	= ALIGN(ps->height, 64);
+
+	/* chroma plane sized equal to luma here (unlike the NV12 codecs). */
+	pic->y_len_sz		= pic->coded_width * pic->coded_height;
+	pic->c_len_sz		= pic->y_len_sz;
+
+	/*8(DECODE_BUFFER_NUM_DEF) */
+	dec->dpb_sz = 8;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_BUFMGR,
+		"The stream infos, coded:(%d x %d), visible:(%d x %d), DPB: %d\n",
+		pic->coded_width, pic->coded_height,
+		pic->visible_width, pic->visible_height, dec->dpb_sz);
+}
+
+/* Feed one frame to the ucode parser and wait (<=1s) for it to report
+ * stream params via SET_PARAM_PS_INFO. A timeout is not treated as an
+ * error by itself: success is judged solely by dpb_sz being non-zero.
+ * Returns 0 on success, negative write error, or -1 if nothing parsed.
+ */
+static int parse_stream_ucode(struct vdec_mjpeg_inst *inst,
+			      u8 *buf, u32 size, u64 timestamp)
+{
+	int ret = 0;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+
+	ret = vdec_vframe_write(vdec, buf, size, timestamp);
+	if (ret < 0) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"write frame data failed. err: %d\n", ret);
+		return ret;
+	}
+
+	/* wait ucode parse ending. */
+	wait_for_completion_timeout(&inst->comp,
+		msecs_to_jiffies(1000));
+
+	return inst->vsi->dec.dpb_sz ? 0 : -1;
+}
+
+/* DMA-buffer variant of parse_stream_ucode: writes the frame via its
+ * dma handle (with an input-free callback) and waits up to 1s for the
+ * ucode to publish stream params. Success == non-zero dpb_sz.
+ */
+static int parse_stream_ucode_dma(struct vdec_mjpeg_inst *inst,
+	ulong buf, u32 size, u64 timestamp, u32 handle)
+{
+	int ret = 0;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+
+	ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle,
+		vdec_vframe_input_free, inst->ctx);
+	if (ret < 0) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"write frame data failed. err: %d\n", ret);
+		return ret;
+	}
+
+	/* wait ucode parse ending. */
+	wait_for_completion_timeout(&inst->comp,
+		msecs_to_jiffies(1000));
+
+	return inst->vsi->dec.dpb_sz ? 0 : -1;
+}
+
+/* CPU-side header parse: run the mjpeg extradata parser over the
+ * buffer and, if a header was found, populate the vsi params.
+ * Returns 0 on parsed header, -1 if none, -ENOMEM/parser error code
+ * otherwise.
+ */
+static int parse_stream_cpu(struct vdec_mjpeg_inst *inst, u8 *buf, u32 size)
+{
+	int ret = 0;
+	struct mjpeg_param_sets *ps = NULL;
+
+	ps = kzalloc(sizeof(struct mjpeg_param_sets), GFP_KERNEL);
+	if (ps == NULL)
+		return -ENOMEM;
+
+	ret = mjpeg_decode_extradata_ps(buf, size, ps);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"parse extra data failed. err: %d\n", ret);
+		goto out;
+	}
+
+	if (ps->head_parsed)
+		fill_vdec_params(inst, &ps->dec_ps);
+
+	ret = ps->head_parsed ? 0 : -1;
+out:
+	kfree(ps);
+
+	return ret;
+}
+
+/* Probe stream parameters from the first buffer. Routing depends on
+ * DRM mode + vb2 memory model: DRM/MMAP buffers carry an
+ * aml_video_stream wrapper (validated by magic/type), DRM
+ * DMABUF/USERPTR go through the dma ucode path, and clear streams use
+ * either the ucode or CPU parser per param_sets_from_ucode. On return
+ * the current pic snapshot is latched into vsi->cur_pic.
+ */
+static int vdec_mjpeg_probe(unsigned long h_vdec,
+	struct aml_vcodec_mem *bs, void *out)
+{
+	struct vdec_mjpeg_inst *inst =
+		(struct vdec_mjpeg_inst *)h_vdec;
+	u8 *buf = (u8 *)bs->vaddr;
+	u32 size = bs->size;
+	int ret = 0;
+
+	if (inst->ctx->is_drm_mode) {
+		if (bs->model == VB2_MEMORY_MMAP) {
+			struct aml_video_stream *s =
+				(struct aml_video_stream *) buf;
+
+			/* NOTE(review): && rejects only buffers failing BOTH
+			 * checks — confirm whether || was intended.
+			 */
+			if ((s->magic != AML_VIDEO_MAGIC) &&
+				(s->type != V4L_STREAM_TYPE_MATEDATA))
+				return -1;
+
+			if (inst->ctx->param_sets_from_ucode) {
+				ret = parse_stream_ucode(inst, s->data,
+					s->len, bs->timestamp);
+			} else {
+				ret = parse_stream_cpu(inst, s->data, s->len);
+			}
+		} else if (bs->model == VB2_MEMORY_DMABUF ||
+			bs->model == VB2_MEMORY_USERPTR) {
+			ret = parse_stream_ucode_dma(inst, bs->addr, size,
+				bs->timestamp, BUFF_IDX(bs, bs->index));
+		}
+	} else {
+		if (inst->ctx->param_sets_from_ucode) {
+			ret = parse_stream_ucode(inst, buf, size, bs->timestamp);
+		} else {
+			ret = parse_stream_cpu(inst, buf, size);
+		}
+	}
+
+	/* remember the pic info produced by this probe. */
+	inst->vsi->cur_pic = inst->vsi->pic;
+
+	return ret;
+}
+
+/* Tear down an mjpeg instance: release the decoder and vfm path, then
+ * free the vsi header buffer, the vsi, and the instance itself.
+ * The NULL guards before kfree are redundant (kfree(NULL) is a no-op)
+ * but harmless.
+ */
+static void vdec_mjpeg_deinit(unsigned long h_vdec)
+{
+	struct vdec_mjpeg_inst *inst = (struct vdec_mjpeg_inst *)h_vdec;
+
+	if (!inst)
+		return;
+
+	video_decoder_release(&inst->vdec);
+
+	vcodec_vfm_release(&inst->vfm);
+
+	//dump_deinit();
+
+	if (inst->vsi && inst->vsi->header_buf)
+		kfree(inst->vsi->header_buf);
+
+	if (inst->vsi)
+		kfree(inst->vsi);
+
+	kfree(inst);
+}
+
+/* Fetch a free frame buffer from the v4l2 capture queue. */
+static int vdec_mjpeg_get_fb(struct vdec_mjpeg_inst *inst, struct vdec_v4l2_buffer **out)
+{
+	return get_fb_from_queue(inst->ctx, out);
+}
+
+/* Pop the next decoded vframe from the vfm path and hand back its
+ * associated v4l2 buffer marked for display; *out is set to NULL when
+ * no frame is available. Fix: error string said "avalid" instead of
+ * "invalid".
+ */
+static void vdec_mjpeg_get_vf(struct vdec_mjpeg_inst *inst, struct vdec_v4l2_buffer **out)
+{
+	struct vframe_s *vf = NULL;
+	struct vdec_v4l2_buffer *fb = NULL;
+
+	/* peek first so an empty queue is detected without consuming. */
+	vf = peek_video_frame(&inst->vfm);
+	if (!vf) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"there is no vframe.\n");
+		*out = NULL;
+		return;
+	}
+
+	vf = get_video_frame(&inst->vfm);
+	if (!vf) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"the vframe is invalid.\n");
+		*out = NULL;
+		return;
+	}
+
+	/* single owner until the buffer is returned. */
+	atomic_set(&vf->use_cnt, 1);
+
+	fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
+	fb->vf_handle = (unsigned long)vf;
+	fb->status = FB_ST_DISPLAY;
+
+	*out = fb;
+
+	//pr_info("%s, %d\n", __func__, fb->base_y.bytes_used);
+	//dump_write(fb->base_y.va, fb->base_y.bytes_used);
+	//dump_write(fb->base_c.va, fb->base_c.bytes_used);
+
+	/* convert yuv format. */
+	//swap_uv(fb->base_c.va, fb->base_c.size);
+}
+
+/* Thin wrapper: push one frame's bitstream to the decoder input. */
+static int vdec_write_nalu(struct vdec_mjpeg_inst *inst,
+	u8 *buf, u32 size, u64 ts)
+{
+	int ret = 0;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+
+	ret = vdec_vframe_write(vdec, buf, size, ts);
+
+	return ret;
+}
+
+/* Submit one bitstream buffer for decode. Returns -EAGAIN when the
+ * decoder input is full so the caller can retry; DRM/MMAP buffers are
+ * validated by magic and unwrapped, DRM DMABUF/USERPTR use the dma
+ * write path, clear streams write the raw buffer. res_chg is unused
+ * here.
+ */
+static int vdec_mjpeg_decode(unsigned long h_vdec,
+			     struct aml_vcodec_mem *bs, bool *res_chg)
+{
+	struct vdec_mjpeg_inst *inst = (struct vdec_mjpeg_inst *)h_vdec;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+	u8 *buf = (u8 *) bs->vaddr;
+	u32 size = bs->size;
+	int ret = -1;
+
+	if (vdec_input_full(vdec))
+		return -EAGAIN;
+
+	if (inst->ctx->is_drm_mode) {
+		if (bs->model == VB2_MEMORY_MMAP) {
+			struct aml_video_stream *s =
+				(struct aml_video_stream *) buf;
+
+			if (s->magic != AML_VIDEO_MAGIC)
+				return -1;
+
+			ret = vdec_vframe_write(vdec,
+				s->data,
+				s->len,
+				bs->timestamp);
+		} else if (bs->model == VB2_MEMORY_DMABUF ||
+			bs->model == VB2_MEMORY_USERPTR) {
+			ret = vdec_vframe_write_with_dma(vdec,
+				bs->addr, size, bs->timestamp,
+				BUFF_IDX(bs, bs->index),
+				vdec_vframe_input_free, inst->ctx);
+		}
+	} else {
+		ret = vdec_write_nalu(inst, buf, size, bs->timestamp);
+	}
+
+	return ret;
+}
+
+/* get_param dispatcher for the common vdec interface: fans out to the
+ * per-type getters. out's actual type depends on `type`; -EINVAL for
+ * unknown types, -1 for a NULL instance.
+ */
+static int vdec_mjpeg_get_param(unsigned long h_vdec,
+			       enum vdec_get_param_type type, void *out)
+{
+	int ret = 0;
+	struct vdec_mjpeg_inst *inst = (struct vdec_mjpeg_inst *)h_vdec;
+
+	if (!inst) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"the mjpeg inst of dec is invalid.\n");
+		return -1;
+	}
+
+	switch (type) {
+	case GET_PARAM_DISP_FRAME_BUFFER:
+		vdec_mjpeg_get_vf(inst, out);
+		break;
+
+	case GET_PARAM_FREE_FRAME_BUFFER:
+		ret = vdec_mjpeg_get_fb(inst, out);
+		break;
+
+	case GET_PARAM_PIC_INFO:
+		get_pic_info(inst, out);
+		break;
+
+	case GET_PARAM_DPB_SIZE:
+		get_dpb_size(inst, out);
+		break;
+
+	case GET_PARAM_CROP_INFO:
+		get_crop_info(inst, out);
+		break;
+
+	default:
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"invalid get parameter type=%d\n", type);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* Apply the param-set info reported by ucode to the vsi, then wake
+ * any waiter in parse_stream_ucode*() via the completion.
+ */
+static void set_param_ps_info(struct vdec_mjpeg_inst *inst,
+	struct aml_vdec_ps_infos *ps)
+{
+	struct vdec_pic_info *pic = &inst->vsi->pic;
+	struct vdec_mjpeg_dec_info *dec = &inst->vsi->dec;
+	struct v4l2_rect *rect = &inst->vsi->crop;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO, "%s in\n", __func__);
+	/* fill visible area size that be used for EGL. */
+	pic->visible_width	= ps->visible_width;
+	pic->visible_height	= ps->visible_height;
+
+	/* calc visible areas. */
+	rect->left		= 0;
+	rect->top		= 0;
+	rect->width		= pic->visible_width;
+	rect->height		= pic->visible_height;
+
+	/* config canvas size that be used for decoder. */
+	pic->coded_width	= ps->coded_width;
+	pic->coded_height	= ps->coded_height;
+	pic->y_len_sz		= pic->coded_width * pic->coded_height;
+	/* chroma sized equal to luma (cf. fill_vdec_params). */
+	pic->c_len_sz		= pic->y_len_sz;
+
+	dec->dpb_sz		= ps->dpb_size;
+
+	inst->parms.ps 	= *ps;
+	inst->parms.parms_status |=
+		V4L2_CONFIG_PARM_DECODE_PSINFO;
+
+	/*wake up*/
+	complete(&inst->comp);
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"Parse from ucode, crop(%d x %d), coded(%d x %d) dpb: %d\n",
+		ps->visible_width, ps->visible_height,
+		ps->coded_width, ps->coded_height,
+		dec->dpb_sz);
+}
+
+/* Signal frame-write synchronization to any completion waiter. */
+static void set_param_write_sync(struct vdec_mjpeg_inst *inst)
+{
+	complete(&inst->comp);
+}
+
+/* set_param dispatcher for the common vdec interface. `in`'s actual
+ * type depends on `type`; -EINVAL for unknown types, -1 for a NULL
+ * instance.
+ */
+static int vdec_mjpeg_set_param(unsigned long h_vdec,
+	enum vdec_set_param_type type, void *in)
+{
+	int ret = 0;
+	struct vdec_mjpeg_inst *inst = (struct vdec_mjpeg_inst *)h_vdec;
+
+	if (!inst) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"the mjpeg inst of dec is invalid.\n");
+		return -1;
+	}
+
+	switch (type) {
+	case SET_PARAM_WRITE_FRAME_SYNC:
+		set_param_write_sync(inst);
+		break;
+	case SET_PARAM_PS_INFO:
+		set_param_ps_info(inst, in);
+		break;
+
+	default:
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"invalid set parameter type=%d\n", type);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* mjpeg implementation of the common vdec ops table. */
+static struct vdec_common_if vdec_mjpeg_if = {
+	.init		= vdec_mjpeg_init,
+	.probe		= vdec_mjpeg_probe,
+	.decode		= vdec_mjpeg_decode,
+	.get_param	= vdec_mjpeg_get_param,
+	.set_param	= vdec_mjpeg_set_param,
+	.deinit		= vdec_mjpeg_deinit,
+};
+
+/* forward declaration doubles as the extern prototype for callers. */
+struct vdec_common_if *get_mjpeg_dec_comm_if(void);
+
+/* Export the mjpeg ops table to the driver core. */
+struct vdec_common_if *get_mjpeg_dec_comm_if(void)
+{
+	return &vdec_mjpeg_if;
+}
diff --git a/drivers/amvdec_ports/decoder/vdec_mpeg12_if.c b/drivers/amvdec_ports/decoder/vdec_mpeg12_if.c
new file mode 100644
index 0000000..9a38e71
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/vdec_mpeg12_if.c
@@ -0,0 +1,656 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <uapi/linux/swab.h>
+#include "../vdec_drv_if.h"
+#include "../aml_vcodec_util.h"
+#include "../aml_vcodec_dec.h"
+#include "../aml_vcodec_adapt.h"
+#include "../vdec_drv_base.h"
+#include "../aml_vcodec_vfm.h"
+#include "aml_mpeg12_parser.h"
+
+/* NOTE(review): NAL_TYPE is an H.264-style macro and is not referenced
+ * in this file — likely copied from the h264 backend; confirm before
+ * removal.
+ */
+#define NAL_TYPE(value)				((value) & 0x1F)
+#define HEADER_BUFFER_SIZE			(32 * 1024)
+
+/**
+ * struct mpeg12_fb - mpeg12 decode frame buffer information
+ * @vdec_fb_va  : virtual address of struct vdec_fb
+ * @y_fb_dma    : dma address of Y frame buffer (luma)
+ * @c_fb_dma    : dma address of C frame buffer (chroma)
+ * @poc         : picture order count of frame buffer
+ * @reserved    : for 8 bytes alignment
+ */
+struct mpeg12_fb {
+	uint64_t vdec_fb_va;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	int32_t poc;
+	uint32_t reserved;
+};
+
+/**
+ * struct vdec_mpeg12_dec_info - decode information
+ * @dpb_sz		: decoding picture buffer size
+ * @resolution_changed  : resolution change happen
+ * @reserved		: for 8 bytes alignment
+ * @bs_dma		: Input bit-stream buffer dma address
+ * @y_fb_dma		: Y frame buffer dma address
+ * @c_fb_dma		: C frame buffer dma address
+ * @vdec_fb_va		: VDEC frame buffer struct virtual address
+ */
+struct vdec_mpeg12_dec_info {
+	uint32_t dpb_sz;
+	uint32_t resolution_changed;
+	uint32_t reserved;
+	uint64_t bs_dma;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_mpeg12_vsi - shared memory for decode information exchange
+ *                        between VPU and Host.
+ *                        The memory is allocated by VPU then mapping to Host
+ *                        in vpu_dec_init() and freed in vpu_dec_deinit()
+ *                        by VPU.
+ *                        AP-W/R : AP is writer/reader on this item
+ *                        VPU-W/R: VPU is writer/reader on this item
+ * @hdr_buf      : Header parsing buffer (AP-W, VPU-R)
+ * @list_free    : free frame buffer ring list (AP-W/R, VPU-W)
+ * @list_disp    : display frame buffer ring list (AP-R, VPU-W)
+ * @dec          : decode information (AP-R, VPU-W)
+ * @pic          : picture information (AP-R, VPU-W)
+ * @crop         : crop information (AP-R, VPU-W)
+ */
+struct vdec_mpeg12_vsi {
+	char *header_buf;
+	int sps_size;
+	int pps_size;
+	int sei_size;
+	int head_offset;
+	struct vdec_mpeg12_dec_info dec;
+	struct vdec_pic_info pic;
+	struct vdec_pic_info cur_pic;
+	struct v4l2_rect crop;
+	bool is_combine;
+	int nalu_pos;
+	//struct mpeg12_param_sets ps;
+};
+
+/**
+ * struct vdec_mpeg12_inst - mpeg12 decoder instance
+ * @num_nalu : how many nalus be decoded
+ * @ctx      : point to aml_vcodec_ctx
+ * @vsi      : VPU shared information
+ */
+struct vdec_mpeg12_inst {
+	unsigned int num_nalu;
+	struct aml_vcodec_ctx *ctx;
+	struct aml_vdec_adapt vdec;
+	struct vdec_mpeg12_vsi *vsi;
+	struct vcodec_vfm_s vfm;
+	struct aml_dec_params parms;
+	struct completion comp;
+};
+
+/* Copy the current picture info out of the vsi for the v4l2 layer. */
+static void get_pic_info(struct vdec_mpeg12_inst *inst,
+			 struct vdec_pic_info *pic)
+{
+	*pic = inst->vsi->pic;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"pic(%d, %d), buf(%d, %d)\n",
+		 pic->visible_width, pic->visible_height,
+		 pic->coded_width, pic->coded_height);
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"Y(%d, %d), C(%d, %d)\n",
+		pic->y_bs_sz, pic->y_len_sz,
+		pic->c_bs_sz, pic->c_len_sz);
+}
+
+/* Copy the crop rectangle out of the vsi for the v4l2 layer. */
+static void get_crop_info(struct vdec_mpeg12_inst *inst, struct v4l2_rect *cr)
+{
+	cr->left = inst->vsi->crop.left;
+	cr->top = inst->vsi->crop.top;
+	cr->width = inst->vsi->crop.width;
+	cr->height = inst->vsi->crop.height;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+		"l=%d, t=%d, w=%d, h=%d\n",
+		 cr->left, cr->top, cr->width, cr->height);
+}
+
+/* Report the decoding-picture-buffer size currently in the vsi. */
+static void get_dpb_size(struct vdec_mpeg12_inst *inst, unsigned int *dpb_sz)
+{
+	*dpb_sz = inst->vsi->dec.dpb_sz;
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, "sz=%d\n", *dpb_sz);
+}
+
+/* Write the default "key:value;" config string into parm and return
+ * its length in bytes (excluding the NUL sprintf appends).
+ */
+static u32 vdec_config_default_parms(u8 *parm)
+{
+	u8 *pbuf = parm;
+
+	pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;");
+	pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:0;");
+	pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:0;");
+
+	return pbuf - parm;
+}
+
+/* Build the decoder config string from user-supplied CFGINFO when
+ * present, otherwise fall back to defaults; mirror the config into
+ * the adapter and record the applied cfg in inst->parms.
+ */
+static void vdec_parser_parms(struct vdec_mpeg12_inst *inst)
+{
+	struct aml_vcodec_ctx *ctx = inst->ctx;
+
+	if (ctx->config.parm.dec.parms_status &
+		V4L2_CONFIG_PARM_DECODE_CFGINFO) {
+		u8 *pbuf = ctx->config.buf;
+
+		pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;");
+		pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;",
+			ctx->config.parm.dec.cfg.canvas_mem_mode);
+		pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;",
+			ctx->config.parm.dec.cfg.ref_buf_margin);
+		ctx->config.length = pbuf - ctx->config.buf;
+	} else {
+		ctx->config.length = vdec_config_default_parms(ctx->config.buf);
+	}
+
+	inst->vdec.config	= ctx->config;
+	inst->parms.cfg		= ctx->config.parm.dec.cfg;
+	inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO;
+}
+
+/* Allocate and initialize an mpeg12 decoder instance: configure the
+ * adapter, bring up the vfm path and the hw decoder, then allocate
+ * the vsi and its header cache. On success *h_vdec holds the opaque
+ * instance handle; on failure everything is rolled back and *h_vdec
+ * is zeroed. Returns 0 or a negative errno.
+ */
+static int vdec_mpeg12_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+	struct vdec_mpeg12_inst *inst = NULL;
+	int ret = -1;
+	/* tracks whether video_decoder_init() succeeded, for rollback. */
+	bool dec_init = false;
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->vdec.video_type	= VFORMAT_MPEG12;
+	inst->vdec.dev		= ctx->dev->vpu_plat_dev;
+	inst->vdec.filp		= ctx->dev->filp;
+	inst->vdec.config	= ctx->config;
+	inst->vdec.ctx		= ctx;
+	inst->ctx		= ctx;
+
+	vdec_parser_parms(inst);
+
+	/* set play mode.*/
+	if (ctx->is_drm_mode)
+		inst->vdec.port.flag |= PORT_FLAG_DRM;
+
+	/* to enable mpeg12 hw.*/
+	inst->vdec.port.type = PORT_TYPE_VIDEO;
+
+	/* init vfm */
+	inst->vfm.ctx		= ctx;
+	inst->vfm.ada_ctx	= &inst->vdec;
+	ret = vcodec_vfm_init(&inst->vfm);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"init vfm failed.\n");
+		goto err;
+	}
+
+	ret = video_decoder_init(&inst->vdec);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"vdec_mpeg12 init err=%d\n", ret);
+		goto err;
+	}
+	dec_init = true;
+
+	/* probe info from the stream */
+	inst->vsi = kzalloc(sizeof(struct vdec_mpeg12_vsi), GFP_KERNEL);
+	if (!inst->vsi) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* alloc the header buffer to be used cache sps or pps etc.*/
+	inst->vsi->header_buf = kzalloc(HEADER_BUFFER_SIZE, GFP_KERNEL);
+	if (!inst->vsi->header_buf) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"mpeg12 Instance >> %lx\n", (ulong) inst);
+	init_completion(&inst->comp);
+	ctx->ada_ctx	= &inst->vdec;
+	*h_vdec		= (unsigned long)inst;
+
+	//dump_init();
+
+	return 0;
+
+err:
+	/* NOTE(review): vcodec_vfm_release() also runs when
+	 * vcodec_vfm_init() itself failed — confirm release tolerates an
+	 * un-inited vfm. The `if (inst)` guards are redundant here (inst
+	 * is non-NULL past the first check; kfree(NULL) is a no-op).
+	 */
+	if (dec_init)
+		video_decoder_release(&inst->vdec);
+	if (inst)
+		vcodec_vfm_release(&inst->vfm);
+	if (inst && inst->vsi && inst->vsi->header_buf)
+		kfree(inst->vsi->header_buf);
+	if (inst && inst->vsi)
+		kfree(inst->vsi);
+	if (inst)
+		kfree(inst);
+	*h_vdec = 0;
+
+	return ret;
+}
+
+/* Translate CPU-parsed stream info (MpvParseContext) into the shared
+ * vsi pic/dec/crop fields consumed by the v4l2 layer.
+ */
+static void fill_vdec_params(struct vdec_mpeg12_inst *inst,
+	struct MpvParseContext *dec_ps)
+{
+	struct vdec_pic_info *pic = &inst->vsi->pic;
+	struct vdec_mpeg12_dec_info *dec = &inst->vsi->dec;
+	struct v4l2_rect *rect = &inst->vsi->crop;
+
+	/* fill visible area size that be used for EGL. */
+	pic->visible_width	= dec_ps->width;
+	pic->visible_height	= dec_ps->height;
+
+	/* calc visible areas. */
+	rect->left		= 0;
+	rect->top		= 0;
+	rect->width		= pic->visible_width;
+	rect->height		= pic->visible_height;
+
+	/* config canvas size that be used for decoder. */
+	pic->coded_width	= ALIGN(dec_ps->coded_width, 64);
+	pic->coded_height	= ALIGN(dec_ps->coded_height, 32);
+
+	/* 4:2:0: chroma plane is half the luma plane. */
+	pic->y_len_sz		= pic->coded_width * pic->coded_height;
+	pic->c_len_sz		= pic->y_len_sz >> 1;
+
+	/*7(parm_v4l_buffer_margin) + 8(DECODE_BUFFER_NUM_DEF)*/
+	dec->dpb_sz = 15;
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_BUFMGR,
+		"The stream infos, coded:(%d x %d), visible:(%d x %d), DPB: %d\n",
+		pic->coded_width, pic->coded_height,
+		pic->visible_width, pic->visible_height, dec->dpb_sz);
+}
+
+/* Feed one frame to the ucode parser and wait (<=1s) for it to report
+ * stream params via SET_PARAM_PS_INFO. A timeout is not an error by
+ * itself: success is judged solely by dpb_sz being non-zero.
+ * Returns 0 on success, negative write error, or -1 if nothing parsed.
+ */
+static int parse_stream_ucode(struct vdec_mpeg12_inst *inst,
+			      u8 *buf, u32 size, u64 timestamp)
+{
+	int ret = 0;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+
+	ret = vdec_vframe_write(vdec, buf, size, timestamp);
+	if (ret < 0) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"write frame data failed. err: %d\n", ret);
+		return ret;
+	}
+
+	/* wait ucode parse ending. */
+	wait_for_completion_timeout(&inst->comp,
+		msecs_to_jiffies(1000));
+
+	return inst->vsi->dec.dpb_sz ? 0 : -1;
+}
+
+/* DMA-buffer variant of parse_stream_ucode: writes the frame via its
+ * dma handle (with an input-free callback) and waits up to 1s for the
+ * ucode to publish stream params. Success == non-zero dpb_sz.
+ */
+static int parse_stream_ucode_dma(struct vdec_mpeg12_inst *inst,
+	ulong buf, u32 size, u64 timestamp, u32 handle)
+{
+	int ret = 0;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+
+	ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle,
+		vdec_vframe_input_free, inst->ctx);
+	if (ret < 0) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"write frame data failed. err: %d\n", ret);
+		return ret;
+	}
+
+	/* wait ucode parse ending. */
+	wait_for_completion_timeout(&inst->comp,
+		msecs_to_jiffies(1000));
+
+	return inst->vsi->dec.dpb_sz ? 0 : -1;
+}
+
+/* CPU-side header parse: run the mpeg12 extradata parser over the
+ * buffer and, if a header was found, populate the vsi params.
+ * Returns 0 on parsed header, -1 if none, -ENOMEM/parser error code
+ * otherwise.
+ */
+static int parse_stream_cpu(struct vdec_mpeg12_inst *inst, u8 *buf, u32 size)
+{
+	int ret = 0;
+	struct mpeg12_param_sets *ps = NULL;
+
+	ps = kzalloc(sizeof(struct mpeg12_param_sets), GFP_KERNEL);
+	if (ps == NULL)
+		return -ENOMEM;
+
+	ret = mpeg12_decode_extradata_ps(buf, size, ps);
+	if (ret) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"parse extra data failed. err: %d\n", ret);
+		goto out;
+	}
+
+	if (ps->head_parsed)
+		fill_vdec_params(inst, &ps->dec_ps);
+
+	ret = ps->head_parsed ? 0 : -1;
+out:
+	kfree(ps);
+
+	return ret;
+}
+
+/* Probe stream parameters from the first buffer. Routing depends on
+ * DRM mode + vb2 memory model: DRM/MMAP buffers carry an
+ * aml_video_stream wrapper (validated by magic/type), DRM
+ * DMABUF/USERPTR go through the dma ucode path, and clear streams use
+ * either the ucode or CPU parser per param_sets_from_ucode. On return
+ * the current pic snapshot is latched into vsi->cur_pic.
+ */
+static int vdec_mpeg12_probe(unsigned long h_vdec,
+	struct aml_vcodec_mem *bs, void *out)
+{
+	struct vdec_mpeg12_inst *inst =
+		(struct vdec_mpeg12_inst *)h_vdec;
+	u8 *buf = (u8 *)bs->vaddr;
+	u32 size = bs->size;
+	int ret = 0;
+
+	if (inst->ctx->is_drm_mode) {
+		if (bs->model == VB2_MEMORY_MMAP) {
+			struct aml_video_stream *s =
+				(struct aml_video_stream *) buf;
+
+			/* NOTE(review): && rejects only buffers failing BOTH
+			 * checks — confirm whether || was intended.
+			 */
+			if ((s->magic != AML_VIDEO_MAGIC) &&
+				(s->type != V4L_STREAM_TYPE_MATEDATA))
+				return -1;
+
+			if (inst->ctx->param_sets_from_ucode) {
+				ret = parse_stream_ucode(inst, s->data,
+					s->len, bs->timestamp);
+			} else {
+				ret = parse_stream_cpu(inst, s->data, s->len);
+			}
+		} else if (bs->model == VB2_MEMORY_DMABUF ||
+			bs->model == VB2_MEMORY_USERPTR) {
+			ret = parse_stream_ucode_dma(inst, bs->addr, size,
+				bs->timestamp, BUFF_IDX(bs, bs->index));
+		}
+	} else {
+		if (inst->ctx->param_sets_from_ucode) {
+			ret = parse_stream_ucode(inst, buf, size, bs->timestamp);
+		} else {
+			ret = parse_stream_cpu(inst, buf, size);
+		}
+	}
+
+	/* remember the pic info produced by this probe. */
+	inst->vsi->cur_pic = inst->vsi->pic;
+
+	return ret;
+}
+
+/* Tear down an mpeg12 instance: release the decoder and vfm path,
+ * then free the vsi header buffer, the vsi, and the instance itself.
+ * The NULL guards before kfree are redundant (kfree(NULL) is a no-op)
+ * but harmless.
+ */
+static void vdec_mpeg12_deinit(unsigned long h_vdec)
+{
+	struct vdec_mpeg12_inst *inst = (struct vdec_mpeg12_inst *)h_vdec;
+
+	if (!inst)
+		return;
+
+	video_decoder_release(&inst->vdec);
+
+	vcodec_vfm_release(&inst->vfm);
+
+	//dump_deinit();
+
+	if (inst->vsi && inst->vsi->header_buf)
+		kfree(inst->vsi->header_buf);
+
+	if (inst->vsi)
+		kfree(inst->vsi);
+
+	kfree(inst);
+}
+
+/* Fetch a free frame buffer from the v4l2 capture queue. */
+static int vdec_mpeg12_get_fb(struct vdec_mpeg12_inst *inst, struct vdec_v4l2_buffer **out)
+{
+	return get_fb_from_queue(inst->ctx, out);
+}
+
+/* Pop the next decoded vframe from the vfm path and hand back its
+ * associated v4l2 buffer marked for display; *out is set to NULL when
+ * no frame is available. Fix: error string said "avalid" instead of
+ * "invalid".
+ */
+static void vdec_mpeg12_get_vf(struct vdec_mpeg12_inst *inst, struct vdec_v4l2_buffer **out)
+{
+	struct vframe_s *vf = NULL;
+	struct vdec_v4l2_buffer *fb = NULL;
+
+	/* peek first so an empty queue is detected without consuming. */
+	vf = peek_video_frame(&inst->vfm);
+	if (!vf) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"there is no vframe.\n");
+		*out = NULL;
+		return;
+	}
+
+	vf = get_video_frame(&inst->vfm);
+	if (!vf) {
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"the vframe is invalid.\n");
+		*out = NULL;
+		return;
+	}
+
+	/* single owner until the buffer is returned. */
+	atomic_set(&vf->use_cnt, 1);
+
+	fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
+	fb->vf_handle = (unsigned long)vf;
+	fb->status = FB_ST_DISPLAY;
+
+	*out = fb;
+
+	//pr_info("%s, %d\n", __func__, fb->base_y.bytes_used);
+	//dump_write(fb->base_y.va, fb->base_y.bytes_used);
+	//dump_write(fb->base_c.va, fb->base_c.bytes_used);
+
+	/* convert yuv format. */
+	//swap_uv(fb->base_c.va, fb->base_c.size);
+}
+
+/* Thin wrapper: push one frame's bitstream to the decoder input. */
+static int vdec_write_nalu(struct vdec_mpeg12_inst *inst,
+	u8 *buf, u32 size, u64 ts)
+{
+	int ret = 0;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+
+	ret = vdec_vframe_write(vdec, buf, size, ts);
+
+	return ret;
+}
+
+/* Submit one bitstream buffer for decode. Returns -EAGAIN when the
+ * decoder input is full so the caller can retry; DRM/MMAP buffers are
+ * validated by magic and unwrapped, DRM DMABUF/USERPTR use the dma
+ * write path, clear streams write the raw buffer. res_chg is unused
+ * here.
+ */
+static int vdec_mpeg12_decode(unsigned long h_vdec,
+			      struct aml_vcodec_mem *bs, bool *res_chg)
+{
+	struct vdec_mpeg12_inst *inst = (struct vdec_mpeg12_inst *)h_vdec;
+	struct aml_vdec_adapt *vdec = &inst->vdec;
+	u8 *buf = (u8 *) bs->vaddr;
+	u32 size = bs->size;
+	int ret = -1;
+
+	if (vdec_input_full(vdec))
+		return -EAGAIN;
+
+	if (inst->ctx->is_drm_mode) {
+		if (bs->model == VB2_MEMORY_MMAP) {
+			struct aml_video_stream *s =
+				(struct aml_video_stream *) buf;
+
+			if (s->magic != AML_VIDEO_MAGIC)
+				return -1;
+
+			ret = vdec_vframe_write(vdec,
+				s->data,
+				s->len,
+				bs->timestamp);
+		} else if (bs->model == VB2_MEMORY_DMABUF ||
+			bs->model == VB2_MEMORY_USERPTR) {
+			ret = vdec_vframe_write_with_dma(vdec,
+				bs->addr, size, bs->timestamp,
+				BUFF_IDX(bs, bs->index),
+				vdec_vframe_input_free, inst->ctx);
+		}
+	} else {
+		ret = vdec_write_nalu(inst, buf, size, bs->timestamp);
+	}
+
+	return ret;
+}
+
+/* get_param dispatcher for the common vdec interface: fans out to the
+ * per-type getters. out's actual type depends on `type`; -EINVAL for
+ * unknown types, -1 for a NULL instance.
+ */
+static int vdec_mpeg12_get_param(unsigned long h_vdec,
+			       enum vdec_get_param_type type, void *out)
+{
+	int ret = 0;
+	struct vdec_mpeg12_inst *inst = (struct vdec_mpeg12_inst *)h_vdec;
+
+	if (!inst) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"the mpeg12 inst of dec is invalid.\n");
+		return -1;
+	}
+
+	switch (type) {
+	case GET_PARAM_DISP_FRAME_BUFFER:
+		vdec_mpeg12_get_vf(inst, out);
+		break;
+
+	case GET_PARAM_FREE_FRAME_BUFFER:
+		ret = vdec_mpeg12_get_fb(inst, out);
+		break;
+
+	case GET_PARAM_PIC_INFO:
+		get_pic_info(inst, out);
+		break;
+
+	case GET_PARAM_DPB_SIZE:
+		get_dpb_size(inst, out);
+		break;
+
+	case GET_PARAM_CROP_INFO:
+		get_crop_info(inst, out);
+		break;
+
+	default:
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"invalid get parameter type=%d\n", type);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* Signal frame-write synchronization to any completion waiter. */
+static void set_param_write_sync(struct vdec_mpeg12_inst *inst)
+{
+	complete(&inst->comp);
+}
+
+/* Apply the param-set info reported by ucode to the vsi (including
+ * interlace field info), then wake any waiter in parse_stream_ucode*()
+ * via the completion.
+ */
+static void set_param_ps_info(struct vdec_mpeg12_inst *inst,
+	struct aml_vdec_ps_infos *ps)
+{
+	struct vdec_pic_info *pic = &inst->vsi->pic;
+	struct vdec_mpeg12_dec_info *dec = &inst->vsi->dec;
+	struct v4l2_rect *rect = &inst->vsi->crop;
+
+	/* fill visible area size that be used for EGL. */
+	pic->visible_width	= ps->visible_width;
+	pic->visible_height	= ps->visible_height;
+
+	/* calc visible areas. */
+	rect->left		= 0;
+	rect->top		= 0;
+	rect->width		= pic->visible_width;
+	rect->height		= pic->visible_height;
+
+	/* config canvas size that be used for decoder. */
+	pic->coded_width	= ps->coded_width;
+	pic->coded_height	= ps->coded_height;
+	/* 4:2:0: chroma plane is half the luma plane. */
+	pic->y_len_sz		= pic->coded_width * pic->coded_height;
+	pic->c_len_sz		= pic->y_len_sz >> 1;
+
+	dec->dpb_sz		= ps->dpb_size;
+	pic->field		= ps->field;
+
+	inst->parms.ps 	= *ps;
+	inst->parms.parms_status |=
+		V4L2_CONFIG_PARM_DECODE_PSINFO;
+
+	/*wake up*/
+	complete(&inst->comp);
+
+	v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+		"Parse from ucode, crop(%d x %d), coded(%d x %d) dpb: %d scan: %s\n",
+		ps->visible_width, ps->visible_height,
+		ps->coded_width, ps->coded_height,
+		dec->dpb_sz,
+		pic->field == V4L2_FIELD_NONE ? "P" : "I");
+}
+
+/* set_param dispatcher for the common vdec interface. `in`'s actual
+ * type depends on `type`; -EINVAL for unknown types, -1 for a NULL
+ * instance.
+ */
+static int vdec_mpeg12_set_param(unsigned long h_vdec,
+	enum vdec_set_param_type type, void *in)
+{
+	int ret = 0;
+	struct vdec_mpeg12_inst *inst = (struct vdec_mpeg12_inst *)h_vdec;
+
+	if (!inst) {
+		v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+			"the mpeg12 inst of dec is invalid.\n");
+		return -1;
+	}
+
+	switch (type) {
+	case SET_PARAM_WRITE_FRAME_SYNC:
+		set_param_write_sync(inst);
+		break;
+	case SET_PARAM_PS_INFO:
+		set_param_ps_info(inst, in);
+		break;
+
+	default:
+		v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+			"invalid set parameter type=%d\n", type);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* mpeg12 implementation of the common vdec ops table. */
+static struct vdec_common_if vdec_mpeg12_if = {
+	.init		= vdec_mpeg12_init,
+	.probe		= vdec_mpeg12_probe,
+	.decode		= vdec_mpeg12_decode,
+	.get_param	= vdec_mpeg12_get_param,
+	.set_param	= vdec_mpeg12_set_param,
+	.deinit		= vdec_mpeg12_deinit,
+};
+
+/* forward declaration doubles as the extern prototype for callers. */
+struct vdec_common_if *get_mpeg12_dec_comm_if(void);
+
+/* Export the mpeg12 ops table to the driver core. */
+struct vdec_common_if *get_mpeg12_dec_comm_if(void)
+{
+	return &vdec_mpeg12_if;
+}
diff --git a/drivers/amvdec_ports/decoder/vdec_mpeg4_if.c b/drivers/amvdec_ports/decoder/vdec_mpeg4_if.c
new file mode 100644
index 0000000..c47bafc
--- /dev/null
+++ b/drivers/amvdec_ports/decoder/vdec_mpeg4_if.c
@@ -0,0 +1,667 @@
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <uapi/linux/swab.h>
+#include "../vdec_drv_if.h"
+#include "../aml_vcodec_util.h"
+#include "../aml_vcodec_dec.h"
+#include "../aml_vcodec_adapt.h"
+#include "../vdec_drv_base.h"
+#include "../aml_vcodec_vfm.h"
+#include "aml_mpeg4_parser.h"
+
+/* NOTE(review): NAL_TYPE is an H.264-style macro, likely copied from
+ * the h264 backend — confirm it is actually used for mpeg4.
+ */
+#define NAL_TYPE(value)				((value) & 0x1F)
+#define HEADER_BUFFER_SIZE			(32 * 1024)
+
+/**
+ * struct mpeg4_fb - mpeg4 decode frame buffer information
+ * @vdec_fb_va  : virtual address of struct vdec_fb
+ * @y_fb_dma    : dma address of Y frame buffer (luma)
+ * @c_fb_dma    : dma address of C frame buffer (chroma)
+ * @poc         : picture order count of frame buffer
+ * @reserved    : for 8 bytes alignment
+ */
+struct mpeg4_fb {
+	uint64_t vdec_fb_va;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	int32_t poc;
+	uint32_t reserved;
+};
+
+/**
+ * struct vdec_mpeg4_dec_info - decode information
+ * @dpb_sz		: decoding picture buffer size
+ * @resolution_changed  : resoltion change happen
+ * @reserved		: for 8 bytes alignment
+ * @bs_dma		: Input bit-stream buffer dma address
+ * @y_fb_dma		: Y frame buffer dma address
+ * @c_fb_dma		: C frame buffer dma address
+ * @vdec_fb_va		: VDEC frame buffer struct virtual address