Support AVC short format for protected video playback

BZ: 97375

Support AVC short format for protected video playback on BYT platform.

Change-Id: I83f677be0be60f0cd1e194ca5d5c0df7205f8d7f
Signed-off-by: wfeng6 <wei.feng@intel.com>
Reviewed-on: http://android.intel.com:8080/100296
Reviewed-by: Shi, PingX <pingx.shi@intel.com>
Reviewed-by: Akula, VarshaX A <varshax.a.akula@intel.com>
Reviewed-by: Zurcher, Paul <paul.zurcher@intel.com>
Reviewed-by: Poornachandran, Rajesh <rajesh.poornachandran@intel.com>
Tested-by: Sun, Hang L <hang.l.sun@intel.com>
Reviewed-by: buildbot <buildbot@intel.com>
Tested-by: buildbot <buildbot@intel.com>
diff --git a/mix_vbp/viddec_fw/fw/codecs/h264/include/h264.h b/mix_vbp/viddec_fw/fw/codecs/h264/include/h264.h
index d6261d2..eac5541 100755
--- a/mix_vbp/viddec_fw/fw/codecs/h264/include/h264.h
+++ b/mix_vbp/viddec_fw/fw/codecs/h264/include/h264.h
@@ -1045,7 +1045,71 @@
 }
 #endif
 
+#ifdef USE_AVC_SHORT_FORMAT
+#define MAX_OP  16
 
+typedef struct _dec_ref_pic_marking_t {
+    union {
+        uint8_t flags;
+        struct {
+            uint8_t idr_pic_flag:1;
+            uint8_t no_output_of_prior_pics_flag:1;
+            uint8_t long_term_reference_flag:1;
+            uint8_t adaptive_ref_pic_marking_mode_flag:1;
+        };
+    };
+    struct {
+        uint8_t memory_management_control_operation;
+        union {
+            struct {
+                uint8_t difference_of_pic_nums_minus1;
+            } op1;
+            struct {
+                uint8_t long_term_pic_num;
+            } op2;
+            struct {
+                uint8_t difference_of_pic_nums_minus1;
+                uint8_t long_term_frame_idx;
+            } op3;
+            struct {
+                uint8_t max_long_term_frame_idx_plus1;
+            } op4;
+            struct {
+                uint8_t long_term_frame_idx;
+            } op6;
+        };
+    } op[MAX_OP];
+} dec_ref_pic_marking_t;
+
+
+typedef struct _slice_header_t {
+    uint8_t nal_unit_type;
+    uint8_t pps_id;
+    uint8_t padding;
+    union {
+        uint8_t flags;
+        struct {
+            uint8_t field_pic_flag:1;
+            uint8_t bottom_field_flag:1;
+        };
+    };
+    uint32_t first_mb_in_slice;
+    uint32_t frame_num;
+    uint16_t idr_pic_id;
+    uint16_t pic_order_cnt_lsb;
+    int32_t delta_pic_order_cnt[2];
+    int32_t delta_pic_order_cnt_bottom;
+} slice_header_t;
+
+
+
+
+typedef struct _vbp_h264_sliceheader {
+    slice_header_t          slice_header;
+    dec_ref_pic_marking_t   ref_pic_marking;
+} vbp_h264_sliceheader;
+
+#endif
 
 
 
diff --git a/mix_vbp/viddec_fw/fw/codecs/h264/parser/Android.mk b/mix_vbp/viddec_fw/fw/codecs/h264/parser/Android.mk
index d3e4910..62fe53d 100644
--- a/mix_vbp/viddec_fw/fw/codecs/h264/parser/Android.mk
+++ b/mix_vbp/viddec_fw/fw/codecs/h264/parser/Android.mk
@@ -30,3 +30,36 @@
 	libmixvbp
 
 include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+PLATFORM_SUPPORT_AVC_SHORT_FORMAT := baytrail
+
+ifneq ($(filter $(TARGET_BOARD_PLATFORM),$(PLATFORM_SUPPORT_AVC_SHORT_FORMAT)),)
+LOCAL_SRC_FILES := \
+        h264parse.c \
+        h264parse_bsd.c \
+        h264parse_math.c \
+        h264parse_mem.c \
+        h264parse_sei.c \
+        h264parse_pps.c \
+        h264parse_sps.c \
+        h264parse_dpb.c \
+        h264parse_sh.c \
+        viddec_h264secure_parse.c \
+        mix_vbp_h264_stubs.c
+
+LOCAL_CFLAGS := -DVBP -DHOST_ONLY -DUSE_AVC_SHORT_FORMAT
+
+LOCAL_C_INCLUDES :=    \
+        $(VENDORS_INTEL_MRST_MIXVBP_ROOT)/viddec_fw/include   \
+        $(VENDORS_INTEL_MRST_MIXVBP_ROOT)/viddec_fw/fw/include   \
+        $(VENDORS_INTEL_MRST_MIXVBP_ROOT)/viddec_fw/fw/parser/include   \
+        $(VENDORS_INTEL_MRST_MIXVBP_ROOT)/viddec_fw/fw/codecs/h264/include
+
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE := libmixvbp_h264secure
+LOCAL_SHARED_LIBRARIES := libmixvbp
+
+include $(BUILD_SHARED_LIBRARY)
+
+endif
diff --git a/mix_vbp/viddec_fw/fw/codecs/h264/parser/h264parse_dpb.c b/mix_vbp/viddec_fw/fw/codecs/h264/parser/h264parse_dpb.c
index 010e77b..4415d54 100755
--- a/mix_vbp/viddec_fw/fw/codecs/h264/parser/h264parse_dpb.c
+++ b/mix_vbp/viddec_fw/fw/codecs/h264/parser/h264parse_dpb.c
@@ -2731,7 +2731,7 @@
                 h264_dpb_unmark_for_reference(p_dpb, p_dpb->active_fs->fs_idc);
                 h264_dpb_remove_ref_list(p_dpb, p_dpb->active_fs->fs_idc);
                 //h264_send_new_display_frame(0x01); //send ignore_frame signal to Host
-
+#ifndef USE_AVC_SHORT_FORMAT
                 ///  Add into drop-out list for all frms in dpb without display
                 if (!(viddec_h264_get_is_non_existent(p_dpb->active_fs)))   {
                     if ( viddec_h264_get_is_output(&(p_dpb->fs[p_dpb->fs_dpb_idc[idx]])) ) {			//// This frame has been displayed but not released
@@ -2742,6 +2742,7 @@
                         p_dpb->frame_numbers_need_to_be_dropped ++;
                     }
                 }
+#endif
             }
 
         }
@@ -2951,13 +2952,14 @@
     h264_dpb_set_active_fs(p_dpb, fs_idc);
     viddec_h264_set_is_frame_used(p_dpb->active_fs, 0);
 
+#ifndef USE_AVC_SHORT_FORMAT
     //add to support frame relocation interface to host
     if (!(viddec_h264_get_is_non_existent(p_dpb->active_fs)))
     {
         p_dpb->frame_id_need_to_be_removed[p_dpb->frame_numbers_need_to_be_removed] = p_dpb->fs[fs_idc].fs_idc;
         p_dpb->frame_numbers_need_to_be_removed ++;
     }
-
+#endif
     ///////////////////////////////////////// Reset FS
     p_dpb->fs[fs_idc].fs_idc = MPD_DPB_FS_NULL_IDC;
 
@@ -3305,9 +3307,10 @@
     if (viddec_h264_get_is_non_existent(p_dpb->active_fs) == 0)
     {
         *existing = 1;
+#ifndef USE_AVC_SHORT_FORMAT
         p_dpb->frame_id_need_to_be_displayed[p_dpb->frame_numbers_need_to_be_displayed]=p_dpb->active_fs->fs_idc;
         p_dpb->frame_numbers_need_to_be_displayed++;
-
+#endif
         //if(direct)
         //h264_dpb_remove_frame_from_dpb(p_dpb, p_dpb->active_fs->fs_idc);		// Remove dpb.fs_dpb_idc[pos]
     }
diff --git a/mix_vbp/viddec_fw/fw/codecs/h264/parser/viddec_h264secure_parse.c b/mix_vbp/viddec_fw/fw/codecs/h264/parser/viddec_h264secure_parse.c
new file mode 100644
index 0000000..55225ed
--- /dev/null
+++ b/mix_vbp/viddec_fw/fw/codecs/h264/parser/viddec_h264secure_parse.c
@@ -0,0 +1,804 @@
+#include "viddec_fw_debug.h"
+#include "viddec_parser_ops.h"
+
+#include "viddec_fw_workload.h"
+#include "viddec_pm.h"
+
+#include "h264.h"
+#include "h264parse.h"
+
+#include "viddec_h264_parse.h"
+#include "h264parse_dpb.h"
+
+/* Init function which can be called to initialize local context on open and flush and preserve */
+void viddec_h264secure_init(void *ctxt, uint32_t *persist_mem, uint32_t preserve)
+{
+    struct h264_viddec_parser* parser = ctxt;
+    h264_Info * pInfo = &(parser->info);
+
+    if (!preserve)
+    {
+        /* we don't initialize this data if we want to preserve
+           sequence and gop information */
+        h264_init_sps_pps(parser,persist_mem);
+    }
+    /* picture level info which will always be initialized */
+    h264_init_Info_under_sps_pps_level(pInfo);
+#ifdef SW_ERROR_CONCEALEMNT
+   pInfo->sw_bail = 0;
+#endif
+    return;
+}
+
+
+/* ------------------------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------------------------ */
+uint32_t viddec_h264secure_parse(void *parent, void *ctxt)
+{
+    struct h264_viddec_parser* parser = ctxt;
+
+    h264_Info * pInfo = &(parser->info);
+
+    h264_Status status = H264_STATUS_ERROR;
+
+
+    uint8_t nal_ref_idc = 0;
+
+    ///// Parse NAL Unit header
+    pInfo->img.g_new_frame = 0;
+    pInfo->push_to_cur = 1;
+    pInfo->is_current_workload_done =0;
+    pInfo->nal_unit_type = 0;
+
+    h264_Parse_NAL_Unit(parent, pInfo, &nal_ref_idc);
+
+    ///// Check frame boundary for non-VCL delimiter
+    h264_check_previous_frame_end(pInfo);
+
+    //////// Parse valid NAL unit
+    switch ( pInfo->nal_unit_type )
+    {
+    case h264_NAL_UNIT_TYPE_IDR:
+        if (pInfo->got_start)	{
+            pInfo->img.recovery_point_found |= 1;
+        }
+
+        pInfo->sei_rp_received = 0;
+
+    case h264_NAL_UNIT_TYPE_SLICE:
+        ////////////////////////////////////////////////////////////////////////////
+        // Step 1: Check start point
+        ////////////////////////////////////////////////////////////////////////////
+        //
+        /// Slice parsing must start from the valid start point( SPS, PPS,  IDR or recovery point or primary_I)
+        /// 1) No start point reached, append current ES buffer to workload and release it
+        /// 2) else, start parsing
+        //
+        //if(pInfo->got_start && ((pInfo->sei_information.recovery_point) || (pInfo->nal_unit_type == h264_NAL_UNIT_TYPE_IDR)))
+        //{
+        //pInfo->img.recovery_point_found = 1;
+        //}
+    {
+
+        h264_Slice_Header_t next_SliceHeader;
+
+        /// Reset next slice header
+        h264_memset(&next_SliceHeader, 0x0, sizeof(h264_Slice_Header_t));
+        next_SliceHeader.nal_ref_idc = nal_ref_idc;
+
+        if ( (1==pInfo->primary_pic_type_plus_one)&&(pInfo->got_start))
+        {
+            pInfo->img.recovery_point_found |=4;
+        }
+        pInfo->primary_pic_type_plus_one = 0;
+
+
+
+#ifndef VBP
+        if (pInfo->img.recovery_point_found == 0) {
+            pInfo->img.structure = FRAME;
+            pInfo->wl_err_curr |= VIDDEC_FW_WORKLOAD_ERR_NOTDECODABLE;
+            pInfo->wl_err_curr |= (FRAME << FIELD_ERR_OFFSET);
+            break;
+        }
+#endif
+
+        ////////////////////////////////////////////////////////////////////////////
+        // Step 2: Parsing slice header
+        ////////////////////////////////////////////////////////////////////////////
+        /// PWT
+        pInfo->h264_pwt_start_byte_offset=0;
+        pInfo->h264_pwt_start_bit_offset=0;
+        pInfo->h264_pwt_end_byte_offset=0;
+        pInfo->h264_pwt_end_bit_offset=0;
+        pInfo->h264_pwt_enabled =0;
+        /// IDR flag
+        next_SliceHeader.idr_flag = (pInfo->nal_unit_type == h264_NAL_UNIT_TYPE_IDR);
+
+
+        /// Pass slice header
+        status = h264_Parse_Slice_Layer_Without_Partitioning_RBSP(parent, pInfo, &next_SliceHeader);
+
+        pInfo->sei_information.recovery_point = 0;
+
+        if (next_SliceHeader.sh_error & 3) {
+            pInfo->wl_err_curr |= VIDDEC_FW_WORKLOAD_ERR_NOTDECODABLE;
+
+            // Error type definition, refer to viddec_fw_common_defs.h
+            //		if error in top field, VIDDEC_FW_WORKLOAD_ERR_TOPFIELD			= (1 << 17)
+            //		if error in bottom field, VIDDEC_FW_WORKLOAD_ERR_BOTTOMFIELD	   = (1 << 18)
+            //		if this is frame based, both 2 bits should be set
+            pInfo->wl_err_curr |= (FRAME << FIELD_ERR_OFFSET);
+
+            break;
+        }
+        pInfo->img.current_slice_num++;
+
+
+#ifdef DUMP_HEADER_INFO
+        dump_slice_header(pInfo, &next_SliceHeader);
+////h264_print_decoder_values(pInfo);
+#endif
+
+
+        ////////////////////////////////////////////////////////////////////////////
+        // Step 3: Processing if new picture coming
+        //  1) if it's the second field
+        //	2) if it's a new frame
+        ////////////////////////////////////////////////////////////////////////////
+        //AssignQuantParam(pInfo);
+        if (h264_is_new_picture_start(pInfo, next_SliceHeader, pInfo->SliceHeader))
+        {
+            //
+            ///----------------- New Picture.boundary detected--------------------
+            //
+            pInfo->img.g_new_pic++;
+
+            //
+            // Complete previous picture
+            h264_dpb_store_previous_picture_in_dpb(pInfo, 0, 0); //curr old
+            //h264_hdr_post_poc(0, 0, use_old);
+
+            //
+            // Update slice structures:
+            h264_update_old_slice(pInfo, next_SliceHeader);  	//cur->old; next->cur;
+
+            //
+            // 1) if resolution change: reset dpb
+            // 2) else: init frame store
+            h264_update_img_info(pInfo); //img, dpb
+
+            //
+            ///----------------- New frame.boundary detected--------------------
+            //
+            pInfo->img.second_field = h264_is_second_field(pInfo);
+            if (pInfo->img.second_field == 0)
+            {
+                pInfo->img.g_new_frame = 1;
+                h264_dpb_update_queue_dangling_field(pInfo);
+
+                //
+                /// DPB management
+                ///	1) check the gaps
+                ///	2) assign fs for non-exist frames
+                ///	3) fill the gaps
+                ///	4) store frame into DPB if ...
+                //
+                //if(pInfo->SliceHeader.redundant_pic_cnt)
+                {
+                    h264_dpb_gaps_in_frame_num_mem_management(pInfo);
+                }
+
+#ifdef DUMP_HEADER_INFO
+                dump_new_picture_attr(pInfo, pInfo->SliceHeader.frame_num);
+#endif
+            }
+            //
+            /// Decoding POC
+            h264_hdr_decoding_poc (pInfo, 0, 0);
+
+            //
+            /// Init Frame Store for next frame
+            h264_dpb_init_frame_store (pInfo);
+            pInfo->img.current_slice_num = 1;
+
+            if (pInfo->SliceHeader.first_mb_in_slice != 0)
+            {
+                //// Reaching here means slices were lost at the beginning, since there is no FMO support
+                pInfo->SliceHeader.sh_error |= (pInfo->SliceHeader.structure << 17);
+            }
+
+            //
+            /// Emit out the New Frame
+            if (pInfo->img.g_new_frame)
+            {
+                h264_parse_emit_start_new_frame(parent, pInfo);
+            }
+
+            h264_parse_emit_current_pic(parent, pInfo);
+        }
+        else ///////////////////////////////////////////////////// If Not a picture start
+        {
+            //
+            /// Update slice structures: cur->old; next->cur;
+            h264_update_old_slice(pInfo, next_SliceHeader);
+
+            //
+            /// 1) if resolution change: reset dpb
+            /// 2) else: update img info
+            h264_update_img_info(pInfo);
+        }
+
+
+        //////////////////////////////////////////////////////////////
+        // Step 4: DPB reference list init and reordering
+        //////////////////////////////////////////////////////////////
+
+        //////////////////////////////////////////////// Update frame Type--- IDR/I/P/B for frame or field
+        h264_update_frame_type(pInfo);
+
+
+        h264_dpb_update_ref_lists( pInfo);
+
+#ifdef VBP
+#ifdef SW_ERROR_CONCEALEMNT
+        if ((pInfo->dpb.ltref_frames_in_buffer + pInfo->dpb.ref_frames_in_buffer ) > pInfo->active_SPS.num_ref_frames)
+        {
+            pInfo->sw_bail = 1;
+        }
+#endif
+#endif
+#ifdef DUMP_HEADER_INFO
+        dump_ref_list(pInfo);
+#endif
+        /// Emit out the current "good" slice
+        h264_parse_emit_current_slice(parent, pInfo);
+
+    }
+    break;
+
+    ///// * Main profile doesn't support Data Partition, skipped.... *////
+    case h264_NAL_UNIT_TYPE_DPA:
+    case h264_NAL_UNIT_TYPE_DPB:
+    case h264_NAL_UNIT_TYPE_DPC:
+        //OS_INFO("***********************DP feature, not supported currently*******************\n");
+        pInfo->wl_err_curr |= VIDDEC_FW_WORKLOAD_ERR_NOTDECODABLE;
+        status = H264_STATUS_NOTSUPPORT;
+        break;
+
+        //// * Parsing SEI info *////
+    case h264_NAL_UNIT_TYPE_SEI:
+        status = H264_STATUS_OK;
+
+        //OS_INFO("*****************************SEI**************************************\n");
+        if (pInfo->sps_valid) {
+            //h264_user_data_t user_data; /// Replace with tmp buffer while porting to FW
+            pInfo->number_of_first_au_info_nal_before_first_slice++;
+            /// parsing the SEI info
+            status = h264_Parse_Supplemental_Enhancement_Information_Message(parent, pInfo);
+        }
+
+        //h264_rbsp_trailing_bits(pInfo);
+        break;
+    case h264_NAL_UNIT_TYPE_SPS:
+    {
+        //OS_INFO("*****************************SPS**************************************\n");
+        ///
+        /// Can not define local SPS since the Current local stack size limitation!
+        /// Could be changed after the limitation gone
+        ///
+        uint8_t  old_sps_id=0;
+        vui_seq_parameters_t_not_used vui_seq_not_used;
+
+        old_sps_id = pInfo->active_SPS.seq_parameter_set_id;
+        h264_memset(&(pInfo->active_SPS), 0x0, sizeof(seq_param_set_used));
+
+
+        status = h264_Parse_SeqParameterSet(parent, pInfo, &(pInfo->active_SPS), &vui_seq_not_used, (int32_t *)pInfo->TMP_OFFSET_REFFRM_PADDR_GL);
+        if (status == H264_STATUS_OK) {
+            h264_Parse_Copy_Sps_To_DDR(pInfo, &(pInfo->active_SPS), pInfo->active_SPS.seq_parameter_set_id);
+            pInfo->sps_valid = 1;
+
+            if (1==pInfo->active_SPS.pic_order_cnt_type) {
+                h264_Parse_Copy_Offset_Ref_Frames_To_DDR(pInfo,(int32_t *)pInfo->TMP_OFFSET_REFFRM_PADDR_GL,pInfo->active_SPS.seq_parameter_set_id);
+            }
+
+#ifdef DUMP_HEADER_INFO
+            dump_sps(&(pInfo->active_SPS));
+#endif
+
+        }
+        ///// Restore the active SPS if new arrival's id changed
+        if (old_sps_id>=MAX_NUM_SPS) {
+            h264_memset(&(pInfo->active_SPS), 0x0, sizeof(seq_param_set_used));
+            pInfo->active_SPS.seq_parameter_set_id = 0xff;
+        }
+        else {
+            if (old_sps_id!=pInfo->active_SPS.seq_parameter_set_id)  {
+                h264_Parse_Copy_Sps_From_DDR(pInfo, &(pInfo->active_SPS), old_sps_id);
+            }
+            else  {
+                //h264_memset(&(pInfo->active_SPS), 0x0, sizeof(seq_param_set));
+                pInfo->active_SPS.seq_parameter_set_id = 0xff;
+            }
+        }
+
+        pInfo->number_of_first_au_info_nal_before_first_slice++;
+    }
+    break;
+    case h264_NAL_UNIT_TYPE_PPS:
+    {
+        //OS_INFO("*****************************PPS**************************************\n");
+
+        uint32_t old_sps_id = pInfo->active_SPS.seq_parameter_set_id;
+        uint32_t old_pps_id = pInfo->active_PPS.pic_parameter_set_id;
+
+        h264_memset(&pInfo->active_PPS, 0x0, sizeof(pic_param_set));
+        pInfo->number_of_first_au_info_nal_before_first_slice++;
+
+        if (h264_Parse_PicParameterSet(parent, pInfo, &pInfo->active_PPS)== H264_STATUS_OK)
+        {
+            h264_Parse_Copy_Sps_From_DDR(pInfo, &(pInfo->active_SPS), pInfo->active_PPS.seq_parameter_set_id);
+            if (old_sps_id != pInfo->active_SPS.seq_parameter_set_id)
+            {
+                pInfo->Is_SPS_updated = 1;
+            }
+            if (pInfo->active_SPS.seq_parameter_set_id != 0xff) {
+                h264_Parse_Copy_Pps_To_DDR(pInfo, &pInfo->active_PPS, pInfo->active_PPS.pic_parameter_set_id);
+                pInfo->got_start = 1;
+                if (pInfo->sei_information.recovery_point)
+                {
+                    pInfo->img.recovery_point_found |= 2;
+
+                    //// Enable the RP recovery if no IDR ---Cisco
+                    if ((pInfo->img.recovery_point_found & 1)==0)
+                        pInfo->sei_rp_received = 1;
+                }
+            }
+            else
+            {
+                h264_Parse_Copy_Sps_From_DDR(pInfo, &(pInfo->active_SPS), old_sps_id);
+            }
+#ifdef DUMP_HEADER_INFO
+            dump_pps(&(pInfo->active_PPS));
+#endif
+        } else {
+            if (old_sps_id<MAX_NUM_SPS)
+                h264_Parse_Copy_Sps_From_DDR(pInfo, &(pInfo->active_SPS), old_sps_id);
+            if (old_pps_id<MAX_NUM_PPS)
+                h264_Parse_Copy_Pps_From_DDR(pInfo, &(pInfo->active_PPS), old_pps_id);
+        }
+
+    } //// End of PPS parsing
+    break;
+
+
+    case h264_NAL_UNIT_TYPE_EOSeq:
+    case h264_NAL_UNIT_TYPE_EOstream:
+
+        h264_parse_emit_eos(parent, pInfo);
+        h264_init_dpb(&(pInfo->dpb));
+
+        pInfo->is_current_workload_done=1;
+
+        /* picture level info which will always be initialized */
+        //h264_init_Info_under_sps_pps_level(pInfo);
+
+        ////reset the pInfo here
+        //viddec_h264_init(ctxt, (uint32_t *)parser->sps_pps_ddr_paddr, false);
+
+
+        status = H264_STATUS_OK;
+        pInfo->number_of_first_au_info_nal_before_first_slice++;
+        break;
+
+    case h264_NAL_UNIT_TYPE_Acc_unit_delimiter:
+#if 1
+        ///// primary_pic_type
+        {
+            uint32_t code = 0xff;
+            int32_t ret = 0;
+            ret = viddec_pm_get_bits(parent, (uint32_t *)&(code), 3);
+
+            if (ret != -1) {
+                //if(pInfo->got_start && (code == 0))
+                //{
+                //pInfo->img.recovery_point_found |= 4;
+                //}
+                pInfo->primary_pic_type_plus_one = (uint8_t)(code)+1;
+                status = H264_STATUS_OK;
+            }
+            pInfo->number_of_first_au_info_nal_before_first_slice++;
+            break;
+        }
+#endif
+
+    case h264_NAL_UNIT_TYPE_Reserved1:
+    case h264_NAL_UNIT_TYPE_Reserved2:
+    case h264_NAL_UNIT_TYPE_Reserved3:
+    case h264_NAL_UNIT_TYPE_Reserved4:
+    case h264_NAL_UNIT_TYPE_Reserved5:
+        status = H264_STATUS_OK;
+        pInfo->number_of_first_au_info_nal_before_first_slice++;
+        break;
+
+    case h264_NAL_UNIT_TYPE_filler_data:
+        status = H264_STATUS_OK;
+        break;
+    case h264_NAL_UNIT_TYPE_ACP:
+        break;
+    case h264_NAL_UNIT_TYPE_SPS_extension:
+    case h264_NAL_UNIT_TYPE_unspecified:
+    case h264_NAL_UNIT_TYPE_unspecified2:
+        status = H264_STATUS_OK;
+        //nothing
+        break;
+    default:
+        status = H264_STATUS_OK;
+        break;
+    }
+
+    //pInfo->old_nal_unit_type = pInfo->nal_unit_type;
+    switch ( pInfo->nal_unit_type )
+    {
+    case h264_NAL_UNIT_TYPE_IDR:
+    case h264_NAL_UNIT_TYPE_SLICE:
+    case h264_NAL_UNIT_TYPE_Acc_unit_delimiter:
+    case h264_NAL_UNIT_TYPE_SPS:
+    case h264_NAL_UNIT_TYPE_PPS:
+    case h264_NAL_UNIT_TYPE_SEI:
+    case h264_NAL_UNIT_TYPE_EOSeq:
+    case h264_NAL_UNIT_TYPE_EOstream:
+    case h264_NAL_UNIT_TYPE_Reserved1:
+    case h264_NAL_UNIT_TYPE_Reserved2:
+    case h264_NAL_UNIT_TYPE_Reserved3:
+    case h264_NAL_UNIT_TYPE_Reserved4:
+    case h264_NAL_UNIT_TYPE_Reserved5:
+    {
+        pInfo->old_nal_unit_type = pInfo->nal_unit_type;
+        break;
+    }
+    default:
+        break;
+    }
+
+    return status;
+}
+
+void viddec_h264secure_get_context_size(viddec_parser_memory_sizes_t *size)
+{
+    /* Should return size of my structure */
+    size->context_size = sizeof(struct h264_viddec_parser);
+    size->persist_size = MAX_NUM_SPS * sizeof(seq_param_set_all)
+                         + MAX_NUM_PPS * sizeof(pic_param_set)
+                         + MAX_NUM_SPS * sizeof(int32_t) * MAX_NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE
+                         + sizeof(int32_t) * MAX_NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE;
+}
+
+/* ------------------------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------------------------ */
+void viddec_h264secure_flush(void *parent, void *ctxt)
+{
+    int i;
+    struct h264_viddec_parser* parser = ctxt;
+    h264_Info * pInfo = &(parser->info);
+
+    /* just flush dpb and disable output */
+    h264_dpb_flush_dpb(pInfo, 0, pInfo->img.second_field, pInfo->active_SPS.num_ref_frames);
+
+    /* reset the dpb to the initial state, avoid parser store
+       wrong data to dpb in next slice parsing */
+    h264_DecodedPictureBuffer *p_dpb = &pInfo->dpb;
+    for (i = 0; i < NUM_DPB_FRAME_STORES; i++)
+    {
+        p_dpb->fs[i].fs_idc = MPD_DPB_FS_NULL_IDC;
+        p_dpb->fs_dpb_idc[i] = MPD_DPB_FS_NULL_IDC;
+    }
+    p_dpb->used_size = 0;
+    p_dpb->fs_dec_idc = MPD_DPB_FS_NULL_IDC;
+    p_dpb->fs_non_exist_idc = MPD_DPB_FS_NULL_IDC;
+
+    return;
+}
+
+h264_Status h264secure_Parse_Dec_Ref_Pic_Marking(h264_Info* pInfo, void *newdata, h264_Slice_Header_t*SliceHeader)
+{
+    vbp_h264_sliceheader* sliceheader_p = (vbp_h264_sliceheader*) newdata;
+
+    uint8_t i = 0;
+    uint32_t code;
+    if (pInfo->nal_unit_type == h264_NAL_UNIT_TYPE_IDR)
+    {
+        SliceHeader->sh_dec_refpic.no_output_of_prior_pics_flag = (uint8_t)sliceheader_p->ref_pic_marking.no_output_of_prior_pics_flag;
+        SliceHeader->sh_dec_refpic.long_term_reference_flag = (uint8_t)sliceheader_p->ref_pic_marking.long_term_reference_flag;
+        pInfo->img.long_term_reference_flag = SliceHeader->sh_dec_refpic.long_term_reference_flag;
+    }
+    else
+    {
+        SliceHeader->sh_dec_refpic.adaptive_ref_pic_marking_mode_flag = sliceheader_p->ref_pic_marking.adaptive_ref_pic_marking_mode_flag;
+
+        ///////////////////////////////////////////////////////////////////////////////////////
+        //adaptive_ref_pic_marking_mode_flag Reference picture marking mode specified
+        //                              Sliding window reference picture marking mode: A marking mode
+        //                              providing a first-in first-out mechanism for short-term reference pictures.
+        //                              Adaptive reference picture marking mode: A reference picture
+        //                              marking mode providing syntax elements to specify marking of
+        //                              reference pictures as "unused for reference" and to assign long-term
+        //                              frame indices.
+        ///////////////////////////////////////////////////////////////////////////////////////
+
+        if (SliceHeader->sh_dec_refpic.adaptive_ref_pic_marking_mode_flag)
+        {
+            do
+            {
+                if (i < NUM_MMCO_OPERATIONS)
+                {
+                    code = sliceheader_p->ref_pic_marking.op[i].memory_management_control_operation;
+                    SliceHeader->sh_dec_refpic.memory_management_control_operation[i] = code;
+                    if (SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 1)
+                    {
+                        SliceHeader->sh_dec_refpic.difference_of_pic_num_minus1[i] = sliceheader_p->ref_pic_marking.op[i].op1.difference_of_pic_nums_minus1;
+                    }
+
+                    if (SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 2)
+                    {
+                        SliceHeader->sh_dec_refpic.long_term_pic_num[i] = sliceheader_p->ref_pic_marking.op[i].op2.long_term_pic_num;
+                    }
+
+                    if (SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 6)
+                    {
+                        SliceHeader->sh_dec_refpic.long_term_frame_idx[i] = sliceheader_p->ref_pic_marking.op[i].op6.long_term_frame_idx;
+                    }
+
+                    if (SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 3) {
+                        SliceHeader->sh_dec_refpic.difference_of_pic_num_minus1[i] = sliceheader_p->ref_pic_marking.op[i].op3.difference_of_pic_nums_minus1;
+                        SliceHeader->sh_dec_refpic.long_term_frame_idx[i] = sliceheader_p->ref_pic_marking.op[i].op3.long_term_frame_idx;
+                    }
+
+                    if (SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 4)
+                    {
+                        SliceHeader->sh_dec_refpic.max_long_term_frame_idx_plus1[i] = sliceheader_p->ref_pic_marking.op[i].op4.max_long_term_frame_idx_plus1;
+                    }
+
+                    if (SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 5)
+                    {
+                        pInfo->img.curr_has_mmco_5 = 1;
+                    }
+                }
+
+                if (i >= NUM_MMCO_OPERATIONS) {
+                    return H264_STATUS_ERROR;
+                }
+            } while (SliceHeader->sh_dec_refpic.memory_management_control_operation[i++] != 0);
+        }
+    }
+
+    SliceHeader->sh_dec_refpic.dec_ref_pic_marking_count = i;
+
+    return H264_STATUS_OK;
+}
+
+uint32_t h264secure_Update_Slice_Header(h264_Info* pInfo, void *newdata, h264_Slice_Header_t *SliceHeader)
+{
+    h264_Status retStatus = H264_STATUS_OK;
+    uint8_t data;
+    vbp_h264_sliceheader* sliceheader_p = (vbp_h264_sliceheader*) newdata;
+    ///// first_mb_in_slice
+    SliceHeader->first_mb_in_slice = sliceheader_p->slice_header.first_mb_in_slice;
+
+    SliceHeader->pic_parameter_id  = (uint8_t)sliceheader_p->slice_header.pps_id;
+    retStatus = h264_active_par_set(pInfo, SliceHeader);
+
+    switch (pInfo->active_SPS.profile_idc)
+    {
+        case h264_ProfileBaseline:
+        case h264_ProfileMain:
+        case h264_ProfileExtended:
+            pInfo->active_PPS.transform_8x8_mode_flag=0;
+            pInfo->active_PPS.pic_scaling_matrix_present_flag =0;
+            pInfo->active_PPS.second_chroma_qp_index_offset = pInfo->active_PPS.chroma_qp_index_offset;
+        default:
+            break;
+    }
+
+    uint32_t code;
+    int32_t max_mb_num=0;
+
+    SliceHeader->frame_num = (int32_t)sliceheader_p->slice_header.frame_num;
+
+    /// Picture structure
+    SliceHeader->structure = FRAME;
+    SliceHeader->field_pic_flag = 0;
+    SliceHeader->bottom_field_flag = 0;
+
+    if (!(pInfo->active_SPS.sps_disp.frame_mbs_only_flag))
+    {
+        /// field_pic_flag
+        SliceHeader->field_pic_flag = (uint8_t)sliceheader_p->slice_header.field_pic_flag;
+
+        if (SliceHeader->field_pic_flag)
+        {
+            SliceHeader->bottom_field_flag = (uint8_t)sliceheader_p->slice_header.bottom_field_flag;
+            SliceHeader->structure = SliceHeader->bottom_field_flag? BOTTOM_FIELD: TOP_FIELD;
+        }
+    }
+
+    ////// Check valid or not of first_mb_in_slice
+    if (SliceHeader->structure == FRAME) {
+        max_mb_num = pInfo->img.FrameHeightInMbs * pInfo->img.PicWidthInMbs;
+    } else {
+        max_mb_num = pInfo->img.FrameHeightInMbs * pInfo->img.PicWidthInMbs/2;
+    }
+
+
+    if (pInfo->active_SPS.sps_disp.mb_adaptive_frame_field_flag & (!(pInfo->SliceHeader.field_pic_flag))) {
+        SliceHeader->first_mb_in_slice <<=1;
+    }
+
+    if (SliceHeader->first_mb_in_slice >= max_mb_num) {
+        retStatus = H264_STATUS_NOTSUPPORT;
+        return retStatus;
+    }
+
+
+    if (pInfo->nal_unit_type == h264_NAL_UNIT_TYPE_IDR)
+    {
+        SliceHeader->idr_pic_id = sliceheader_p->slice_header.idr_pic_id;
+    }
+
+    if (pInfo->active_SPS.pic_order_cnt_type == 0)
+    {
+        SliceHeader->pic_order_cnt_lsb = (uint32_t)sliceheader_p->slice_header.pic_order_cnt_lsb;
+
+
+        if ((pInfo->active_PPS.pic_order_present_flag) && !(SliceHeader->field_pic_flag))
+        {
+            SliceHeader->delta_pic_order_cnt_bottom = sliceheader_p->slice_header.delta_pic_order_cnt_bottom;
+        }
+        else
+        {
+            SliceHeader->delta_pic_order_cnt_bottom = 0;
+        }
+    }
+
+    if ((pInfo->active_SPS.pic_order_cnt_type == 1) && !(pInfo->active_SPS.delta_pic_order_always_zero_flag))
+    {
+        SliceHeader->delta_pic_order_cnt[0] = sliceheader_p->slice_header.delta_pic_order_cnt[0];
+        if ((pInfo->active_PPS.pic_order_present_flag) && !(SliceHeader->field_pic_flag))
+        {
+            SliceHeader->delta_pic_order_cnt[1] = sliceheader_p->slice_header.delta_pic_order_cnt[1];
+        }
+    }
+/*
+    if (pInfo->active_PPS.redundant_pic_cnt_present_flag)
+    {
+        SliceHeader->redundant_pic_cnt = sliceheader_p->slice_header.redundant_pic_cnt;
+        if (SliceHeader->redundant_pic_cnt > 127) {
+            retStatus = H264_STATUS_NOTSUPPORT;
+            return retStatus;
+        }
+    } else {
+        SliceHeader->redundant_pic_cnt = 0;
+    }
+*/
+    ////
+    //// Parse dec_ref_pic_marking if present (only for reference NALs)
+    ////
+    if (SliceHeader->nal_ref_idc != 0)
+    {
+        if (h264secure_Parse_Dec_Ref_Pic_Marking(pInfo, newdata, SliceHeader) != H264_STATUS_OK)
+        {
+            retStatus = H264_STATUS_NOTSUPPORT;
+            return retStatus;
+        }
+    }
+    retStatus = H264_STATUS_OK;
+    return retStatus;
+}
+/* AVC short-format entry point: instead of parsing the slice header from the
+ * bitstream, copies a pre-parsed header (the vbp_h264_sliceheader passed via
+ * 'data') into the parser context, then performs the regular
+ * new-picture / DPB bookkeeping.  Returns the h264_Status produced by
+ * h264secure_Update_Slice_Header.  'size' is currently unused. */
+uint32_t viddec_h264secure_update(void *parent, void *data, uint32_t size)
+{
+    viddec_pm_cxt_t * parser_cxt = (viddec_pm_cxt_t *)parent;
+    struct h264_viddec_parser* parser = (struct h264_viddec_parser*) &parser_cxt->codec_data[0];
+    h264_Info * pInfo = &(parser->info);
+
+    h264_Status status = H264_STATUS_ERROR;
+    vbp_h264_sliceheader* sliceheader_p = (vbp_h264_sliceheader*) data;
+
+    pInfo->img.g_new_frame = 0;
+    pInfo->push_to_cur = 1;
+    pInfo->is_current_workload_done =0;
+    pInfo->nal_unit_type = 0;
+    /* low 5 bits of the NAL header byte carry nal_unit_type */
+    pInfo->nal_unit_type = sliceheader_p->slice_header.nal_unit_type & 0x1F;
+
+    h264_Slice_Header_t next_SliceHeader;
+
+    /// Reset next slice header
+    h264_memset(&next_SliceHeader, 0x0, sizeof(h264_Slice_Header_t));
+    /* bits 5..6 of the NAL header byte carry nal_ref_idc */
+    next_SliceHeader.nal_ref_idc = (sliceheader_p->slice_header.nal_unit_type & 0x60) >> 5;
+
+    if ( (1==pInfo->primary_pic_type_plus_one)&&(pInfo->got_start))
+    {
+        pInfo->img.recovery_point_found |=4;
+    }
+    pInfo->primary_pic_type_plus_one = 0;
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Step 2: Parsing slice header
+    ////////////////////////////////////////////////////////////////////////////
+    /// PWT
+    pInfo->h264_pwt_start_byte_offset=0;
+    pInfo->h264_pwt_start_bit_offset=0;
+    pInfo->h264_pwt_end_byte_offset=0;
+    pInfo->h264_pwt_end_bit_offset=0;
+    pInfo->h264_pwt_enabled =0;
+    /// IDR flag
+    next_SliceHeader.idr_flag = (pInfo->nal_unit_type == h264_NAL_UNIT_TYPE_IDR);
+
+    /// Pass slice header
+    status = h264secure_Update_Slice_Header(pInfo, sliceheader_p, &next_SliceHeader);
+    /* NOTE(review): 'status' is not checked here; even on failure the DPB and
+     * picture state below are still updated -- confirm this is intended. */
+
+    pInfo->sei_information.recovery_point = 0;
+    pInfo->img.current_slice_num++;
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Step 3: Processing if new picture coming
+    //  1) if it's the second field
+    //  2) if it's a new frame
+    ////////////////////////////////////////////////////////////////////////////
+    //AssignQuantParam(pInfo);
+    if (h264_is_new_picture_start(pInfo, next_SliceHeader, pInfo->SliceHeader))
+    {
+        //
+        ///----------------- New Picture.boundary detected--------------------
+        //
+        pInfo->img.g_new_pic++;
+
+        //
+        // Complete previous picture
+        h264_dpb_store_previous_picture_in_dpb(pInfo, 0, 0); //curr old
+
+        //
+        // Update slice structures:
+        h264_update_old_slice(pInfo, next_SliceHeader);  //cur->old; next->cur;
+
+        //
+        // 1) if resolution change: reset dpb
+        // 2) else: init frame store
+        h264_update_img_info(pInfo);  //img, dpb
+
+        //
+        ///----------------- New frame.boundary detected--------------------
+        //
+        pInfo->img.second_field = h264_is_second_field(pInfo);
+        if (pInfo->img.second_field == 0)
+        {
+            pInfo->img.g_new_frame = 1;
+            h264_dpb_update_queue_dangling_field(pInfo);
+            h264_dpb_gaps_in_frame_num_mem_management(pInfo);
+        }
+        /// Decoding POC
+        h264_hdr_decoding_poc (pInfo, 0, 0);
+        //
+        /// Init Frame Store for next frame
+        h264_dpb_init_frame_store (pInfo);
+        pInfo->img.current_slice_num = 1;
+
+        if (pInfo->SliceHeader.first_mb_in_slice != 0)
+        {
+            ////Come here means we have slice lost at the beginning, since no FMO support
+            pInfo->SliceHeader.sh_error |= (pInfo->SliceHeader.structure << 17);
+        }
+    }
+    else ///////////////////////////////////////////////////// If Not a picture start
+    {
+        /// Update slice structures: cur->old; next->cur;
+        h264_update_old_slice(pInfo, next_SliceHeader);
+        /// 1) if resolution change: reset dpb
+        /// 2) else: update img info
+        h264_update_img_info(pInfo);
+    }
+    return status;
+}
diff --git a/mix_vbp/viddec_fw/fw/parser/Android.mk b/mix_vbp/viddec_fw/fw/parser/Android.mk
index 659b473..646e910 100644
--- a/mix_vbp/viddec_fw/fw/parser/Android.mk
+++ b/mix_vbp/viddec_fw/fw/parser/Android.mk
@@ -51,4 +51,11 @@
 LOCAL_CFLAGS += -DUSE_HW_VP8
 endif
 
+PLATFORM_SUPPORT_AVC_SHORT_FORMAT := \
+    baytrail
+
+ifneq ($(filter $(TARGET_BOARD_PLATFORM),$(PLATFORM_SUPPORT_AVC_SHORT_FORMAT)),)
+LOCAL_CFLAGS += -DUSE_AVC_SHORT_FORMAT
+LOCAL_SRC_FILES += vbp_h264secure_parser.c
+endif
 include $(BUILD_SHARED_LIBRARY)
diff --git a/mix_vbp/viddec_fw/fw/parser/include/viddec_parser_ops.h b/mix_vbp/viddec_fw/fw/parser/include/viddec_parser_ops.h
index 561b179..66812f5 100644
--- a/mix_vbp/viddec_fw/fw/parser/include/viddec_parser_ops.h
+++ b/mix_vbp/viddec_fw/fw/parser/include/viddec_parser_ops.h
@@ -26,6 +26,9 @@
 typedef    uint32_t (*fn_gen_contrib_tags)(void *parent, uint32_t ignore_partial);
 typedef    uint32_t (*fn_gen_assoc_tags)(void *parent);
 typedef    void (*fn_flush_parser) (void *parent, void *ctxt);
+#ifdef USE_AVC_SHORT_FORMAT
+typedef    uint32_t (*fn_update_data)(void *parent, void *data, uint32_t size);
+#endif
 
 
 typedef struct
@@ -39,6 +42,9 @@
     fn_gen_contrib_tags gen_contrib_tags;
     fn_gen_assoc_tags gen_assoc_tags;
     fn_flush_parser flush;
+#ifdef USE_AVC_SHORT_FORMAT
+    fn_update_data update_data;
+#endif
 } viddec_parser_ops_t;
 
 
diff --git a/mix_vbp/viddec_fw/fw/parser/vbp_h264secure_parser.c b/mix_vbp/viddec_fw/fw/parser/vbp_h264secure_parser.c
new file mode 100644
index 0000000..498cbc4
--- /dev/null
+++ b/mix_vbp/viddec_fw/fw/parser/vbp_h264secure_parser.c
@@ -0,0 +1,1830 @@
+/* INTEL CONFIDENTIAL
+* Copyright (c) 2009, 2012 Intel Corporation.  All rights reserved.
+*
+* The source code contained or described herein and all documents
+* related to the source code ("Material") are owned by Intel
+* Corporation or its suppliers or licensors.  Title to the
+* Material remains with Intel Corporation or its suppliers and
+* licensors.  The Material contains trade secrets and proprietary
+* and confidential information of Intel or its suppliers and
+* licensors. The Material is protected by worldwide copyright and
+* trade secret laws and treaty provisions.  No part of the Material
+* may be used, copied, reproduced, modified, published, uploaded,
+* posted, transmitted, distributed, or disclosed in any way without
+* Intel's prior express written permission.
+*
+* No license under any patent, copyright, trade secret or other
+* intellectual property right is granted to or conferred upon you
+* by disclosure or delivery of the Materials, either expressly, by
+* implication, inducement, estoppel or otherwise. Any license
+* under such intellectual property rights must be express and
+* approved by Intel in writing.
+*
+*/
+
+
+#include <dlfcn.h>
+#include "h264.h"
+#include "vbp_loader.h"
+#include "vbp_utils.h"
+#include "vbp_h264secure_parser.h"
+
+#define TERMINATE_KEY 0xFFFFFFFF
+
+typedef struct vbp_h264secure_parser_private vbp_h264secure_parser_private;
+
+/* How NAL units are delimited in the incoming bitstream. */
+typedef enum
+{
+    H264_BS_LENGTH_PREFIXED,    /* each NAL preceded by a length field */
+    H264_BS_SC_PREFIXED,        /* AnnexB byte stream: start-code prefixed */
+    H264_BS_SINGLE_NAL          /* buffer holds exactly one NAL */
+} H264_BS_PATTERN;
+
+struct vbp_h264secure_parser_private
+{
+    /* number of bytes used to encode length of NAL payload.  If parser does not receive configuration data
+    and NAL_length_size is equal to zero when bitstream parsing begins, we assume bitstream is in AnnexB
+    byte stream format. */
+    int NAL_length_size;
+
+    /* indicate if stream is length prefixed */
+    int length_prefix_verified;
+
+    H264_BS_PATTERN bitstream_pattern;
+
+    /* presumably the input buffer base, current parse offset and total
+       size -- verify against the code that fills these fields */
+    uint8_t* start;
+    int32_t  offset;
+    int32_t  size;
+};
+
+/* default scaling list table */
+/* (non-flat default lists; cf. H.264 spec Tables 7-3 / 7-4) */
+static unsigned char Default_4x4_Intra[16] =
+{
+    6,13,20,28,
+    13,20,28,32,
+    20,28,32,37,
+    28,32,37,42
+};
+
+static unsigned char Default_4x4_Inter[16] =
+{
+    10,14,20,24,
+    14,20,24,27,
+    20,24,27,30,
+    24,27,30,34
+};
+
+static unsigned char Default_8x8_Intra[64] =
+{
+    6,10,13,16,18,23,25,27,
+    10,11,16,18,23,25,27,29,
+    13,16,18,23,25,27,29,31,
+    16,18,23,25,27,29,31,33,
+    18,23,25,27,29,31,33,36,
+    23,25,27,29,31,33,36,38,
+    25,27,29,31,33,36,38,40,
+    27,29,31,33,36,38,40,42
+};
+
+static unsigned char Default_8x8_Inter[64] =
+{
+    9,13,15,17,19,21,22,24,
+    13,13,17,19,21,22,24,25,
+    15,17,19,21,22,24,25,27,
+    17,19,21,22,24,25,27,28,
+    19,21,22,24,25,27,28,30,
+    21,22,24,25,27,28,30,32,
+    22,24,25,27,28,30,32,33,
+    24,25,27,28,30,32,33,35
+};
+
+/* flat (all-16) lists used when no scaling matrices are signalled */
+static unsigned char quant_flat[16] =
+{
+    16,16,16,16,
+    16,16,16,16,
+    16,16,16,16,
+    16,16,16,16
+};
+
+static unsigned char quant8_flat[64] =
+{
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16
+};
+
+/* default-list fallback table indexed by scaling list id 0..7
+   (ids 0-5: 4x4 lists, ids 6-7: 8x8 lists) */
+static unsigned char* UseDefaultList[8] =
+{
+    Default_4x4_Intra, Default_4x4_Intra, Default_4x4_Intra,
+    Default_4x4_Inter, Default_4x4_Inter, Default_4x4_Inter,
+    Default_8x8_Intra,
+    Default_8x8_Inter
+};
+
+/* {width, height} sample aspect ratios, presumably indexed by
+   aspect_ratio_idc (cf. H.264 spec Table E-1) -- confirm against users */
+static uint8 h264_aspect_ratio_table[][2] =
+{
+    {0, 0},
+    {1, 1},
+    {12, 11},
+    {10, 11},
+    {16, 11},
+    {40, 33},
+    {24, 11},
+    {20, 11},
+    {32, 11},
+    {80, 33},
+    {18, 11},
+    {15, 11},
+    {64, 33},
+    {160, 99},
+    {4, 3},
+    {3, 2},
+    {2, 1},
+    // reserved
+    {0, 0}
+};
+
+
+
+/**
+ * Resolve the secure-h264 parser entry points from the already-loaded
+ * parser library (pcontext->fd_parser) via dlsym().
+ *
+ * Returns VBP_PARM when parser_ops is NULL, VBP_LOAD when a required
+ * symbol cannot be resolved, VBP_OK on success.
+ */
+uint32 vbp_init_parser_entries_h264secure(vbp_context *pcontext)
+{
+    if (NULL == pcontext->parser_ops)
+    {
+        return VBP_PARM;
+    }
+    pcontext->parser_ops->init = dlsym(pcontext->fd_parser, "viddec_h264secure_init");
+    if (NULL == pcontext->parser_ops->init)
+    {
+        ETRACE ("Failed to set entry point." );
+        return VBP_LOAD;
+    }
+
+    pcontext->parser_ops->parse_sc = viddec_parse_sc;
+
+    pcontext->parser_ops->parse_syntax = dlsym(pcontext->fd_parser, "viddec_h264secure_parse");
+    if (NULL == pcontext->parser_ops->parse_syntax)
+    {
+        ETRACE ("Failed to set entry point." );
+        return VBP_LOAD;
+    }
+
+    pcontext->parser_ops->get_cxt_size = dlsym(pcontext->fd_parser, "viddec_h264secure_get_context_size");
+    if (NULL == pcontext->parser_ops->get_cxt_size)
+    {
+        ETRACE ("Failed to set entry point." );
+        return VBP_LOAD;
+    }
+
+    /* short-format specific hook: viddec_h264secure_update() feeds
+       externally parsed slice headers into the parser */
+    pcontext->parser_ops->update_data = dlsym(pcontext->fd_parser, "viddec_h264secure_update");
+    if (NULL == pcontext->parser_ops->update_data)
+    {
+        ETRACE ("Failed to set entry point.");
+        return VBP_LOAD;
+    }
+
+    /* entry point not needed */
+    pcontext->parser_ops->is_wkld_done = NULL;
+    pcontext->parser_ops->flush = NULL;
+    pcontext->parser_ops->is_frame_start = NULL;
+    return VBP_OK;
+}
+
+
+/**
+ * Allocate the h264 query-data hierarchy (per-picture parameters and slice
+ * data, IQ matrix buffer, codec data) plus the private parser context.
+ *
+ * Returns VBP_PARM if query_data was already allocated, VBP_MEM if any
+ * allocation fails (all partial allocations are released via
+ * vbp_free_query_data_h264secure), VBP_OK on success.
+ */
+uint32 vbp_allocate_query_data_h264secure(vbp_context *pcontext)
+{
+    if (NULL != pcontext->query_data)
+    {
+        return VBP_PARM;
+    }
+
+    pcontext->query_data = NULL;
+    vbp_data_h264 *query_data = NULL;
+
+    query_data = vbp_malloc_set0(vbp_data_h264, 1);
+    if (NULL == query_data)
+    {
+        goto cleanup;
+    }
+
+    /* assign the pointer */
+    pcontext->query_data = (void *)query_data;
+
+    query_data->pic_data = vbp_malloc_set0(vbp_picture_data_h264, MAX_NUM_PICTURES);
+    if (NULL == query_data->pic_data)
+    {
+        goto cleanup;
+    }
+
+    int i;
+    for (i = 0; i < MAX_NUM_PICTURES; i++)
+    {
+        query_data->pic_data[i].pic_parms = vbp_malloc_set0(VAPictureParameterBufferH264, 1);
+        if (NULL == query_data->pic_data[i].pic_parms)
+        {
+            goto cleanup;
+        }
+        query_data->pic_data[i].num_slices = 0;
+        query_data->pic_data[i].slc_data = vbp_malloc_set0(vbp_slice_data_h264, MAX_NUM_SLICES);
+        if (NULL == query_data->pic_data[i].slc_data)
+        {
+            goto cleanup;
+        }
+    }
+
+
+    query_data->IQ_matrix_buf = vbp_malloc_set0(VAIQMatrixBufferH264, 1);
+    if (NULL == query_data->IQ_matrix_buf)
+    {
+        goto cleanup;
+    }
+
+    query_data->codec_data = vbp_malloc_set0(vbp_codec_data_h264, 1);
+    if (NULL == query_data->codec_data)
+    {
+        goto cleanup;
+    }
+
+    pcontext->parser_private = NULL;
+    vbp_h264secure_parser_private *parser_private = NULL;
+
+    parser_private = vbp_malloc_set0(vbp_h264secure_parser_private, 1);
+    if (NULL == parser_private)
+    {
+        goto cleanup;
+    }
+
+    /* assign the pointer */
+    pcontext->parser_private = (void *)parser_private;
+
+    /* init the pointer */
+    parser_private->start = 0;
+    parser_private->offset = 0;
+    parser_private->size = 0;
+    parser_private->NAL_length_size = 0;
+    parser_private->length_prefix_verified = 0;
+    parser_private->bitstream_pattern = H264_BS_SC_PREFIXED;
+
+    return VBP_OK;
+
+cleanup:
+    vbp_free_query_data_h264secure(pcontext);
+
+    return VBP_MEM;
+}
+
+/**
+ * Release everything allocated by vbp_allocate_query_data_h264secure().
+ * Safe to call on partially-initialized state: free(NULL) is a no-op and
+ * an absent query_data returns immediately.  Always returns VBP_OK.
+ */
+uint32 vbp_free_query_data_h264secure(vbp_context *pcontext)
+{
+    if (NULL != pcontext->parser_private)
+    {
+        free(pcontext->parser_private);
+        pcontext->parser_private = NULL;
+    }
+
+    if (NULL == pcontext->query_data)
+    {
+        return VBP_OK;
+    }
+
+    int i;
+    vbp_data_h264 *query_data;
+    query_data = (vbp_data_h264 *)pcontext->query_data;
+
+    if (query_data->pic_data)
+    {
+        for (i = 0; i < MAX_NUM_PICTURES; i++)
+        {
+            free(query_data->pic_data[i].slc_data);
+            free(query_data->pic_data[i].pic_parms);
+        }
+        free(query_data->pic_data);
+    }
+
+    free(query_data->IQ_matrix_buf);
+    free(query_data->codec_data);
+    free(query_data);
+
+    pcontext->query_data = NULL;
+
+    return VBP_OK;
+}
+
+
+/* Read a 16-bit big-endian (network byte order) value from p. */
+static inline uint16_t vbp_utils_ntohs(uint8_t* p)
+{
+    uint16_t i = ((*p) << 8) + ((*(p+1)));
+    return i;
+}
+
+/* Read a 32-bit big-endian (network byte order) value from p. */
+static inline uint32_t vbp_utils_ntohl(uint8_t* p)
+{
+    uint32_t i = ((*p) << 24) + ((*(p+1)) << 16) + ((*(p+2)) << 8) + ((*(p+3)));
+    return i;
+}
+
+
+/* Translate a DPB frame store entry into a VAPictureH264, choosing the
+ * field flags based on the current picture structure.  A structure
+ * mismatch between the current picture and the reference is logged but
+ * tolerated. */
+static inline void vbp_set_VAPicture_h264secure(
+    int curr_picture_structure,
+    int bottom_field,
+    frame_store* store,
+    VAPictureH264* pic)
+{
+    if (FRAME == curr_picture_structure)
+    {
+        if (FRAME != viddec_h264_get_dec_structure(store))
+        {
+            WTRACE("Reference picture structure is not frame for current frame picture!");
+        }
+        pic->flags = 0;
+        pic->TopFieldOrderCnt = store->top_field.poc;
+        pic->BottomFieldOrderCnt = store->bottom_field.poc;
+    }
+    else
+    {
+        if (FRAME == viddec_h264_get_dec_structure(store))
+        {
+            WTRACE("reference picture structure is frame for current field picture!");
+        }
+        /* both field branches assign the same POCs; only 'flags' differs */
+        if (bottom_field)
+        {
+            pic->flags = VA_PICTURE_H264_BOTTOM_FIELD;
+            pic->TopFieldOrderCnt = store->top_field.poc;
+            pic->BottomFieldOrderCnt = store->bottom_field.poc;
+        }
+        else
+        {
+            pic->flags = VA_PICTURE_H264_TOP_FIELD;
+            pic->TopFieldOrderCnt = store->top_field.poc;
+            pic->BottomFieldOrderCnt = store->bottom_field.poc;
+        }
+    }
+}
+
+/* Build RefPicList0/RefPicList1 for a slice: picks either the reordered
+ * per-slice list or the default DPB list, then converts each entry into
+ * VAAPI VAPictureH264 form via vbp_set_VAPicture_h264secure(). */
+static inline void vbp_set_slice_ref_list_h264secure(
+    struct h264_viddec_parser* h264_parser,
+    VASliceParameterBufferH264 *slc_parms)
+{
+    int i, j;
+    int num_ref_idx_active = 0;
+    h264_Slice_Header_t* slice_header = &(h264_parser->info.SliceHeader);
+    uint8_t* p_list = NULL;
+    VAPictureH264* refPicListX = NULL;
+    frame_store* fs = NULL;
+
+    /* initialize ref picture list, set picture id and flags to invalid. */
+
+    for (i = 0; i < 2; i++)
+    {
+        refPicListX = (i == 0) ? &(slc_parms->RefPicList0[0]) : &(slc_parms->RefPicList1[0]);
+        for (j = 0; j < 32; j++)
+        {
+            refPicListX->picture_id = VA_INVALID_SURFACE;
+            refPicListX->frame_idx = 0;
+            refPicListX->flags = VA_PICTURE_H264_INVALID;
+            refPicListX->TopFieldOrderCnt = 0;
+            refPicListX->BottomFieldOrderCnt = 0;
+            refPicListX++;
+        }
+    }
+
+    for (i = 0; i < 2; i++)
+    {
+        refPicListX = (i == 0) ? &(slc_parms->RefPicList0[0]) : &(slc_parms->RefPicList1[0]);
+
+        /* list 0 applies to P and B slices; list 1 only to B slices */
+        if ((i == 0) &&
+            ((h264_PtypeB == slice_header->slice_type) ||
+             (h264_PtypeP == slice_header->slice_type)))
+        {
+            num_ref_idx_active = slice_header->num_ref_idx_l0_active;
+            if (slice_header->sh_refpic_l0.ref_pic_list_reordering_flag)
+            {
+                p_list = h264_parser->info.slice_ref_list0;
+            }
+            else
+            {
+                p_list = h264_parser->info.dpb.listX_0;
+            }
+        }
+        else if ((i == 1) && (h264_PtypeB == slice_header->slice_type))
+        {
+            num_ref_idx_active = slice_header->num_ref_idx_l1_active;
+            if (slice_header->sh_refpic_l1.ref_pic_list_reordering_flag)
+            {
+                p_list = h264_parser->info.slice_ref_list1;
+            }
+            else
+            {
+                p_list = h264_parser->info.dpb.listX_1;
+            }
+        }
+        else
+        {
+            num_ref_idx_active = 0;
+            p_list = NULL;
+        }
+
+
+        for (j = 0; j < num_ref_idx_active; j++)
+        {
+            /* low 5 bits of a list entry index into the DPB frame stores */
+            fs = &(h264_parser->info.dpb.fs[(p_list[j] & 0x1f)]);
+
+            /* bit 5 indicates if reference picture is bottom field */
+            vbp_set_VAPicture_h264secure(
+                h264_parser->info.img.structure,
+                (p_list[j] & 0x20) >> 5,
+                fs,
+                refPicListX);
+
+            refPicListX->frame_idx = fs->frame_num;
+            refPicListX->flags |= viddec_h264_get_is_long_term(fs) ? VA_PICTURE_H264_LONG_TERM_REFERENCE : VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+            refPicListX++;
+        }
+    }
+}
+
+/* Copy the explicit prediction weight table into the VA slice parameters
+ * when weighted prediction applies (P/B slice with weighted_pred_flag, or
+ * B slice with weighted_bipred_idc == 1); otherwise install a default
+ * table (log2 weight denominators of 5, all weights/offsets zeroed). */
+static inline void vbp_set_pre_weight_table_h264secure(
+    struct h264_viddec_parser* h264_parser,
+    VASliceParameterBufferH264 *slc_parms)
+{
+    h264_Slice_Header_t* slice_header = &(h264_parser->info.SliceHeader);
+    int i, j;
+
+    if ((((h264_PtypeP == slice_header->slice_type) ||
+          (h264_PtypeB == slice_header->slice_type)) &&
+          h264_parser->info.active_PPS.weighted_pred_flag) ||
+         ((h264_PtypeB == slice_header->slice_type) &&
+         (1 == h264_parser->info.active_PPS.weighted_bipred_idc)))
+    {
+        slc_parms->luma_log2_weight_denom = slice_header->sh_predwttbl.luma_log2_weight_denom;
+        slc_parms->chroma_log2_weight_denom = slice_header->sh_predwttbl.chroma_log2_weight_denom;
+        slc_parms->luma_weight_l0_flag = slice_header->sh_predwttbl.luma_weight_l0_flag;
+        slc_parms->chroma_weight_l0_flag = slice_header->sh_predwttbl.chroma_weight_l0_flag;
+        slc_parms->luma_weight_l1_flag = slice_header->sh_predwttbl.luma_weight_l1_flag;
+        slc_parms->chroma_weight_l1_flag = slice_header->sh_predwttbl.chroma_weight_l1_flag;
+
+        for (i = 0; i < 32; i++)
+        {
+            slc_parms->luma_weight_l0[i] = slice_header->sh_predwttbl.luma_weight_l0[i];
+            slc_parms->luma_offset_l0[i] = slice_header->sh_predwttbl.luma_offset_l0[i];
+            slc_parms->luma_weight_l1[i] = slice_header->sh_predwttbl.luma_weight_l1[i];
+            slc_parms->luma_offset_l1[i] = slice_header->sh_predwttbl.luma_offset_l1[i];
+
+            for (j = 0; j < 2; j++)
+            {
+                slc_parms->chroma_weight_l0[i][j] = slice_header->sh_predwttbl.chroma_weight_l0[i][j];
+                slc_parms->chroma_offset_l0[i][j] = slice_header->sh_predwttbl.chroma_offset_l0[i][j];
+                slc_parms->chroma_weight_l1[i][j] = slice_header->sh_predwttbl.chroma_weight_l1[i][j];
+                slc_parms->chroma_offset_l1[i][j] = slice_header->sh_predwttbl.chroma_offset_l1[i][j];
+            }
+        }
+    }
+    else
+    {
+        /* default weight table */
+        slc_parms->luma_log2_weight_denom = 5;
+        slc_parms->chroma_log2_weight_denom = 5;
+        slc_parms->luma_weight_l0_flag = 0;
+        slc_parms->luma_weight_l1_flag = 0;
+        slc_parms->chroma_weight_l0_flag = 0;
+        slc_parms->chroma_weight_l1_flag = 0;
+        for (i = 0; i < 32; i++)
+        {
+            slc_parms->luma_weight_l0[i] = 0;
+            slc_parms->luma_offset_l0[i] = 0;
+            slc_parms->luma_weight_l1[i] = 0;
+            slc_parms->luma_offset_l1[i] = 0;
+
+            for (j = 0; j < 2; j++)
+            {
+                slc_parms->chroma_weight_l0[i][j] = 0;
+                slc_parms->chroma_offset_l0[i][j] = 0;
+                slc_parms->chroma_weight_l1[i][j] = 0;
+                slc_parms->chroma_offset_l1[i][j] = 0;
+            }
+        }
+    }
+}
+
+
+/* Populate pic_parms->ReferenceFrames[] from the DPB: entries are first
+ * invalidated, then filled with short-term references followed by
+ * long-term references.  num_ref_frames is taken from the active SPS,
+ * not from the number of entries actually filled. */
+static inline void vbp_set_reference_frames_h264secure(
+    struct h264_viddec_parser *parser,
+    VAPictureParameterBufferH264* pic_parms)
+{
+    int buffer_idx;
+    int frame_idx;
+    frame_store* store = NULL;
+    h264_DecodedPictureBuffer* dpb = &(parser->info.dpb);
+    /* initialize reference frames */
+    for (frame_idx = 0; frame_idx < 16; frame_idx++)
+    {
+        pic_parms->ReferenceFrames[frame_idx].picture_id = VA_INVALID_SURFACE;
+        pic_parms->ReferenceFrames[frame_idx].frame_idx = 0;
+        pic_parms->ReferenceFrames[frame_idx].flags = VA_PICTURE_H264_INVALID;
+        pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = 0;
+        pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = 0;
+    }
+    pic_parms->num_ref_frames = 0;
+
+    frame_idx = 0;
+
+    /* ITRACE("short term frame in dpb %d", dpb->ref_frames_in_buffer);  */
+    /* set short term reference frames */
+    for (buffer_idx = 0; buffer_idx < dpb->ref_frames_in_buffer; buffer_idx++)
+    {
+        if (frame_idx >= 16 || buffer_idx >= 16)
+        {
+            WTRACE("Frame index is out of bound.");
+            break;
+        }
+
+        store = &dpb->fs[dpb->fs_ref_idc[buffer_idx]];
+        /* if (store->is_used == 3 && store->frame.used_for_reference == 3) */
+        if (viddec_h264_get_is_used(store))
+        {
+            pic_parms->ReferenceFrames[frame_idx].frame_idx = store->frame_num;
+            pic_parms->ReferenceFrames[frame_idx].flags = VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+            if (FRAME == parser->info.img.structure)
+            {
+                pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = store->top_field.poc;
+                pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = store->bottom_field.poc;
+            }
+            else
+            {
+                pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = store->top_field.poc;
+                pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = store->bottom_field.poc;
+                if (store->top_field.used_for_reference && store->bottom_field.used_for_reference)
+                {
+                    /* if both fields are used for reference, just set flag to be frame (0) */
+                }
+                else
+                {
+                    if (store->top_field.used_for_reference)
+                        pic_parms->ReferenceFrames[frame_idx].flags |= VA_PICTURE_H264_TOP_FIELD;
+                    if (store->bottom_field.used_for_reference)
+                        pic_parms->ReferenceFrames[frame_idx].flags |= VA_PICTURE_H264_BOTTOM_FIELD;
+                }
+            }
+        }
+        /* NOTE(review): frame_idx advances even when the store is not in
+         * use, leaving an INVALID gap entry in the list -- confirm intended. */
+        frame_idx++;
+    }
+
+    /* set long term reference frames */
+    for (buffer_idx = 0; buffer_idx < dpb->ltref_frames_in_buffer; buffer_idx++)
+    {
+        if (frame_idx >= 16 || buffer_idx >= 16)
+        {
+            WTRACE("Frame index is out of bound.");
+            break;
+        }
+        store = &dpb->fs[dpb->fs_ltref_idc[buffer_idx]];
+        if (!viddec_h264_get_is_long_term(store))
+        {
+            WTRACE("long term frame is not marked as long term.");
+        }
+        /*if (store->is_used == 3 && store->is_long_term && store->frame.used_for_reference == 3) */
+        if (viddec_h264_get_is_used(store))
+        {
+            pic_parms->ReferenceFrames[frame_idx].frame_idx = store->long_term_frame_idx;
+            pic_parms->ReferenceFrames[frame_idx].flags = VA_PICTURE_H264_LONG_TERM_REFERENCE;
+            if (FRAME == parser->info.img.structure)
+            {
+                /* NOTE(review): long-term FRAME case uses store->frame.poc for
+                 * both fields, unlike the short-term loop above which uses the
+                 * per-field POCs -- confirm the asymmetry is intended. */
+                pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = store->frame.poc;
+                pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = store->frame.poc;
+            }
+            else
+            {
+                pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = store->top_field.poc;
+                pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = store->bottom_field.poc;
+                if (store->top_field.used_for_reference && store->bottom_field.used_for_reference)
+                {
+                    /* if both fields are used for reference, just set flag to be frame (0)*/
+                }
+                else
+                {
+                    if (store->top_field.used_for_reference)
+                        pic_parms->ReferenceFrames[frame_idx].flags |= VA_PICTURE_H264_TOP_FIELD;
+                    if (store->bottom_field.used_for_reference)
+                        pic_parms->ReferenceFrames[frame_idx].flags |= VA_PICTURE_H264_BOTTOM_FIELD;
+                }
+            }
+        }
+        frame_idx++;
+    }
+
+    pic_parms->num_ref_frames = parser->info.active_SPS.num_ref_frames;
+
+    if (frame_idx > parser->info.active_SPS.num_ref_frames)
+    {
+        WTRACE("actual num_ref_frames (%d) exceeds the value in the sequence header (%d).",
+               frame_idx, parser->info.active_SPS.num_ref_frames);
+    }
+}
+
+
+static inline void vbp_set_scaling_list_h264secure(
+    struct h264_viddec_parser *parser,
+    VAIQMatrixBufferH264* IQ_matrix_buf)
+{
+    int i;
+    int lists_to_set = 6 + 2 * (parser->info.active_PPS.transform_8x8_mode_flag ? 1 : 0);
+
+    if (parser->info.active_PPS.pic_scaling_matrix_present_flag)
+    {
+        for (i = 0; i < lists_to_set; i++)
+        {
+            if (parser->info.active_PPS.pic_scaling_list_present_flag[i])
+            {
+                if (((i < 6) && parser->info.active_PPS.UseDefaultScalingMatrix4x4Flag[i]) ||
+                        ((i >= 6) && parser->info.active_PPS.UseDefaultScalingMatrix8x8Flag[i-6]))
+                {
+                    /* use default scaling list */
+                    if (i < 6)
+                    {
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i], UseDefaultList[i], 16);
+                    }
+                    else
+                    {
+                        memcpy(IQ_matrix_buf->ScalingList8x8[i - 6], UseDefaultList[i], 64);
+                    }
+                }
+                else
+                {
+                    /* use PPS list */
+                    if (i < 6)
+                    {
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i], parser->info.active_PPS.ScalingList4x4[i], 16);
+                    }
+                    else
+                    {
+                        memcpy(IQ_matrix_buf->ScalingList8x8[i - 6], parser->info.active_PPS.ScalingList8x8[i - 6], 64);
+                    }
+                }
+            }
+            else /* pic_scaling_list not present */
+            {
+                if (parser->info.active_SPS.seq_scaling_matrix_present_flag)
+                {
+                    /* SPS matrix present - use fallback rule B */
+                    switch (i)
+                    {
+                    case 0:
+                    case 3:
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i],
+                               parser->info.active_SPS.seq_scaling_list_present_flag[i] ? parser->info.active_PPS.ScalingList4x4[i] : UseDefaultList[i],
+                               16);
+                        break;
+
+                    case 6:
+                    case 7:
+                        memcpy(IQ_matrix_buf->ScalingList8x8[i - 6],
+                               parser->info.active_SPS.seq_scaling_list_present_flag[i] ? parser->info.active_PPS.ScalingList8x8[i - 6] : UseDefaultList[i],
+                               64);
+                        break;
+
+                    case 1:
+                    case 2:
+                    case 4:
+                    case 5:
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i],
+                               IQ_matrix_buf->ScalingList4x4[i - 1],
+                               16);
+                        break;
+
+                    default:
+                        //g_warning("invalid scaling list index.");
+                        break;
+                    }
+                }
+                else /* seq_scaling_matrix not present */
+                {
+                    /* SPS matrix not present - use fallback rule A */
+                    switch (i)
+                    {
+                    case 0:
+                    case 3:
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i], UseDefaultList[i], 16);
+                        break;
+
+                    case 6:
+                    case 7:
+                        memcpy(IQ_matrix_buf->ScalingList8x8[i - 6], UseDefaultList[i], 64);
+                        break;
+
+                    case 1:
+                    case 2:
+                    case 4:
+                    case 5:
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i],
+                               IQ_matrix_buf->ScalingList4x4[i - 1],
+                               16);
+                        break;
+
+                    default:
+                        WTRACE("invalid scaling list index.");
+                        break;
+                    }
+                } /* end of seq_scaling_matrix not present */
+            } /* end of  pic_scaling_list not present */
+        } /* for loop for each index from 0 to 7 */
+    } /* end of pic_scaling_matrix present */
+    else
+    {
+        /* PPS matrix not present, use SPS information */
+        if (parser->info.active_SPS.seq_scaling_matrix_present_flag)
+        {
+            for (i = 0; i < lists_to_set; i++)
+            {
+                if (parser->info.active_SPS.seq_scaling_list_present_flag[i])
+                {
+                    if (((i < 6) && parser->info.active_SPS.UseDefaultScalingMatrix4x4Flag[i]) ||
+                            ((i >= 6) && parser->info.active_SPS.UseDefaultScalingMatrix8x8Flag[i - 6]))
+                    {
+                        /* use default scaling list */
+                        if (i < 6)
+                        {
+                            memcpy(IQ_matrix_buf->ScalingList4x4[i], UseDefaultList[i], 16);
+                        }
+                        else
+                        {
+                            memcpy(IQ_matrix_buf->ScalingList8x8[i - 6], UseDefaultList[i], 64);
+                        }
+                    }
+                    else
+                    {
+                        /* use SPS list */
+                        if (i < 6)
+                        {
+                            memcpy(IQ_matrix_buf->ScalingList4x4[i], parser->info.active_SPS.ScalingList4x4[i], 16);
+                        }
+                        else
+                        {
+                            memcpy(IQ_matrix_buf->ScalingList8x8[i - 6], parser->info.active_SPS.ScalingList8x8[i - 6], 64);
+                        }
+                    }
+                }
+                else
+                {
+                    /* SPS list not present - use fallback rule A */
+                    switch (i)
+                    {
+                    case 0:
+                    case 3:
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i], UseDefaultList[i], 16);
+                        break;
+
+                    case 6:
+                    case 7:
+                        memcpy(IQ_matrix_buf->ScalingList8x8[i - 6], UseDefaultList[i], 64);
+                        break;
+
+                    case 1:
+                    case 2:
+                    case 4:
+                    case 5:
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i],
+                               IQ_matrix_buf->ScalingList4x4[i - 1],
+                               16);
+                        break;
+
+                    default:
+                        WTRACE("invalid scaling list index.");
+                        break;
+                    }
+                }
+            }
+        }
+        else
+        {
+            /* SPS matrix not present - use flat lists */
+            for (i = 0; i < 6; i++)
+            {
+                memcpy(IQ_matrix_buf->ScalingList4x4[i], quant_flat, 16);
+            }
+            for (i = 0; i < 2; i++)
+            {
+                memcpy(IQ_matrix_buf->ScalingList8x8[i], quant8_flat, 64);
+            }
+        }
+    }
+
+    if ((0 == parser->info.active_PPS.transform_8x8_mode_flag) &&
+            (parser->info.active_PPS.pic_scaling_matrix_present_flag ||
+             parser->info.active_SPS.seq_scaling_matrix_present_flag))
+    {
+        for (i = 0; i < 2; i++)
+        {
+            memcpy(IQ_matrix_buf->ScalingList8x8[i], quant8_flat, 64);
+        }
+    }
+}
+
+/**
+* Copy the active SPS/PPS fields from the h264 parser into
+* query_data->codec_data, then set new_sps/new_pps/has_sps/has_pps so
+* the caller can tell whether the stream configuration has changed
+* since the previous call.
+*/
+static void vbp_set_codec_data_h264secure(
+    struct h264_viddec_parser *parser,
+     vbp_data_h264 *query_data)
+{
+    vbp_codec_data_h264* codec_data = query_data->codec_data;
+
+    /* The following variables are used to detect if there is new SPS or PPS */
+    uint8 seq_parameter_set_id = codec_data->seq_parameter_set_id;
+    uint8 pic_parameter_set_id = codec_data->pic_parameter_set_id;
+    int frame_width = codec_data->frame_width;
+    int frame_height = codec_data->frame_height;
+
+    /* parameter id */
+    codec_data->seq_parameter_set_id = parser->info.active_SPS.seq_parameter_set_id;
+    codec_data->pic_parameter_set_id = parser->info.active_PPS.pic_parameter_set_id;
+
+    /* profile and level */
+    codec_data->profile_idc = parser->info.active_SPS.profile_idc;
+    codec_data->level_idc = parser->info.active_SPS.level_idc;
+
+
+    /* constraint flag sets (H.264 spec v2009); set0..set4 are packed
+     * MSB-first in the low 5 bits of constraint_set_flags */
+    codec_data->constraint_set0_flag = (parser->info.active_SPS.constraint_set_flags & 0x10) >> 4;
+    codec_data->constraint_set1_flag = (parser->info.active_SPS.constraint_set_flags & 0x8) >> 3;
+    codec_data->constraint_set2_flag = (parser->info.active_SPS.constraint_set_flags & 0x4) >> 2;
+    codec_data->constraint_set3_flag = (parser->info.active_SPS.constraint_set_flags & 0x2) >> 1;
+    codec_data->constraint_set4_flag = parser->info.active_SPS.constraint_set_flags & 0x1;
+
+    /* reference frames */
+    codec_data->num_ref_frames = parser->info.active_SPS.num_ref_frames;
+
+    if (!parser->info.active_SPS.sps_disp.frame_mbs_only_flag &&
+        !parser->info.active_SPS.sps_disp.mb_adaptive_frame_field_flag)
+    {
+        /* no longer necessary: two fields share the same interlaced surface */
+        /* codec_data->num_ref_frames *= 2; */
+    }
+
+    codec_data->gaps_in_frame_num_value_allowed_flag = parser->info.active_SPS.gaps_in_frame_num_value_allowed_flag;
+
+    /* frame coding */
+    codec_data->frame_mbs_only_flag = parser->info.active_SPS.sps_disp.frame_mbs_only_flag;
+    codec_data->mb_adaptive_frame_field_flag = parser->info.active_SPS.sps_disp.mb_adaptive_frame_field_flag;
+
+    /* frame dimension in pixels; height doubles for field-coded streams */
+    codec_data->frame_width = (parser->info.active_SPS.sps_disp.pic_width_in_mbs_minus1 + 1 ) * 16;
+
+    codec_data->frame_height = (2 - parser->info.active_SPS.sps_disp.frame_mbs_only_flag) *
+                               (parser->info.active_SPS.sps_disp.pic_height_in_map_units_minus1 + 1) * 16;
+
+    /* cropping information (H.264 spec 7.4.2.1.1: crop units depend on
+     * chroma format and frame/field coding) */
+    codec_data->crop_left = 0;
+    codec_data->crop_right = 0;
+    codec_data->crop_top = 0;
+    codec_data->crop_bottom = 0;
+    if(parser->info.active_SPS.sps_disp.frame_cropping_flag) {
+        int CropUnitX = 0, CropUnitY = 0, SubWidthC = 0, SubHeightC = 0;
+        int ChromaArrayType = 0;
+        if(parser->info.active_SPS.sps_disp.separate_colour_plane_flag == 0) {
+            if(parser->info.active_SPS.sps_disp.chroma_format_idc == 1) {
+                SubWidthC = 2;
+                SubHeightC = 2;
+            } else if( parser->info.active_SPS.sps_disp.chroma_format_idc == 2) {
+                SubWidthC = 2;
+                SubHeightC = 1;
+            } else if( parser->info.active_SPS.sps_disp.chroma_format_idc == 3) {
+                SubWidthC = 1;
+                SubHeightC = 1;
+            }
+            ChromaArrayType = parser->info.active_SPS.sps_disp.chroma_format_idc;
+        }
+
+        if(ChromaArrayType == 0) {
+            CropUnitX = 1;
+            CropUnitY = 2 - parser->info.active_SPS.sps_disp.frame_mbs_only_flag;
+        } else {
+            CropUnitX = SubWidthC;
+            CropUnitY = SubHeightC * ( 2 - parser->info.active_SPS.sps_disp.frame_mbs_only_flag);
+        }
+
+        codec_data->crop_left = CropUnitX * parser->info.active_SPS.sps_disp.frame_crop_rect_left_offset;
+        codec_data->crop_right = CropUnitX * parser->info.active_SPS.sps_disp.frame_crop_rect_right_offset; // + 1;
+        codec_data->crop_top = CropUnitY * parser->info.active_SPS.sps_disp.frame_crop_rect_top_offset;
+        codec_data->crop_bottom = CropUnitY * parser->info.active_SPS.sps_disp.frame_crop_rect_bottom_offset; // + 1;
+    }
+
+    /* aspect ratio: idc < 17 indexes the fixed table; 255 (Extended_SAR)
+     * carries explicit sar_width/sar_height in the VUI */
+    if (parser->info.active_SPS.sps_disp.vui_seq_parameters.aspect_ratio_info_present_flag)
+    {
+        codec_data->aspect_ratio_idc =
+            parser->info.active_SPS.sps_disp.vui_seq_parameters.aspect_ratio_idc;
+
+        if (codec_data->aspect_ratio_idc < 17)
+        {
+            codec_data->sar_width = h264_aspect_ratio_table[codec_data->aspect_ratio_idc][0];
+            codec_data->sar_height = h264_aspect_ratio_table[codec_data->aspect_ratio_idc][1];
+        }
+        else if (codec_data->aspect_ratio_idc == 255)
+        {
+            codec_data->sar_width =
+                parser->info.active_SPS.sps_disp.vui_seq_parameters.sar_width;
+
+            codec_data->sar_height =
+                parser->info.active_SPS.sps_disp.vui_seq_parameters.sar_height;
+        }
+        else
+        {
+            codec_data->sar_width = 0;
+            codec_data->sar_height = 0;
+        }
+    }
+    else
+    {
+        // unspecified
+        codec_data->aspect_ratio_idc = 0;
+        codec_data->sar_width = 0;
+        codec_data->sar_height = 0;
+    }
+
+    /* video format */
+    if (parser->info.active_SPS.sps_disp.vui_seq_parameters.video_signal_type_present_flag)
+    {
+        codec_data->video_format =
+            parser->info.active_SPS.sps_disp.vui_seq_parameters.video_format;
+    }
+    else
+    {
+        // Unspecified video format
+        codec_data->video_format = 5;
+    }
+
+    codec_data->video_full_range_flag =
+        parser->info.active_SPS.sps_disp.vui_seq_parameters.video_full_range_flag;
+
+
+    if (parser->info.active_SPS.sps_disp.vui_seq_parameters.colour_description_present_flag)
+    {
+        codec_data->matrix_coefficients =
+            parser->info.active_SPS.sps_disp.vui_seq_parameters.matrix_coefficients;
+    }
+    else
+    {
+        // Unspecified
+        codec_data->matrix_coefficients = 2;
+    }
+
+    codec_data->bit_rate = parser->info.active_SPS.sps_disp.vui_seq_parameters.bit_rate_value;
+
+    /* picture order type and count */
+    codec_data->log2_max_pic_order_cnt_lsb_minus4 = parser->info.active_SPS.log2_max_pic_order_cnt_lsb_minus4;
+    codec_data->pic_order_cnt_type = parser->info.active_SPS.pic_order_cnt_type;
+
+
+    /* update sps and pps status */
+    /* NOTE(review): new_sps compares the cached SPS id against
+     * active_PPS.seq_parameter_set_id (not active_SPS) -- this mirrors
+     * the non-secure h264 parser; confirm it is intentional. */
+    query_data->new_sps = (seq_parameter_set_id != parser->info.active_PPS.seq_parameter_set_id) ? 1 : 0;
+    query_data->new_pps = (pic_parameter_set_id != parser->info.active_PPS.pic_parameter_set_id) ? 1 : 0;
+    query_data->has_sps = parser->info.active_SPS.seq_parameter_set_id != 0xff;
+    query_data->has_pps = parser->info.active_PPS.seq_parameter_set_id != 0xff;
+    /* a resolution change implies both parameter sets changed */
+    if ( frame_width != codec_data->frame_width || frame_height != codec_data->frame_height)
+    {
+        query_data->new_sps = 1;
+        query_data->new_pps = 1;
+    }
+}
+
+
+/**
+* Fill the VAPictureParameterBufferH264 for the current picture from the
+* parsed SPS/PPS and slice header.  Bumps query_data->num_pictures when a
+* slice with first_mb_in_slice == 0 starts a new picture.
+*
+* Returns VBP_OK on success, VBP_DATA when the picture count is invalid
+* or exceeds MAX_NUM_PICTURES.
+*/
+static uint32_t vbp_add_pic_data_h264secure(vbp_context *pcontext, int list_index)
+{
+    viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
+
+    vbp_data_h264 *query_data = (vbp_data_h264 *)pcontext->query_data;
+    struct h264_viddec_parser* parser = NULL;
+    vbp_picture_data_h264* pic_data = NULL;
+    VAPictureParameterBufferH264* pic_parms = NULL;
+
+    parser = (struct h264_viddec_parser *)cxt->codec_data;
+
+    if (0 == parser->info.SliceHeader.first_mb_in_slice)
+    {
+        /* a new picture is parsed */
+        query_data->num_pictures++;
+    }
+
+    if (query_data->num_pictures == 0)
+    {
+        /* partial frame */
+        query_data->num_pictures = 1;
+    }
+
+    if (query_data->num_pictures > MAX_NUM_PICTURES)
+    {
+        ETRACE("num of pictures exceeds the limit (%d).", MAX_NUM_PICTURES);
+        return VBP_DATA;
+    }
+
+    int pic_data_index = query_data->num_pictures - 1;
+    if (pic_data_index < 0)
+    {
+        WTRACE("MB address does not start from 0!");
+        return VBP_DATA;
+    }
+
+    pic_data = &(query_data->pic_data[pic_data_index]);
+    pic_parms = pic_data->pic_parms;
+
+    // relax this condition to support partial frame parsing
+
+    //if (parser->info.SliceHeader.first_mb_in_slice == 0)
+    {
+        /**
+        * picture parameter only needs to be set once,
+        * even multiple slices may be encoded
+        */
+
+        /* VAPictureParameterBufferH264 */
+        pic_parms->CurrPic.picture_id = VA_INVALID_SURFACE;
+        pic_parms->CurrPic.frame_idx = 0;
+        if (parser->info.img.field_pic_flag == 1)
+        {
+            if (parser->info.img.bottom_field_flag)
+            {
+                pic_parms->CurrPic.flags = VA_PICTURE_H264_BOTTOM_FIELD;
+            }
+            else
+            {
+                /* also OK set to 0 (from test suite) */
+                pic_parms->CurrPic.flags = VA_PICTURE_H264_TOP_FIELD;
+            }
+        }
+        else
+        {
+            pic_parms->CurrPic.flags = 0; /* frame picture */
+        }
+        pic_parms->CurrPic.TopFieldOrderCnt = parser->info.img.toppoc;
+        pic_parms->CurrPic.BottomFieldOrderCnt = parser->info.img.bottompoc;
+        pic_parms->CurrPic.frame_idx = parser->info.SliceHeader.frame_num;
+        /* don't care if current frame is used as long term reference */
+        if (parser->info.SliceHeader.nal_ref_idc != 0)
+        {
+            pic_parms->CurrPic.flags |= VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+        }
+
+        pic_parms->picture_width_in_mbs_minus1 = parser->info.active_SPS.sps_disp.pic_width_in_mbs_minus1;
+
+        /* frame height in MBS */
+        pic_parms->picture_height_in_mbs_minus1 = (2 - parser->info.active_SPS.sps_disp.frame_mbs_only_flag) *
+                (parser->info.active_SPS.sps_disp.pic_height_in_map_units_minus1 + 1) - 1;
+
+        pic_parms->bit_depth_luma_minus8 = parser->info.active_SPS.bit_depth_luma_minus8;
+        pic_parms->bit_depth_chroma_minus8 = parser->info.active_SPS.bit_depth_chroma_minus8;
+
+
+        pic_parms->seq_fields.value = 0;
+        pic_parms->seq_fields.bits.chroma_format_idc = parser->info.active_SPS.sps_disp.chroma_format_idc;
+        pic_parms->seq_fields.bits.residual_colour_transform_flag = parser->info.active_SPS.residual_colour_transform_flag;
+        pic_parms->seq_fields.bits.frame_mbs_only_flag = parser->info.active_SPS.sps_disp.frame_mbs_only_flag;
+        pic_parms->seq_fields.bits.mb_adaptive_frame_field_flag = parser->info.active_SPS.sps_disp.mb_adaptive_frame_field_flag;
+        pic_parms->seq_fields.bits.direct_8x8_inference_flag = parser->info.active_SPS.sps_disp.direct_8x8_inference_flag;
+
+        /* new fields in libva 0.31 */
+        pic_parms->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = parser->info.active_SPS.gaps_in_frame_num_value_allowed_flag;
+        pic_parms->seq_fields.bits.log2_max_frame_num_minus4 = parser->info.active_SPS.log2_max_frame_num_minus4;
+        pic_parms->seq_fields.bits.pic_order_cnt_type = parser->info.active_SPS.pic_order_cnt_type;
+        pic_parms->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = parser->info.active_SPS.log2_max_pic_order_cnt_lsb_minus4;
+        pic_parms->seq_fields.bits.delta_pic_order_always_zero_flag =parser->info.active_SPS.delta_pic_order_always_zero_flag;
+
+
+        /* referenced from UMG_Moorstown_TestSuites */
+        pic_parms->seq_fields.bits.MinLumaBiPredSize8x8 = (parser->info.active_SPS.level_idc > 30) ? 1 : 0;
+
+        pic_parms->num_slice_groups_minus1 = parser->info.active_PPS.num_slice_groups_minus1;
+        pic_parms->slice_group_map_type = parser->info.active_PPS.slice_group_map_type;
+        pic_parms->slice_group_change_rate_minus1 = 0;
+        pic_parms->pic_init_qp_minus26 = parser->info.active_PPS.pic_init_qp_minus26;
+        pic_parms->pic_init_qs_minus26 = 0;
+        pic_parms->chroma_qp_index_offset = parser->info.active_PPS.chroma_qp_index_offset;
+        pic_parms->second_chroma_qp_index_offset = parser->info.active_PPS.second_chroma_qp_index_offset;
+
+        pic_parms->pic_fields.value = 0;
+        pic_parms->pic_fields.bits.entropy_coding_mode_flag = parser->info.active_PPS.entropy_coding_mode_flag;
+        pic_parms->pic_fields.bits.weighted_pred_flag = parser->info.active_PPS.weighted_pred_flag;
+        pic_parms->pic_fields.bits.weighted_bipred_idc = parser->info.active_PPS.weighted_bipred_idc;
+        pic_parms->pic_fields.bits.transform_8x8_mode_flag = parser->info.active_PPS.transform_8x8_mode_flag;
+
+        /* new LibVA fields in v0.31*/
+        pic_parms->pic_fields.bits.pic_order_present_flag = parser->info.active_PPS.pic_order_present_flag;
+        pic_parms->pic_fields.bits.deblocking_filter_control_present_flag = parser->info.active_PPS.deblocking_filter_control_present_flag;
+        pic_parms->pic_fields.bits.redundant_pic_cnt_present_flag = parser->info.active_PPS.redundant_pic_cnt_present_flag;
+        pic_parms->pic_fields.bits.reference_pic_flag = parser->info.SliceHeader.nal_ref_idc != 0;
+
+        /* all slices in the picture have the same field_pic_flag */
+        pic_parms->pic_fields.bits.field_pic_flag = parser->info.SliceHeader.field_pic_flag;
+        pic_parms->pic_fields.bits.constrained_intra_pred_flag = parser->info.active_PPS.constrained_intra_pred_flag;
+
+        pic_parms->frame_num = parser->info.SliceHeader.frame_num;
+
+        pic_parms->num_ref_idx_l0_default_active_minus1 = parser->info.active_PPS.num_ref_idx_l0_active-1;
+        pic_parms->num_ref_idx_l1_default_active_minus1 = parser->info.active_PPS.num_ref_idx_l1_active-1;
+    }
+
+
+    /* set reference frames, and num_ref_frames */
+    vbp_set_reference_frames_h264secure(parser, pic_parms);
+    if (parser->info.nal_unit_type == h264_NAL_UNIT_TYPE_IDR)
+    {
+        /* an IDR picture starts a fresh DPB: invalidate all reference slots */
+        int frame_idx;
+        for (frame_idx = 0; frame_idx < 16; frame_idx++)
+        {
+            pic_parms->ReferenceFrames[frame_idx].picture_id = VA_INVALID_SURFACE;
+            pic_parms->ReferenceFrames[frame_idx].frame_idx = 0;
+            pic_parms->ReferenceFrames[frame_idx].flags = VA_PICTURE_H264_INVALID;
+            pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = 0;
+            pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = 0;
+        }
+    }
+
+    return VBP_OK;
+}
+
+/**
+* Fill a VASliceParameterBufferH264 for the NAL unit at list position
+* @index, using the start/end offsets recorded in the parser list and
+* the most recently parsed slice header.
+*
+* Returns VBP_OK on success, VBP_DATA on an invalid picture index or
+* when the per-picture slice limit is reached.
+*/
+static uint32_t vbp_add_slice_data_h264secure(vbp_context *pcontext, int index)
+{
+    viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
+    uint32 bit, byte;
+    uint8 is_emul;
+
+    vbp_data_h264 *query_data = (vbp_data_h264 *)pcontext->query_data;
+    VASliceParameterBufferH264 *slc_parms = NULL;
+    vbp_slice_data_h264 *slc_data = NULL;
+    struct h264_viddec_parser* h264_parser = NULL;
+    h264_Slice_Header_t* slice_header = NULL;
+    vbp_picture_data_h264* pic_data = NULL;
+
+
+    h264_parser = (struct h264_viddec_parser *)cxt->codec_data;
+    int pic_data_index = query_data->num_pictures - 1;
+    if (pic_data_index < 0)
+    {
+        ETRACE("invalid picture data index.");
+        return VBP_DATA;
+    }
+
+    pic_data = &(query_data->pic_data[pic_data_index]);
+
+    /* fix: bound-check BEFORE writing.  The original checked
+     * num_slices > MAX_NUM_SLICES only after slc_data[num_slices] had
+     * been written, allowing an out-of-bounds write at index
+     * MAX_NUM_SLICES on the call that overflows. */
+    if (pic_data->num_slices >= MAX_NUM_SLICES)
+    {
+        ETRACE("number of slices per picture exceeds the limit (%d).", MAX_NUM_SLICES);
+        return VBP_DATA;
+    }
+
+    slc_data = &(pic_data->slc_data[pic_data->num_slices]);
+    slc_data->buffer_addr = cxt->parse_cubby.buf;
+    slc_parms = &(slc_data->slc_parms);
+
+    /* byte: how many bytes have been parsed */
+    /* bit: bits parsed within the current parsing position */
+    viddec_pm_get_au_pos(cxt, &bit, &byte, &is_emul);
+
+    slc_data->nal_unit_type = h264_parser->info.nal_unit_type;
+
+    /* slice size = distance between start and (exclusive) end offsets */
+    slc_parms->slice_data_size = slc_data->slice_size =
+                                     pcontext->parser_cxt->list.data[index].edpos -
+                                     pcontext->parser_cxt->list.data[index].stpos;
+
+    slc_parms->slice_data_offset = 0;
+
+    /* whole slice is in this buffer */
+    slc_parms->slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
+
+    /* the offset to the NAL start code for this slice */
+    slc_data->slice_offset = cxt->list.data[index].stpos;
+
+    slice_header = &(h264_parser->info.SliceHeader);
+    slc_parms->first_mb_in_slice = slice_header->first_mb_in_slice;
+
+    /* MBAFF frame pictures address macroblock pairs, so halve the MB
+     * address (fix: use logical && instead of bitwise &) */
+    if (h264_parser->info.active_SPS.sps_disp.mb_adaptive_frame_field_flag &&
+            (!(h264_parser->info.SliceHeader.field_pic_flag)))
+    {
+        slc_parms->first_mb_in_slice /= 2;
+    }
+
+    pic_data->num_slices++;
+
+    return VBP_OK;
+}
+
+
+/**
+* Update slice parameters for the secure (short-format) path: the slice
+* payload location comes from parser_private (start/offset/size supplied
+* by the secure buffer), not from the parser list.
+*
+* Returns VBP_OK on success, VBP_DATA on an invalid picture index or
+* when the per-picture slice limit is reached.
+*/
+static uint32_t vbp_update_slice_data_h264secure(vbp_context *pcontext, int index)
+{
+    viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
+    vbp_h264secure_parser_private *parser_private = (vbp_h264secure_parser_private *) pcontext->parser_private;
+    vbp_data_h264 *query_data = (vbp_data_h264 *)pcontext->query_data;
+    VASliceParameterBufferH264 *slc_parms = NULL;
+    vbp_slice_data_h264 *slc_data = NULL;
+    struct h264_viddec_parser* h264_parser = NULL;
+    h264_Slice_Header_t* slice_header = NULL;
+    vbp_picture_data_h264* pic_data = NULL;
+
+    h264_parser = (struct h264_viddec_parser *)cxt->codec_data;
+    int pic_data_index = query_data->num_pictures - 1;
+    if (pic_data_index < 0)
+    {
+        ETRACE("invalid picture data index.");
+        return VBP_DATA;
+    }
+
+    pic_data = &(query_data->pic_data[pic_data_index]);
+
+    /* fix: bound-check BEFORE writing.  The original checked
+     * num_slices > MAX_NUM_SLICES only after slc_data[num_slices] had
+     * been written, allowing an out-of-bounds write at index
+     * MAX_NUM_SLICES.  (Also dropped the unused bit/byte/is_emul
+     * locals; unlike vbp_add_slice_data_h264secure, this path never
+     * calls viddec_pm_get_au_pos.) */
+    if (pic_data->num_slices >= MAX_NUM_SLICES)
+    {
+        ETRACE("number of slices per picture exceeds the limit (%d).", MAX_NUM_SLICES);
+        return VBP_DATA;
+    }
+
+    slc_data = &(pic_data->slc_data[pic_data->num_slices]);
+    slc_parms = &(slc_data->slc_parms);
+
+    slc_parms->slice_data_size = parser_private->size;
+    slc_parms->slice_data_offset = parser_private->offset;
+
+    /* whole slice is in this buffer */
+    slc_parms->slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
+
+    /* the offset to the NAL start code for this slice */
+    slc_data->slice_offset = 0;
+    slc_data->buffer_addr  = parser_private->start;
+    slc_data->slice_size = parser_private->size + parser_private->offset;
+
+    slice_header = &(h264_parser->info.SliceHeader);
+    slc_parms->first_mb_in_slice = slice_header->first_mb_in_slice;
+
+    /* MBAFF frame pictures address macroblock pairs, so halve the MB
+     * address (fix: use logical && instead of bitwise &) */
+    if (h264_parser->info.active_SPS.sps_disp.mb_adaptive_frame_field_flag &&
+            (!(h264_parser->info.SliceHeader.field_pic_flag)))
+    {
+        slc_parms->first_mb_in_slice /= 2;
+    }
+
+    pic_data->num_slices++;
+
+    return VBP_OK;
+}
+
+
+
+/**
+* parse decoder configuration data
+*/
+/**
+* parse decoder configuration data
+*
+* Parses an AVCDecoderConfigurationRecord (ISO/IEC 14496-15) and records
+* the offset of every contained SPS and PPS in cxt->list.  If the buffer
+* is actually start-code prefixed, defers to
+* vbp_parse_start_code_h264secure() instead.
+*
+* Returns VBP_OK on success, VBP_DATA when the buffer is truncated or
+* the item list would overflow.
+*/
+uint32 vbp_parse_init_data_h264secure(vbp_context* pcontext)
+{
+    /* parsing AVCDecoderConfigurationRecord structure (see MPEG-4 part 15 spec) */
+
+    uint8 configuration_version = 0;
+    uint8 AVC_profile_indication = 0;
+    uint8 profile_compatibility = 0;
+    uint8 AVC_level_indication = 0;
+    uint8 length_size_minus_one = 0;
+    uint8 num_of_sequence_parameter_sets = 0;
+    uint8 num_of_picture_parameter_sets = 0;
+    uint16 sequence_parameter_set_length = 0;
+    uint16 picture_parameter_set_length = 0;
+
+    int i = 0;
+    viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
+
+    vbp_h264secure_parser_private *parser_private = (vbp_h264secure_parser_private *)pcontext->parser_private;
+    //Enable emulation prevention
+    cxt->getbits.is_emul_reqd = 1;
+
+    /* check if configuration data is start code prefix */
+    viddec_sc_parse_cubby_cxt_t cubby = cxt->parse_cubby;
+    viddec_parser_ops_t *ops = pcontext->parser_ops;
+    int ret = ops->parse_sc((void *)&cubby,
+                            NULL, /* context, not used */
+                            &(cxt->sc_prefix_info));
+    if (ret == 1)
+    {
+        WTRACE("configuration data is start-code prefixed.\n");
+        parser_private->bitstream_pattern = H264_BS_SC_PREFIXED;
+        return vbp_parse_start_code_h264secure(pcontext);
+    }
+
+
+    uint8* cur_data = cxt->parse_cubby.buf;
+
+
+    if (cxt->parse_cubby.size < 6)
+    {
+        /* need at least 6 bytes to start parsing the structure, see spec 15 */
+        return VBP_DATA;
+    }
+
+    configuration_version = *cur_data++;
+    AVC_profile_indication = *cur_data++;
+
+    /*ITRACE("Profile indication: %d", AVC_profile_indication); */
+
+    profile_compatibility = *cur_data++;
+    AVC_level_indication = *cur_data++;
+
+    /* ITRACE("Level indication: %d", AVC_level_indication);*/
+    /* 2 bits of length_size_minus_one, 6 bits of reserved (11111) */
+    length_size_minus_one = (*cur_data) & 0x3;
+
+    if (length_size_minus_one != 3)
+    {
+        WTRACE("length size (%d) is not equal to 4.", length_size_minus_one + 1);
+    }
+
+    parser_private->NAL_length_size = length_size_minus_one + 1;
+
+    cur_data++;
+
+    /* 3 bits of reserved (111) and 5 bits of num_of_sequence_parameter_sets */
+    num_of_sequence_parameter_sets = (*cur_data) & 0x1f;
+    if (num_of_sequence_parameter_sets > 1)
+    {
+        WTRACE("num_of_sequence_parameter_sets is %d.", num_of_sequence_parameter_sets);
+    }
+    if (num_of_sequence_parameter_sets > MAX_NUM_SPS)
+    {
+        /* this would never happen as MAX_NUM_SPS = 32 */
+        WTRACE("num_of_sequence_parameter_sets (%d) exceeds the limit (%d).", num_of_sequence_parameter_sets, MAX_NUM_SPS);
+    }
+    cur_data++;
+
+    cxt->list.num_items = 0;
+    for (i = 0; i < num_of_sequence_parameter_sets; i++)
+    {
+        if (cur_data - cxt->parse_cubby.buf + 2 > cxt->parse_cubby.size)
+        {
+            /* need at least 2 bytes to parse sequence_parameter_set_length */
+            ETRACE("Not enough data to parse SPS length.");
+            return VBP_DATA;
+        }
+
+        /* 16 bits */
+        sequence_parameter_set_length = vbp_utils_ntohs(cur_data);
+
+
+        cur_data += 2;
+
+        if (cur_data - cxt->parse_cubby.buf + sequence_parameter_set_length > cxt->parse_cubby.size)
+        {
+            /* need at least sequence_parameter_set_length bytes for SPS */
+            ETRACE("Not enough data to parse SPS.");
+            return VBP_DATA;
+        }
+
+        /* fix: guard against overflowing cxt->list.data (the original
+         * never checked MAX_IBUFS_PER_SC in this loop) */
+        if (cxt->list.num_items >= MAX_IBUFS_PER_SC)
+        {
+            ETRACE("num of list items exceeds the limit (%d).", MAX_IBUFS_PER_SC);
+            return VBP_DATA;
+        }
+
+        cxt->list.data[cxt->list.num_items].stpos = cur_data - cxt->parse_cubby.buf;
+
+        /* end pos is exclusive */
+        cxt->list.data[cxt->list.num_items].edpos =
+            cxt->list.data[cxt->list.num_items].stpos + sequence_parameter_set_length;
+
+        cxt->list.num_items++;
+
+        cur_data += sequence_parameter_set_length;
+    }
+
+    if (cur_data - cxt->parse_cubby.buf + 1 > cxt->parse_cubby.size)
+    {
+        /* need at least one more byte to parse num_of_picture_parameter_sets */
+        ETRACE("Not enough data to parse number of PPS.");
+        return VBP_DATA;
+    }
+
+    num_of_picture_parameter_sets = *cur_data++;
+    if (num_of_picture_parameter_sets > 1)
+    {
+        /* g_warning("num_of_picture_parameter_sets is %d.", num_of_picture_parameter_sets); */
+    }
+
+    for (i = 0; i < num_of_picture_parameter_sets; i++)
+    {
+        if (cur_data - cxt->parse_cubby.buf + 2 > cxt->parse_cubby.size)
+        {
+            /* need at least 2 bytes to parse picture_parameter_set_length */
+            ETRACE("Not enough data to parse PPS length.");
+            return VBP_DATA;
+        }
+
+        /* 16 bits */
+        picture_parameter_set_length = vbp_utils_ntohs(cur_data);
+
+        cur_data += 2;
+
+        if (cur_data - cxt->parse_cubby.buf + picture_parameter_set_length > cxt->parse_cubby.size)
+        {
+            /* need at least picture_parameter_set_length bytes for PPS */
+            ETRACE("Not enough data to parse PPS.");
+            return VBP_DATA;
+        }
+
+        /* fix: num_of_picture_parameter_sets can be up to 255, which
+         * could overflow cxt->list.data without this guard */
+        if (cxt->list.num_items >= MAX_IBUFS_PER_SC)
+        {
+            ETRACE("num of list items exceeds the limit (%d).", MAX_IBUFS_PER_SC);
+            return VBP_DATA;
+        }
+
+        cxt->list.data[cxt->list.num_items].stpos = cur_data - cxt->parse_cubby.buf;
+
+        /* end pos is exclusive */
+        cxt->list.data[cxt->list.num_items].edpos =
+            cxt->list.data[cxt->list.num_items].stpos + picture_parameter_set_length;
+
+        cxt->list.num_items++;
+
+        cur_data += picture_parameter_set_length;
+    }
+
+    if ((cur_data - cxt->parse_cubby.buf) !=  cxt->parse_cubby.size)
+    {
+        /* fix: cast ptrdiff_t to int to match the %d specifier */
+        WTRACE("Not all initialization data is parsed. Size = %d, parsed = %d.",
+               cxt->parse_cubby.size, (int)(cur_data - cxt->parse_cubby.buf));
+    }
+
+    parser_private->bitstream_pattern = H264_BS_LENGTH_PREFIXED;
+    return VBP_OK;
+}
+
+/**
+* Read a big-endian NAL length prefix of *NAL_length_size bytes from @p.
+* An unsupported size is logged, corrected to 4 in-place, and read as a
+* 4-byte length.
+*/
+static inline uint32_t vbp_get_NAL_length_h264(uint8_t* p, int *NAL_length_size)
+{
+    switch (*NAL_length_size)
+    {
+    case 4:
+        return vbp_utils_ntohl(p);
+
+    case 3:
+    {
+        uint32_t i = ((*p) << 16) + ((*(p+1)) << 8) + ((*(p+2)));
+        return i;
+    }
+
+    case 2:
+        return vbp_utils_ntohs(p);
+
+    case 1:
+        return *p;
+
+    default:
+        /* fix: dereference the pointer -- the original passed the int*
+         * itself to %d, which is undefined behavior and prints garbage */
+        WTRACE("invalid NAL_length_size: %d.", *NAL_length_size);
+        /* default to 4 bytes for length */
+        *NAL_length_size = 4;
+        return vbp_utils_ntohl(p);
+    }
+}
+
+/**
+* H.264 elementary stream does not have a start code.
+* Instead, it is comprised of the size of each NAL unit followed by the
+* NAL unit payload. See ISO/IEC 14496-15 (sample format).
+*/
+
+/* Start code prefix is 001 which is 3 bytes. */
+#define H264_SC_SIZE 3
+/**
+* Scan one sample buffer and fill cxt->list with the [stpos, edpos)
+* offsets of each NAL unit.  Handles length-prefixed, start-code
+* prefixed, and single-NAL bitstream patterns; may permanently switch a
+* stream from length-prefixed to start-code prefixed (one-shot check).
+*/
+uint32 vbp_parse_start_code_h264secure(vbp_context *pcontext)
+{
+    viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
+    vbp_h264secure_parser_private *parser_private = (vbp_h264secure_parser_private *)pcontext->parser_private;
+
+    /* reset query data for the new sample buffer */
+    vbp_data_h264* query_data = (vbp_data_h264*)pcontext->query_data;
+    int i;
+
+    for (i = 0; i < MAX_NUM_PICTURES; i++)
+    {
+        query_data->pic_data[i].num_slices = 0;
+    }
+    query_data->num_pictures = 0;
+
+    cxt->list.num_items = 0;
+
+    /* reset start position of first item to 0 in case there is only one item */
+    cxt->list.data[0].stpos = 0;
+
+    /* start code emulation prevention byte is present in NAL */
+    cxt->getbits.is_emul_reqd = 1;
+
+    if (parser_private->bitstream_pattern == H264_BS_LENGTH_PREFIXED)
+    {
+        viddec_sc_parse_cubby_cxt_t* cubby = NULL;
+        int32_t size_left = 0;
+        int32_t size_parsed = 0;
+        int32_t NAL_length = 0;
+
+        cubby = &(cxt->parse_cubby);
+
+        size_left = cubby->size;
+
+        /* walk the buffer: each NAL unit is <length prefix><payload>;
+         * the recorded [stpos, edpos) excludes the length prefix */
+        while (size_left >= parser_private->NAL_length_size)
+        {
+            NAL_length = vbp_get_NAL_length_h264(cubby->buf + size_parsed, &parser_private->NAL_length_size);
+            if (NAL_length <= 0 || NAL_length > size_left - parser_private->NAL_length_size)
+            {
+                ETRACE("Invalid NAL_length parsed.");
+                break;
+            }
+
+            size_parsed += parser_private->NAL_length_size;
+            cxt->list.data[cxt->list.num_items].stpos = size_parsed;
+            size_parsed += NAL_length; /* skip NAL bytes */
+            /* end position is exclusive */
+            cxt->list.data[cxt->list.num_items].edpos = size_parsed;
+            cxt->list.num_items++;
+            if (cxt->list.num_items >= MAX_IBUFS_PER_SC)
+            {
+                ETRACE("num of list items exceeds the limit (%d).", MAX_IBUFS_PER_SC);
+                break;
+            }
+
+            size_left = cubby->size - size_parsed;
+        }
+
+        /* leftover bytes mean the length prefixes did not line up with
+         * the buffer -- probe once for start codes instead */
+        if (size_left != 0 && parser_private->length_prefix_verified == 0)
+        {
+            WTRACE("Elementary stream is not aligned (%d).", size_left);
+
+            /* attempt to correct length prefix to start-code prefix only once, if it succeeds, we will
+                    * alway treat bit stream as start-code prefixed; otherwise, treat bit stream as length prefixed
+                    */
+            parser_private->length_prefix_verified = 1;
+            viddec_sc_parse_cubby_cxt_t temp_cubby = cxt->parse_cubby;
+
+            viddec_parser_ops_t *ops = pcontext->parser_ops;
+            int ret = ops->parse_sc((void *)&temp_cubby,
+                                    NULL, /* context, not used */
+                                    &(cxt->sc_prefix_info));
+
+            /* found start code */
+            if (ret == 1)
+            {
+                WTRACE("Stream was supposed to be length prefixed, but actually is start-code prefixed.");
+                parser_private->NAL_length_size = 0;
+                parser_private->bitstream_pattern = H264_BS_SC_PREFIXED;
+                /* reset parsing data */
+                for (i = 0; i < MAX_NUM_PICTURES; i++)
+                {
+                    query_data->pic_data[i].num_slices = 0;
+                }
+                query_data->num_pictures = 0;
+                cxt->list.num_items = 0;
+            }
+        }
+    }
+
+
+    if (parser_private->bitstream_pattern == H264_BS_SC_PREFIXED)
+    {
+        viddec_sc_parse_cubby_cxt_t cubby;
+        /*  memory copy without updating cxt->parse_cubby */
+        cubby = cxt->parse_cubby;
+        viddec_parser_ops_t *ops = pcontext->parser_ops;
+        int ret = 0;
+
+        /* repeatedly re-run the start-code scanner, advancing the cubby
+         * window past each start code found; the previous item's edpos
+         * is finalized when the next start code is located */
+        while (1)
+        {
+            ret = ops->parse_sc((void *)&cubby,
+                                NULL, /* context, not used */
+                                &(cxt->sc_prefix_info));
+            if (ret == 1)
+            {
+                if (cxt->list.num_items == 0)
+                {
+                    cxt->list.data[0].stpos = cubby.sc_end_pos;
+                }
+                else
+                {
+                    cxt->list.data[cxt->list.num_items].stpos =
+                        cubby.sc_end_pos + cxt->list.data[cxt->list.num_items - 1].stpos;
+                    cxt->list.data[cxt->list.num_items - 1].edpos = cxt->list.data[cxt->list.num_items].stpos - H264_SC_SIZE;
+                }
+
+                cubby.phase = 0;
+                cubby.buf = cxt->parse_cubby.buf +
+                            cxt->list.data[cxt->list.num_items].stpos;
+
+                cubby.size = cxt->parse_cubby.size -
+                             cxt->list.data[cxt->list.num_items].stpos;
+
+                cxt->list.num_items++;
+                if (cxt->list.num_items >= MAX_IBUFS_PER_SC)
+                {
+                    WTRACE("Num items exceeds the limit!");
+                    /* not fatal, just stop parsing */
+                    break;
+                }
+            }
+            else
+            {
+                /* no (more) start codes: close out the last item, or
+                 * reclassify the stream as a single NAL if none found */
+                if (cxt->list.num_items == 0)
+                {
+                    cxt->list.num_items = 1;
+                    parser_private->bitstream_pattern = H264_BS_SINGLE_NAL;
+                    WTRACE("Stream was supposed to be SC prefixed, but actually contains a single NAL.");
+                }
+                cxt->list.data[cxt->list.num_items - 1].edpos = cxt->parse_cubby.size;
+                break;
+            }
+        }
+
+    }
+
+    if (parser_private->bitstream_pattern == H264_BS_SINGLE_NAL)
+    {
+        /* the whole buffer is one NAL unit */
+        cxt->list.num_items = 1;
+        cxt->list.data[0].stpos = 0;
+        cxt->list.data[0].edpos = cxt->parse_cubby.size;
+    }
+
+    return VBP_OK;
+}
+
+/**
+*
+* process parsing result after a NAL unit is parsed
+*
+*/
+uint32 vbp_process_parsing_result_h264secure( vbp_context *pcontext, int i)
+{
+    /* 'i' indexes the parsed item list; it must fit in the slice table. */
+    if (i >= MAX_NUM_SLICES)
+    {
+        return VBP_PARM;
+    }
+
+    uint32 error = VBP_OK;
+
+    struct h264_viddec_parser* parser = NULL;
+    parser = (struct h264_viddec_parser *)&( pcontext->parser_cxt->codec_data[0]);
+    vbp_data_h264* query_data = (vbp_data_h264 *)pcontext->query_data;
+    /* Dispatch on the type of the NAL unit that was just parsed. */
+    switch (parser->info.nal_unit_type)
+    {
+    case h264_NAL_UNIT_TYPE_SLICE:
+        VTRACE("slice header is parsed.");
+        /* Record picture-level data first, then the slice itself. */
+        error = vbp_add_pic_data_h264secure(pcontext, i);
+        if (VBP_OK == error)
+        {
+            error = vbp_add_slice_data_h264secure(pcontext, i);
+        }
+        break;
+
+    case  h264_NAL_UNIT_TYPE_IDR:
+        VTRACE("IDR header is parsed.");
+        /* IDR slices follow the same path as non-IDR slices. */
+        error = vbp_add_pic_data_h264secure(pcontext, i);
+        if (VBP_OK == error)
+        {
+            error = vbp_add_slice_data_h264secure(pcontext, i);
+        }
+        break;
+    case h264_NAL_UNIT_TYPE_SEI:
+        //ITRACE("SEI header is parsed.");
+        break;
+
+    case h264_NAL_UNIT_TYPE_SPS:
+        VTRACE("SPS header is parsed.");
+        break;
+
+    case h264_NAL_UNIT_TYPE_PPS:
+        VTRACE("PPS header is parsed.");
+        break;
+
+    case h264_NAL_UNIT_TYPE_Acc_unit_delimiter:
+        VTRACE("ACC unit delimiter is parsed.");
+        break;
+
+    case h264_NAL_UNIT_TYPE_EOSeq:
+        ITRACE("EOSeq is parsed.");
+        break;
+
+    case h264_NAL_UNIT_TYPE_EOstream:
+        ITRACE("EOStream is parsed");
+        break;
+
+    default:
+        /* Non-fatal: unrecognized NAL types are logged and ignored. */
+        WTRACE("unknown header %d is parsed.", parser->info.nal_unit_type);
+        break;
+    }
+
+    /* A full picture table holding a frame (not a field) picture means more
+     * than one frame is buffered; report VBP_MULTI unless a real error has
+     * already been recorded above. */
+    if (query_data->num_pictures == MAX_NUM_PICTURES && parser->info.img.field_pic_flag != 1)
+    {
+        WTRACE("more than one frame in the buffer is found(%d)", query_data->num_pictures);
+        return (error == VBP_OK ? VBP_MULTI : error);
+    }
+    return error;
+}
+
+/*
+*
+* fill query data structure after sample buffer is parsed
+*
+*/
+uint32 vbp_populate_query_data_h264secure(vbp_context *pcontext)
+{
+    vbp_data_h264 *query_data = NULL;
+    struct h264_viddec_parser *parser = NULL;
+    /* NOTE(review): 'private' is fetched below but not used in this
+     * function; confirm whether it can be removed. */
+    struct vbp_h264_parser_private_t* private = NULL;
+
+    parser = (struct h264_viddec_parser *)pcontext->parser_cxt->codec_data;
+    query_data = (vbp_data_h264 *)pcontext->query_data;
+    private = (struct vbp_h264_parser_private_t *)pcontext->parser_private;
+
+    /* Fill the codec data section of the query structure from parser state. */
+    vbp_set_codec_data_h264secure(parser, query_data);
+
+    /* buffer number */
+    query_data->buf_number = buffer_counter;
+
+    /* VQIAMatrixBufferH264 */
+    vbp_set_scaling_list_h264secure(parser, query_data->IQ_matrix_buf);
+
+    if (query_data->num_pictures > 0)
+    {
+     /*
+        * picture parameter buffer and slice parameter buffer have been
+        * populated during NAL parsing; nothing more to do here.
+        */
+    }
+    else
+    {
+        /*
+        * No picture was parsed from this buffer: add a dummy picture that
+        * carries the picture parameters derived from SPS and PPS only.
+        */
+        vbp_add_pic_data_h264secure(pcontext, 0);
+    }
+
+    return VBP_OK;
+}
+
+/**
+ * Update the parser with out-of-band slice header data.
+ *
+ * 'newdata' is a packed stream of per-slice records, each laid out as:
+ *   [uint32 key][uint32 start][int32 offset][int32 size]
+ *   [slice_header_t][dec_ref_pic_marking_t]
+ * and terminated by a record whose key equals TERMINATE_KEY.
+ * Each record updates the low-level parser and then adds picture and slice
+ * entries to the query data. Returns VBP_OK on success, VBP_DATA when the
+ * stream is malformed (no terminator or no slice records).
+ */
+uint32 vbp_update_data_h264secure(vbp_context *pcontext, void *newdata, uint32 size)
+{
+    uint32 error = VBP_OK;
+    uint32 offset = 0;
+    uint32 key = 0;
+    /* NOTE(review): 'j' appears unused; confirm and remove. */
+    uint32 i,j;
+
+    vbp_data_h264* query_data = (vbp_data_h264*)pcontext->query_data;
+
+    /* Reset per-buffer parsing results before applying the new data. */
+    for (i = 0; i < MAX_NUM_PICTURES; i++)
+    {
+        query_data->pic_data[i].num_slices = 0;
+    }
+    query_data->num_pictures = 0;
+
+    vbp_h264secure_parser_private *parser_private = (vbp_h264secure_parser_private *) pcontext->parser_private;
+
+    int32_t  sliceheadersize;
+    uint32_t slice_num = 0;
+    while (offset < size) {
+        /* Peek the record key; TERMINATE_KEY marks end of stream. */
+        memcpy(&key, (uint8_t *)newdata+offset, sizeof(uint32_t));
+        if (key == TERMINATE_KEY) {
+            break;
+        }
+        slice_num++;
+        offset += sizeof(uint32_t);
+
+        /* Per-record fixed header: start, offset, size of the slice data. */
+        memcpy(&parser_private->start, (uint8_t *)newdata+offset, 4);
+        offset += 4;
+
+        memcpy(&parser_private->offset, (uint8_t *)newdata+offset, sizeof(int32_t));
+        offset += 4;
+
+        memcpy(&parser_private->size, (uint8_t *)newdata+offset, sizeof(int32_t));
+        offset += 4;
+
+        /* The pre-parsed slice header blob follows the fixed header.
+         * NOTE(review): 'newdata+offset' is arithmetic on void* (GNU
+         * extension); a (uint8_t *) cast would be more portable. */
+        sliceheadersize = sizeof(slice_header_t) + sizeof(dec_ref_pic_marking_t);
+        error = pcontext->parser_ops->update_data(pcontext->parser_cxt,
+                                                  newdata+offset, sliceheadersize);
+        offset += sliceheadersize;
+        if (error != VBP_OK)
+        {
+            ETRACE("update_data error = 0x%x",error);
+            return error;
+        }
+
+        error = vbp_add_pic_data_h264secure(pcontext, slice_num);
+        if (error != VBP_OK)
+        {
+            ETRACE("vbp_add_pic_data_h264secure error = 0x%x",error);
+            return error;
+        }
+
+        error = vbp_update_slice_data_h264secure(pcontext, slice_num);
+        if (error != VBP_OK)
+        {
+            ETRACE("vbp_add_slice_data_h264secure error = 0x%x",error);
+            return error;
+        }
+    }
+    /* The stream must be explicitly terminated and contain at least one
+     * slice record to be considered valid. */
+    if (key != TERMINATE_KEY)
+    {
+        ETRACE("Don't find a terminated key 0xFFFFFF!");
+        return VBP_DATA;
+    } else {
+        if (slice_num < 1) {
+            ETRACE("Don't find a valid slice header!");
+            return VBP_DATA;
+        }
+    }
+    error = vbp_populate_query_data_h264secure(pcontext);
+
+    if (error != VBP_OK)
+    {
+        ETRACE("vbp_populate_query_data_h264secure error = 0x%x",error);
+        return error;
+    }
+    return error;
+}
diff --git a/mix_vbp/viddec_fw/fw/parser/vbp_h264secure_parser.h b/mix_vbp/viddec_fw/fw/parser/vbp_h264secure_parser.h
new file mode 100644
index 0000000..a55c07c
--- /dev/null
+++ b/mix_vbp/viddec_fw/fw/parser/vbp_h264secure_parser.h
@@ -0,0 +1,70 @@
+/* INTEL CONFIDENTIAL
+* Copyright (c) 2009 Intel Corporation.  All rights reserved.
+*
+* The source code contained or described herein and all documents
+* related to the source code ("Material") are owned by Intel
+* Corporation or its suppliers or licensors.  Title to the
+* Material remains with Intel Corporation or its suppliers and
+* licensors.  The Material contains trade secrets and proprietary
+* and confidential information of Intel or its suppliers and
+* licensors. The Material is protected by worldwide copyright and
+* trade secret laws and treaty provisions.  No part of the Material
+* may be used, copied, reproduced, modified, published, uploaded,
+* posted, transmitted, distributed, or disclosed in any way without
+* Intel's prior express written permission.
+*
+* No license under any patent, copyright, trade secret or other
+* intellectual property right is granted to or conferred upon you
+* by disclosure or delivery of the Materials, either expressly, by
+* implication, inducement, estoppel or otherwise. Any license
+* under such intellectual property rights must be express and
+* approved by Intel in writing.
+*
+*/
+
+
+#ifndef VBP_H264SECURE_PARSER_H
+#define VBP_H264SECURE_PARSER_H
+
+/*
+ * setup parser's entry points
+ */
+uint32 vbp_init_parser_entries_h264secure(vbp_context *pcontext);
+
+/*
+ * allocate query data
+ */
+uint32 vbp_allocate_query_data_h264secure(vbp_context *pcontext);
+
+/*
+ * free query data
+ */
+uint32 vbp_free_query_data_h264secure(vbp_context *pcontext);
+
+/*
+ * parse initialization data
+ */
+uint32 vbp_parse_init_data_h264secure(vbp_context *pcontext);
+
+/*
+ * parse start code. Length-prefixed mode is the primary mode; a
+ * start-code prefixed stream is detected and handled as a fallback.
+ */
+uint32 vbp_parse_start_code_h264secure(vbp_context *pcontext);
+
+/*
+ * process parsing result
+ */
+uint32 vbp_process_parsing_result_h264secure(vbp_context *pcontext, int list_index);
+
+/*
+ * query parsing result
+ */
+uint32 vbp_populate_query_data_h264secure(vbp_context *pcontext);
+
+/*
+ * update the parsing result with extra data
+ */
+uint32 vbp_update_data_h264secure(vbp_context *pcontext, void *newdata, uint32 size);
+
+#endif /*VBP_H264SECURE_PARSER_H*/
diff --git a/mix_vbp/viddec_fw/fw/parser/vbp_loader.c b/mix_vbp/viddec_fw/fw/parser/vbp_loader.c
index 7797a78..972ab2d 100644
--- a/mix_vbp/viddec_fw/fw/parser/vbp_loader.c
+++ b/mix_vbp/viddec_fw/fw/parser/vbp_loader.c
@@ -173,3 +173,33 @@
 
     return error;
 }
+
+#ifdef USE_AVC_SHORT_FORMAT
+/**
+ * Public entry point: feed out-of-band data to an initialized VBP context
+ * and return the updated parsing result through 'data'.
+ * Validates all arguments and the context magic before delegating to
+ * vbp_utils_update().
+ */
+uint32 vbp_update(Handle hcontext, void *newdata, uint32 size, void **data)
+{
+    vbp_context *pcontext;
+    uint32 error = VBP_OK;
+
+    if ((NULL == hcontext) || (NULL == newdata) || (0 == size) || (NULL == data))
+    {
+        ETRACE("Invalid input parameters.");
+        return VBP_PARM;
+    }
+
+    pcontext = (vbp_context *)hcontext;
+
+    /* Magic number guards against an uninitialized or corrupted handle. */
+    if (MAGIC_NUMBER != pcontext->identifier)
+    {
+        ETRACE("context is not initialized");
+        return VBP_INIT;
+    }
+
+    error = vbp_utils_update(pcontext, newdata, size, data);
+
+    if (VBP_OK != error)
+    {
+        ETRACE("Failed to query parsing result: %d.", error);
+    }
+    return error;
+}
+#endif
diff --git a/mix_vbp/viddec_fw/fw/parser/vbp_loader.h b/mix_vbp/viddec_fw/fw/parser/vbp_loader.h
index 0655e07..ad4b106 100644
--- a/mix_vbp/viddec_fw/fw/parser/vbp_loader.h
+++ b/mix_vbp/viddec_fw/fw/parser/vbp_loader.h
@@ -404,7 +404,10 @@
     VBP_MPEG4,
     VBP_H264,
 #ifdef USE_HW_VP8
-    VBP_VP8
+    VBP_VP8,
+#endif
+#ifdef USE_AVC_SHORT_FORMAT
+    VBP_H264SECURE,
 #endif
 };
 
@@ -457,5 +460,17 @@
 uint32 vbp_flush(Handle hcontent);
 
 
+#ifdef USE_AVC_SHORT_FORMAT
+/*
+ * update the vbp context using the new data
+ * @param hcontext: handle to VBP context.
+ * @param newdata: pointer to the new data buffer.
+ * @param size: size of new data buffer.
+ * @param data: pointer to hold a data blob that contains parsing result.
+ * @returns VBP_OK on success, anything else on failure.
+ *
+*/
+uint32 vbp_update(Handle hcontext, void *newdata, uint32 size, void **data);
+#endif
 
 #endif /* VBP_LOADER_H */
diff --git a/mix_vbp/viddec_fw/fw/parser/vbp_utils.c b/mix_vbp/viddec_fw/fw/parser/vbp_utils.c
index 3983387..527fc6a 100644
--- a/mix_vbp/viddec_fw/fw/parser/vbp_utils.c
+++ b/mix_vbp/viddec_fw/fw/parser/vbp_utils.c
@@ -35,6 +35,9 @@
 #ifdef USE_HW_VP8
 #include "vbp_vp8_parser.h"
 #endif
+#ifdef USE_AVC_SHORT_FORMAT
+#include "vbp_h264secure_parser.h"
+#endif
 
 
 /* buffer counter */
@@ -127,6 +130,13 @@
 #endif
         break;
 #endif
+
+#ifdef USE_AVC_SHORT_FORMAT
+    case VBP_H264SECURE:
+        parser_name = "libmixvbp_h264secure.so";
+        break;
+#endif
+
     default:
         WTRACE("Unsupported parser type!");
         return VBP_TYPE;
@@ -167,7 +177,15 @@
 #ifdef USE_HW_VP8
         SET_FUNC_POINTER(VBP_VP8, vp8);
 #endif
+#ifdef USE_AVC_SHORT_FORMAT
+        SET_FUNC_POINTER(VBP_H264SECURE, h264secure);
+#endif
     }
+#ifdef USE_AVC_SHORT_FORMAT
+    if (pcontext->parser_type == VBP_H264SECURE) {
+        pcontext->func_update_data = vbp_update_data_h264secure;
+    }
+#endif
 
     /* set entry points for parser operations:
     	init
@@ -584,3 +602,28 @@
     return VBP_OK;
 }
 
+
+#ifdef USE_AVC_SHORT_FORMAT
+/**
+ *
+ * update the parser with new out-of-band data via the registered
+ * func_update_data hook, then hand the refreshed query data back to
+ * the consumer (NULL on failure)
+ *
+ */
+uint32 vbp_utils_update(vbp_context *pcontext, void *newdata, uint32 size, void **data)
+{
+    /* internal entry point; arguments are validated by the caller (vbp_update). */
+    uint32 error = VBP_OK;
+
+    error = pcontext->func_update_data(pcontext,newdata,size);
+
+    if (VBP_OK == error)
+    {
+        *data = pcontext->query_data;
+    }
+    else
+    {
+        *data = NULL;
+    }
+    return error;
+}
+#endif
diff --git a/mix_vbp/viddec_fw/fw/parser/vbp_utils.h b/mix_vbp/viddec_fw/fw/parser/vbp_utils.h
index 073c2c2..7761c26 100644
--- a/mix_vbp/viddec_fw/fw/parser/vbp_utils.h
+++ b/mix_vbp/viddec_fw/fw/parser/vbp_utils.h
@@ -64,8 +64,9 @@
 typedef uint32 (*function_parse_start_code)(vbp_context* cxt);
 typedef uint32 (*function_process_parsing_result)(vbp_context* cxt, int i);
 typedef uint32 (*function_populate_query_data)(vbp_context* cxt);
-
-
+#ifdef USE_AVC_SHORT_FORMAT
+typedef uint32 (*function_update_data)(vbp_context* cxt, void *newdata, uint32 size);
+#endif
 
 struct vbp_context_t
 {
@@ -103,7 +104,9 @@
     function_parse_start_code func_parse_start_code;
     function_process_parsing_result func_process_parsing_result;
     function_populate_query_data func_populate_query_data;
-
+#ifdef USE_AVC_SHORT_FORMAT
+    function_update_data func_update_data;
+#endif
 };
 
 
diff --git a/videodecoder/Android.mk b/videodecoder/Android.mk
index d032d70..7f3add5 100644
--- a/videodecoder/Android.mk
+++ b/videodecoder/Android.mk
@@ -8,7 +8,7 @@
     VideoDecoderMPEG4.cpp \
     VideoDecoderAVC.cpp \
     VideoDecoderPAVC.cpp \
-    VideoDecoderTrace.cpp \
+    VideoDecoderTrace.cpp
 
 # LOCAL_CFLAGS :=
 
@@ -18,11 +18,18 @@
     $(TARGET_OUT_HEADERS)/libmixvbp
 
 ifeq ($(USE_INTEL_SECURE_AVC),true)
+LOCAL_CFLAGS += -DUSE_INTEL_SECURE_AVC
 LOCAL_SRC_FILES += securevideo/$(TARGET_BOARD_PLATFORM)/VideoDecoderAVCSecure.cpp
 LOCAL_C_INCLUDES += $(LOCAL_PATH)/securevideo/$(TARGET_BOARD_PLATFORM)
 LOCAL_CFLAGS += -DUSE_INTEL_SECURE_AVC
 endif
 
+PLATFORM_SUPPORT_AVC_SHORT_FORMAT := \
+    baytrail
+ifneq ($(filter $(TARGET_BOARD_PLATFORM),$(PLATFORM_SUPPORT_AVC_SHORT_FORMAT)),)
+    LOCAL_CFLAGS += -DUSE_AVC_SHORT_FORMAT
+endif
+
 ifeq ($(TARGET_BOARD_PLATFORM),baytrail)
 LOCAL_CFLAGS += -DLOAD_PVR_DRIVER
 endif
diff --git a/videodecoder/VideoDecoderAVC.cpp b/videodecoder/VideoDecoderAVC.cpp
index 530d8c9..e3d67dd 100644
--- a/videodecoder/VideoDecoderAVC.cpp
+++ b/videodecoder/VideoDecoderAVC.cpp
@@ -413,6 +413,9 @@
         if (ref->flags & VA_PICTURE_H264_INVALID) {
             continue;
         }
+#ifdef USE_AVC_SHORT_FORMAT
+        ref->picture_id = findSurface(ref);
+#endif
         dpb->poc = getPOC(ref);
         // looking for the latest ref frame in the DPB with specified POC, in case frames have same POC
         dpb->surfaceBuffer = findRefSurfaceBuffer(ref);
@@ -696,15 +699,23 @@
         data->codec_data->crop_right,
         data->codec_data->crop_bottom);
 
+    int diff = data->codec_data->num_ref_frames + 1 - mOutputWindowSize;
+
+#ifndef USE_AVC_SHORT_FORMAT
     // The number of actual buffer needed is
     // outputQueue + nativewindow_owned + (diff > 0 ? diff : 1) + widi_need_max + 1(available buffer)
     // while outputQueue = DPB < 8? DPB :8
     // and diff = Reference + 1 - ouputQueue
-    int diff = data->codec_data->num_ref_frames + 1 - mOutputWindowSize;
     mVideoFormatInfo.actualBufferNeeded = mOutputWindowSize + 4 /* Owned by native window */
                                           + (diff > 0 ? diff : 1)
                                           + 6 /* WiDi maximum needs */
                                           + 1;
+#else
+    // This is for protected video playback on Baytrail
+    mVideoFormatInfo.actualBufferNeeded = mOutputWindowSize + 2 /* Owned by native window */
+                                      + (diff > 0 ? diff : 1)
+                                      + 1;
+#endif
     ITRACE("actualBufferNeeded =%d", mVideoFormatInfo.actualBufferNeeded);
 
     mVideoFormatInfo.valid = true;
diff --git a/videodecoder/VideoDecoderBase.cpp b/videodecoder/VideoDecoderBase.cpp
index 8f95ac2..c12ea5d 100755
--- a/videodecoder/VideoDecoderBase.cpp
+++ b/videodecoder/VideoDecoderBase.cpp
@@ -765,6 +765,10 @@
     CHECK_VA_STATUS("vaInitialize");
 
     if ((int32_t)profile != VAProfileSoftwareDecoding) {
+#ifdef USE_AVC_SHORT_FORMAT
+        status = getCodecSpecificConfigs(profile, &mVAConfig);
+        CHECK_STATUS("getCodecSpecificAttributes");
+#else
         //We are requesting RT attributes
         attrib.type = VAConfigAttribRTFormat;
         attrib.value = VA_RT_FORMAT_YUV420;
@@ -777,6 +781,7 @@
                 1,
                 &mVAConfig);
         CHECK_VA_STATUS("vaCreateConfig");
+#endif
     }
 
     mNumSurfaces = numSurface;
@@ -787,8 +792,10 @@
 
     int32_t format = VA_RT_FORMAT_YUV420;
     if (mConfigBuffer.flag & WANT_SURFACE_PROTECTION) {
+#ifndef USE_AVC_SHORT_FORMAT
         format |= VA_RT_FORMAT_PROTECTED;
         WTRACE("Surface is protected.");
+#endif
     }
     if (mConfigBuffer.flag & USE_NATIVE_GRAPHIC_BUFFER) {
         mVASurfaceAttrib = new VASurfaceAttributeTPI;
@@ -807,7 +814,7 @@
         mVASurfaceAttrib->height = mVideoFormatInfo.height;
         mVASurfaceAttrib->type = VAExternalMemoryAndroidGrallocBuffer;
         mVASurfaceAttrib->reserved[0] = (unsigned int)mConfigBuffer.nativeWindow;
-        
+
         for (int i = 0; i < mNumSurfaces; i++) {
             mVASurfaceAttrib->buffers[i] = (unsigned int )mConfigBuffer.graphicBufferHandler[i];
         }
@@ -1029,6 +1036,7 @@
     imageFormat.fourcc = VA_FOURCC_NV12;
     imageFormat.byte_order = VA_LSB_FIRST;
     imageFormat.bits_per_pixel = 16;
+
     vaStatus = vaCreateImage(
         mVADisplay,
         &imageFormat,
@@ -1220,3 +1228,60 @@
 
 }
 
+// This function should be called before start() to load different type of parsers
+#ifdef USE_AVC_SHORT_FORMAT
+Decode_Status VideoDecoderBase::setParserType(_vbp_parser_type type) {
+    // Select which VBP parser library this decoder instance will load.
+    // Must be called before start(); only rejects VBP_INVALID.
+    if ((int32_t)type != VBP_INVALID) {
+        ITRACE("Parser Type = %d", (int32_t)type);
+        mParserType = type;
+        return DECODE_SUCCESS;
+    }
+    else {
+        ETRACE("Invalid parser type = %d", (int32_t)type);
+        return DECODE_NO_PARSER;
+    }
+}
+
+Decode_Status VideoDecoderBase::updateBuffer(uint8_t *buffer, int32_t size, void** vbpData) {
+    // Push out-of-band data (e.g. pre-parsed slice headers) into the VBP
+    // parser and receive the updated parsing result in *vbpData.
+    if (mParserHandle == NULL) {
+        return DECODE_NO_PARSER;
+    }
+
+    uint32_t vbpStatus;
+    if (buffer == NULL || size <= 0) {
+        return DECODE_INVALID_DATA;
+    }
+
+    vbpStatus = vbp_update(mParserHandle, buffer, size, vbpData);
+    CHECK_VBP_STATUS("vbp_update");
+
+    return DECODE_SUCCESS;
+}
+
+// Create the VA configuration for this decoder. This base implementation
+// requests a plain YUV420 VLD config; it is virtual so secure/codec-specific
+// subclasses can override it with different attributes.
+Decode_Status VideoDecoderBase::getCodecSpecificConfigs(
+    VAProfile profile, VAConfigID *config)
+{
+    VAStatus vaStatus;
+    VAConfigAttrib attrib;
+    attrib.type = VAConfigAttribRTFormat;
+    attrib.value = VA_RT_FORMAT_YUV420;
+
+    if (config == NULL) {
+        ETRACE("Invalid parameter!");
+        return DECODE_FAIL;
+    }
+
+    vaStatus = vaCreateConfig(
+            mVADisplay,
+            profile,
+            VAEntrypointVLD,
+            &attrib,
+            1,          // one attribute in the list
+            config);
+
+    CHECK_VA_STATUS("vaCreateConfig");
+
+    return DECODE_SUCCESS;
+}
+#endif
+
diff --git a/videodecoder/VideoDecoderBase.h b/videodecoder/VideoDecoderBase.h
index f0c60cf..cb88622 100644
--- a/videodecoder/VideoDecoderBase.h
+++ b/videodecoder/VideoDecoderBase.h
@@ -87,6 +87,11 @@
          return ((a + 15) & (~15));
     }
 
+#ifdef USE_AVC_SHORT_FORMAT
+    Decode_Status updateBuffer(uint8_t *buffer, int32_t size, void** vbpData);
+    Decode_Status setParserType(_vbp_parser_type type);
+    virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID *config);
+#endif
 private:
     Decode_Status mapSurface(void);
     Decode_Status getRawDataFromSurface(void);
diff --git a/videodecoder/VideoDecoderHost.cpp b/videodecoder/VideoDecoderHost.cpp
index 0181343..93e86c1 100644
--- a/videodecoder/VideoDecoderHost.cpp
+++ b/videodecoder/VideoDecoderHost.cpp
@@ -57,9 +57,6 @@
                strcasecmp(mimeType, "video/3gpp") == 0) {
         VideoDecoderMPEG4 *p = new VideoDecoderMPEG4(mimeType);
         return (IVideoDecoder *)p;
-    } else if (strcasecmp(mimeType, "video/pavc") == 0) {
-        VideoDecoderAVC *p = new VideoDecoderPAVC(mimeType);
-        return (IVideoDecoder *)p;
     }
 #ifdef USE_INTEL_SECURE_AVC
     else if (strcasecmp(mimeType, "video/avc-secure") == 0) {
@@ -67,6 +64,7 @@
         return (IVideoDecoder *)p;
     }
 #endif
+
 #ifdef USE_HW_VP8
     else if (strcasecmp(mimeType, "video/vp8") == 0 ||
         strcasecmp(mimeType, "video/x-vnd.on2.vp8") == 0) {
diff --git a/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.cpp
index 3bcfd70..4ded53f 100644
--- a/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.cpp
+++ b/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.cpp
@@ -22,65 +22,21 @@
 *
 */
 
+#include "va_private.h"
 #include "VideoDecoderAVCSecure.h"
 #include "VideoDecoderTrace.h"
 #include <string.h>
 
-
-#define STARTCODE_00                0x00
-#define STARTCODE_01                0x01
 #define STARTCODE_PREFIX_LEN        3
 #define NALU_TYPE_MASK              0x1F
-
-
-// mask for little endian, to mast the second and fourth bytes in the byte stream
-#define STARTCODE_MASK0             0xFF000000 //0x00FF0000
-#define STARTCODE_MASK1             0x0000FF00  //0x000000FF
-
-
-typedef enum {
-    NAL_UNIT_TYPE_unspecified0 = 0,
-    NAL_UNIT_TYPE_SLICE,
-    NAL_UNIT_TYPE_DPA,
-    NAL_UNIT_TYPE_DPB,
-    NAL_UNIT_TYPE_DPC,
-    NAL_UNIT_TYPE_IDR,
-    NAL_UNIT_TYPE_SEI,
-    NAL_UNIT_TYPE_SPS,
-    NAL_UNIT_TYPE_PPS,
-    NAL_UNIT_TYPE_Acc_unit_delimiter,
-    NAL_UNIT_TYPE_EOSeq,
-    NAL_UNIT_TYPE_EOstream,
-    NAL_UNIT_TYPE_filler_data,
-    NAL_UNIT_TYPE_SPS_extension,
-    NAL_UNIT_TYPE_Reserved14,
-    NAL_UNIT_TYPE_Reserved15,
-    NAL_UNIT_TYPE_Reserved16,
-    NAL_UNIT_TYPE_Reserved17,
-    NAL_UNIT_TYPE_Reserved18,
-    NAL_UNIT_TYPE_ACP,
-    NAL_UNIT_TYPE_Reserved20,
-    NAL_UNIT_TYPE_Reserved21,
-    NAL_UNIT_TYPE_Reserved22,
-    NAL_UNIT_TYPE_Reserved23,
-    NAL_UNIT_TYPE_unspecified24,
-} NAL_UNIT_TYPE;
-
-#ifndef min
-#define min(X, Y)  ((X) <(Y) ? (X) : (Y))
-#endif
-
-
+#define MAX_NALU_HEADER_BUFFER      8192
 static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
 
-
 VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
     : VideoDecoderAVC(mimeType),
       mNaluHeaderBuffer(NULL),
-      mInputBuffer(NULL) {
-
-    memset(&mMetadata, 0, sizeof(NaluMetadata));
-    memset(&mByteStream, 0, sizeof(NaluByteStream));
+      mSliceHeaderBuffer(NULL) {
+    setParserType(VBP_H264SECURE);
 }
 
 VideoDecoderAVCSecure::~VideoDecoderAVCSecure() {
@@ -92,136 +48,161 @@
         return status;
     }
 
-    mMetadata.naluInfo = new NaluInfo [MAX_NALU_NUMBER];
-    mByteStream.byteStream = new uint8_t [MAX_NALU_HEADER_BUFFER];
     mNaluHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
 
-    if (mMetadata.naluInfo == NULL ||
-        mByteStream.byteStream == NULL ||
-        mNaluHeaderBuffer == NULL) {
-        ETRACE("Failed to allocate memory.");
-        // TODO: release all allocated memory
+    if (mNaluHeaderBuffer == NULL) {
+        ETRACE("Failed to allocate memory for mNaluHeaderBuffer");
         return DECODE_MEMORY_FAIL;
     }
+
+    mSliceHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
+    if (mSliceHeaderBuffer == NULL) {
+        ETRACE("Failed to allocate memory for mSliceHeaderBuffer");
+        if (mNaluHeaderBuffer) {
+            delete [] mNaluHeaderBuffer;
+            mNaluHeaderBuffer = NULL;
+        }
+        return DECODE_MEMORY_FAIL;
+    }
+
     return status;
 }
 
 void VideoDecoderAVCSecure::stop(void) {
     VideoDecoderAVC::stop();
 
-    if (mMetadata.naluInfo) {
-        delete [] mMetadata.naluInfo;
-        mMetadata.naluInfo = NULL;
-    }
-
-    if (mByteStream.byteStream) {
-        delete [] mByteStream.byteStream;
-        mByteStream.byteStream = NULL;
-    }
-
     if (mNaluHeaderBuffer) {
         delete [] mNaluHeaderBuffer;
         mNaluHeaderBuffer = NULL;
     }
+
+    if (mSliceHeaderBuffer) {
+        delete [] mSliceHeaderBuffer;
+        mSliceHeaderBuffer = NULL;
+    }
+
 }
 
 Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
     Decode_Status status;
     int32_t sizeAccumulated = 0;
+    int32_t sliceHeaderSize = 0;
     int32_t sizeLeft = 0;
-    uint8_t *pByteStream = NULL;
-    NaluInfo *pNaluInfo = mMetadata.naluInfo;
+    int32_t sliceIdx = 0;
+    uint8_t naluType;
+    frame_info_t* pFrameInfo;
 
     if (buffer->flag & IS_SECURE_DATA) {
-        pByteStream = buffer->data;
-        sizeLeft = buffer->size;
-        mInputBuffer = NULL;
+        VTRACE("Decoding protected video ...");
+        mIsEncryptData = 1;
     } else {
-        status = parseAnnexBStream(buffer->data, buffer->size, &mByteStream);
-        CHECK_STATUS("parseAnnexBStream");
-        pByteStream = mByteStream.byteStream;
-        sizeLeft = mByteStream.streamPos;
-        mInputBuffer = buffer->data;
+        VTRACE("Decoding clear video ...");
+        mIsEncryptData = 0;
+        return VideoDecoderAVC::decode(buffer);
     }
-    if (sizeLeft < 4) {
-        ETRACE("Not enough data to read number of NALU.");
+
+    if (buffer->size != sizeof(frame_info_t)) {
+        ETRACE("Not enough data to read frame_info_t!");
         return DECODE_INVALID_DATA;
     }
+    pFrameInfo = (frame_info_t*) buffer->data;
 
-    // read number of NALU
-    memcpy(&(mMetadata.naluNumber), pByteStream, sizeof(int32_t));
-    pByteStream += 4;
-    sizeLeft -= 4;
+    memcpy(&mEncParam, pFrameInfo->pavp, sizeof(pavp_info_t));
+    for (int32_t i = 0; i < pFrameInfo->num_nalus; i++) {
+        naluType = pFrameInfo->nalus[i].type & NALU_TYPE_MASK;
+        if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+            memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+                &sliceIdx,
+                sizeof(int32_t));
+            sliceHeaderSize += 4;
 
-    if (mMetadata.naluNumber == 0) {
-        WTRACE("Number of NALU is ZERO!");
-        return DECODE_SUCCESS;
-    }
+            memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+                &pFrameInfo->data,
+                sizeof(uint8_t*));
+            sliceHeaderSize += sizeof(uint8_t*);
 
-    for (int32_t i = 0; i < mMetadata.naluNumber; i++) {
-        if (sizeLeft < 12) {
-            ETRACE("Not enough data to parse NALU offset, size, header length for NALU %d, left = %d", i, sizeLeft);
-            return DECODE_INVALID_DATA;
-        }
-        sizeLeft -= 12;
-        // read NALU offset
-        memcpy(&(pNaluInfo->naluOffset), pByteStream, sizeof(int32_t));
-        pByteStream += 4;
+            memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+                &pFrameInfo->nalus[i].offset,
+                sizeof(uint32_t));
+            sliceHeaderSize += sizeof(uint32_t);
 
-        // read NALU size
-        memcpy(&(pNaluInfo->naluLen), pByteStream, sizeof(int32_t));
-        pByteStream += 4;
+            memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+                &pFrameInfo->nalus[i].length,
+                sizeof(uint32_t));
+            sliceHeaderSize += sizeof(uint32_t);
 
-        // read NALU header length
-        memcpy(&(pNaluInfo->naluHeaderLen), pByteStream, sizeof(int32_t));
-        pByteStream += 4;
-
-        if (sizeLeft < pNaluInfo->naluHeaderLen) {
-            ETRACE("Not enough data to copy NALU header for %d, left = %d, header len = %d", i, sizeLeft, pNaluInfo->naluHeaderLen);
-            return DECODE_INVALID_DATA;
-        }
-
-        sizeLeft -=  pNaluInfo->naluHeaderLen;
-
-        if (pNaluInfo->naluHeaderLen) {
-            // copy start code prefix to buffer
+            memcpy(mSliceHeaderBuffer + sliceHeaderSize,
+                pFrameInfo->nalus[i].slice_header,
+                sizeof(slice_header_t));
+            sliceHeaderSize += sizeof(slice_header_t);
+            if (pFrameInfo->nalus[i].type & 0x60) {
+                memcpy(mSliceHeaderBuffer+sliceHeaderSize, pFrameInfo->dec_ref_pic_marking, sizeof(dec_ref_pic_marking_t));
+            } else {
+                memset(mSliceHeaderBuffer+sliceHeaderSize, 0, sizeof(dec_ref_pic_marking_t));
+            }
+            sliceHeaderSize += sizeof(dec_ref_pic_marking_t);
+            sliceIdx++;
+        } else if (naluType >= h264_NAL_UNIT_TYPE_SEI && naluType <= h264_NAL_UNIT_TYPE_PPS) {
             memcpy(mNaluHeaderBuffer + sizeAccumulated,
                 startcodePrefix,
                 STARTCODE_PREFIX_LEN);
             sizeAccumulated += STARTCODE_PREFIX_LEN;
-
-            // copy NALU header
-            memcpy(mNaluHeaderBuffer + sizeAccumulated, pByteStream, pNaluInfo->naluHeaderLen);
-            pByteStream += pNaluInfo->naluHeaderLen;
-
-            sizeAccumulated += pNaluInfo->naluHeaderLen;
+            memcpy(mNaluHeaderBuffer + sizeAccumulated,
+                pFrameInfo->nalus[i].data,
+                pFrameInfo->nalus[i].length);
+            sizeAccumulated += pFrameInfo->nalus[i].length;
         } else {
-            WTRACE("header len is zero for NALU %d", i);
+            WTRACE("Failure: DECODE_FRAME_DROPPED");
+            return DECODE_FRAME_DROPPED;
         }
-
-        // for next NALU
-        pNaluInfo++;
     }
 
-    buffer->data = mNaluHeaderBuffer;
-    buffer->size = sizeAccumulated;
+    vbp_data_h264 *data = NULL;
 
-    return VideoDecoderAVC::decode(buffer);
+    if (sizeAccumulated > 0) {
+        status =  VideoDecoderBase::parseBuffer(
+                mNaluHeaderBuffer,
+                sizeAccumulated,
+                false,
+                (void**)&data);
+        CHECK_STATUS("VideoDecoderBase::parseBuffer");
+    }
+
+    if (sliceHeaderSize > 0) {
+        memset(mSliceHeaderBuffer + sliceHeaderSize, 0xFF, 4);
+        sliceHeaderSize += 4;
+        status =  VideoDecoderBase::updateBuffer(
+                mSliceHeaderBuffer,
+                sliceHeaderSize,
+                (void**)&data);
+        CHECK_STATUS("VideoDecoderBase::updateBuffer");
+    }
+
+    if (!mVAStarted) {
+         if (data->has_sps && data->has_pps) {
+            status = startVA(data);
+            CHECK_STATUS("startVA");
+        } else {
+            WTRACE("Can't start VA as either SPS or PPS is still not available.");
+            return DECODE_SUCCESS;
+        }
+    }
+    status = decodeFrame(buffer, data);
+    return status;
 }
 
-
 Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
-
     Decode_Status status;
     VAStatus vaStatus;
     uint32_t bufferIDCount = 0;
     // maximum 4 buffers to render a slice: picture parameter, IQMatrix, slice parameter, slice data
-    VABufferID bufferIDs[4];
+    VABufferID bufferIDs[5];
 
     vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
     vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
     VAPictureParameterBufferH264 *picParam = picData->pic_parms;
     VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+    VAEncryptionParameterBuffer encryptParam;
 
     if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
         // either condition indicates start of a new frame
@@ -238,16 +219,10 @@
             mAcquiredBuffer->pictureOrder= picParam->CurrPic.TopFieldOrderCnt;
         }
 
-        // Check there is no reference frame loss before decoding a frame
-
         // Update  the reference frames and surface IDs for DPB and current frame
         status = updateDPB(picParam);
         CHECK_STATUS("updateDPB");
 
-        //We have to provide a hacked DPB rather than complete DPB for libva as workaround
-        status = updateReferenceFrames(picData);
-        CHECK_STATUS("updateReferenceFrames");
-
         vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
         CHECK_VA_STATUS("vaBeginPicture");
 
@@ -275,70 +250,59 @@
             &bufferIDs[bufferIDCount]);
         CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
         bufferIDCount++;
-    }
 
-    status = setReference(sliceParam);
-    CHECK_STATUS("setReference");
+        if (mIsEncryptData) {
+            memset(&encryptParam, 0, sizeof(VAEncryptionParameterBuffer));
+            encryptParam.pavpCounterMode = 4;
+            encryptParam.pavpEncryptionType = 2;
+            encryptParam.hostEncryptMode = 2;
+            encryptParam.pavpHasBeenEnabled = 1;
+            encryptParam.app_id = 0;
+            memcpy(encryptParam.pavpAesCounter, mEncParam.iv, 16);
 
-    // find which naluinfo is correlated to current slice
-    int naluIndex = 0;
-    uint32_t accumulatedHeaderLen = 0;
-    uint32_t headerLen = 0;
-    for (; naluIndex < mMetadata.naluNumber; naluIndex++)  {
-        headerLen = mMetadata.naluInfo[naluIndex].naluHeaderLen;
-        if (headerLen == 0) {
-            WTRACE("lenght of current NAL unit is 0.");
-            continue;
+            vaStatus = vaCreateBuffer(
+                mVADisplay,
+                mVAContext,
+                (VABufferType)VAEncryptionParameterBufferType,
+                sizeof(VAEncryptionParameterBuffer),
+                1,
+                &encryptParam,
+                &bufferIDs[bufferIDCount]);
+            CHECK_VA_STATUS("vaCreateEncryptionParameterBuffer");
+            bufferIDCount++;
         }
-        accumulatedHeaderLen += STARTCODE_PREFIX_LEN;
-        if (accumulatedHeaderLen + headerLen > sliceData->slice_offset) {
-            break;
-        }
-        accumulatedHeaderLen += headerLen;
+
     }
-
-    if (sliceData->slice_offset != accumulatedHeaderLen) {
-        WTRACE("unexpected slice offset %d, accumulatedHeaderLen = %d", sliceData->slice_offset, accumulatedHeaderLen);
-    }
-
-    sliceParam->slice_data_size = mMetadata.naluInfo[naluIndex].naluLen;
-    sliceData->slice_size = sliceParam->slice_data_size;
-
-    // no need to update:
-    // sliceParam->slice_data_offset - 0 always
-    // sliceParam->slice_data_bit_offset - relative to  sliceData->slice_offset
-
     vaStatus = vaCreateBuffer(
         mVADisplay,
         mVAContext,
         VASliceParameterBufferType,
-        sizeof(VASliceParameterBufferH264),
+        sizeof(VASliceParameterBufferH264Base),
         1,
         sliceParam,
         &bufferIDs[bufferIDCount]);
+
     CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
     bufferIDCount++;
 
-    // sliceData->slice_offset - accumulatedHeaderLen is the absolute offset to start codes of current NAL unit
-    // offset points to first byte of NAL unit
-    uint32_t sliceOffset = mMetadata.naluInfo[naluIndex].naluOffset;
-    if (mInputBuffer != NULL) {
+    if (mIsEncryptData) {
         vaStatus = vaCreateBuffer(
             mVADisplay,
             mVAContext,
             VASliceDataBufferType,
             sliceData->slice_size, //size
             1,        //num_elements
-            mInputBuffer  + sliceOffset,
+            sliceData->buffer_addr + sliceData->slice_offset,
             &bufferIDs[bufferIDCount]);
     } else {
+        // This is for clear video playback
         vaStatus = vaCreateBuffer(
             mVADisplay,
             mVAContext,
-            VAProtectedSliceDataBufferType,
+            VASliceDataBufferType,
             sliceData->slice_size, //size
             1,        //num_elements
-            (uint8_t*)sliceOffset, // IMR offset
+            sliceData->buffer_addr + sliceData->slice_offset,
             &bufferIDs[bufferIDCount]);
     }
     CHECK_VA_STATUS("vaCreateSliceDataBuffer");
@@ -354,162 +318,44 @@
     return DECODE_SUCCESS;
 }
 
+Decode_Status VideoDecoderAVCSecure::getCodecSpecificConfigs(
+    VAProfile profile, VAConfigID *config)
+{
+    VAStatus vaStatus;
+    VAConfigAttrib attrib[2];
 
-// Parse byte string pattern "0x000001" (3 bytes)  in the current buffer.
-// Returns offset of position following  the pattern in the buffer if pattern is found or -1 if not found.
-int32_t VideoDecoderAVCSecure::findNalUnitOffset(uint8_t *stream, int32_t offset, int32_t length) {
-    uint8_t *ptr;
-    uint32_t left = 0, data = 0, phase = 0;
-    uint8_t mask1 = 0, mask2 = 0;
-
-    /* Meaning of phase:
-        0: initial status, "0x000001" bytes are not found so far;
-        1: one "0x00" byte is found;
-        2: two or more consecutive "0x00" bytes" are found;
-        3: "0x000001" patten is found ;
-        4: if there is one more byte after "0x000001";
-       */
-
-    left = length;
-    ptr = (uint8_t *) (stream + offset);
-    phase = 0;
-
-    // parse until there is more data and start code not found
-    while ((left > 0) && (phase < 3)) {
-        // Check if the address is 32-bit aligned & phase=0, if thats the case we can check 4 bytes instead of one byte at a time.
-        if (((((uint32_t)ptr) & 0x3) == 0) && (phase == 0)) {
-            while (left > 3) {
-                data = *((uint32_t *)ptr);
-                mask1 = (STARTCODE_00 != (data & STARTCODE_MASK0));
-                mask2 = (STARTCODE_00 != (data & STARTCODE_MASK1));
-                // If second byte and fourth byte are not zero's then we cannot have a start code here,
-                //  as we need two consecutive zero bytes for a start code pattern.
-                if (mask1 && mask2) {
-                    // skip 4 bytes and start over
-                    ptr += 4;
-                    left -=4;
-                    continue;
-                } else {
-                    break;
-                }
-            }
-        }
-
-        // At this point either data is not on a 32-bit boundary or phase > 0 so we look at one byte at a time
-        if (left > 0) {
-            if (*ptr == STARTCODE_00) {
-                phase++;
-                if (phase > 2) {
-                    // more than 2 consecutive '0x00' bytes is found
-                    phase = 2;
-                }
-            } else if ((*ptr == STARTCODE_01) && (phase == 2)) {
-                // start code is found
-                phase = 3;
-            } else {
-                // reset lookup
-                phase = 0;
-            }
-            ptr++;
-            left--;
-        }
+    if (config == NULL) {
+        ETRACE("Invalid parameter!");
+        return DECODE_FAIL;
     }
 
-    if ((left > 0) && (phase == 3)) {
-        phase = 4;
-        // return offset of position following the pattern in the buffer which matches "0x000001" byte string
-        return (int32_t)(ptr - stream);
-    }
-    return -1;
-}
+    attrib[0].type = VAConfigAttribRTFormat;
+    attrib[0].value = VA_RT_FORMAT_YUV420;
+    attrib[1].type = VAConfigAttribDecSliceMode;
+    attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
 
+    vaStatus = vaGetConfigAttributes(mVADisplay,profile,VAEntrypointVLD, &attrib[1], 1);
 
-Decode_Status VideoDecoderAVCSecure::copyNaluHeader(uint8_t *stream, NaluByteStream *naluStream) {
-    uint8_t naluType;
-    int32_t naluHeaderLen;
-
-    naluType = *(uint8_t *)(stream + naluStream->naluOffset);
-    naluType &= NALU_TYPE_MASK;
-    // first update nalu header length based on nalu type
-    if (naluType >= NAL_UNIT_TYPE_SLICE && naluType <= NAL_UNIT_TYPE_IDR) {
-        // coded slice, return only up to MAX_SLICE_HEADER_SIZE bytes
-        naluHeaderLen = min(naluStream->naluLen, MAX_SLICE_HEADER_SIZE);
-    } else if (naluType >= NAL_UNIT_TYPE_SEI && naluType <= NAL_UNIT_TYPE_PPS) {
-        //sps, pps, sei, etc, return the entire NAL unit in clear
-        naluHeaderLen = naluStream->naluLen;
+    if (attrib[1].value & VA_DEC_SLICE_MODE_BASE)
+    {
+        ITRACE("AVC short format used");
+        attrib[1].value = VA_DEC_SLICE_MODE_BASE;
+    } else if (attrib[1].value & VA_DEC_SLICE_MODE_NORMAL) {
+        ITRACE("AVC long format used");
+        attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
     } else {
-        return DECODE_FRAME_DROPPED;
+        ETRACE("Unsupported Decode Slice Mode!");
+        return DECODE_FAIL;
     }
 
-    memcpy(naluStream->byteStream + naluStream->streamPos, &(naluStream->naluOffset), sizeof(int32_t));
-    naluStream->streamPos += 4;
+    vaStatus = vaCreateConfig(
+            mVADisplay,
+            profile,
+            VAEntrypointVLD,
+            &attrib[0],
+            2,
+            config);
+    CHECK_VA_STATUS("vaCreateConfig");
 
-    memcpy(naluStream->byteStream + naluStream->streamPos, &(naluStream->naluLen), sizeof(int32_t));
-    naluStream->streamPos += 4;
-
-    memcpy(naluStream->byteStream + naluStream->streamPos, &naluHeaderLen, sizeof(int32_t));
-    naluStream->streamPos += 4;
-
-    if (naluHeaderLen) {
-        memcpy(naluStream->byteStream + naluStream->streamPos, (uint8_t*)(stream + naluStream->naluOffset), naluHeaderLen);
-        naluStream->streamPos += naluHeaderLen;
-    }
     return DECODE_SUCCESS;
 }
-
-
-// parse start-code prefixed stream, also knowns as Annex B byte stream, commonly used in AVI, ES, MPEG2 TS container
-Decode_Status VideoDecoderAVCSecure::parseAnnexBStream(uint8_t *stream, int32_t length, NaluByteStream *naluStream) {
-    int32_t naluOffset, offset, left;
-    NaluInfo *info;
-    uint32_t ret = DECODE_SUCCESS;
-
-    naluOffset = 0;
-    offset = 0;
-    left = length;
-
-    // leave 4 bytes to copy nalu count
-    naluStream->streamPos = 4;
-    naluStream->naluCount = 0;
-    memset(naluStream->byteStream, 0, MAX_NALU_HEADER_BUFFER);
-
-    for (; ;) {
-        naluOffset = findNalUnitOffset(stream, offset, left);
-        if (naluOffset == -1) {
-            break;
-        }
-
-        if (naluStream->naluCount == 0) {
-            naluStream->naluOffset = naluOffset;
-        } else {
-            naluStream->naluLen = naluOffset - naluStream->naluOffset - STARTCODE_PREFIX_LEN;
-            ret = copyNaluHeader(stream, naluStream);
-            if (ret != DECODE_SUCCESS && ret != DECODE_FRAME_DROPPED) {
-                LOGW("copyNaluHeader returned %d", ret);
-                return ret;
-            }
-            // starting position for next NALU
-            naluStream->naluOffset = naluOffset;
-        }
-
-        if (ret == DECODE_SUCCESS) {
-            naluStream->naluCount++;
-        }
-
-        // update next lookup position and length
-        offset = naluOffset + 1; // skip one byte of NAL unit type
-        left = length - offset;
-    }
-
-    if (naluStream->naluCount > 0) {
-        naluStream->naluLen = length - naluStream->naluOffset;
-        memcpy(naluStream->byteStream, &(naluStream->naluCount), sizeof(int32_t));
-        // ignore return value, either DECODE_SUCCESS or DECODE_FRAME_DROPPED
-        copyNaluHeader(stream, naluStream);
-        return DECODE_SUCCESS;
-    }
-
-    LOGW("number of valid NALU is 0!");
-    return DECODE_SUCCESS;
-}
-
diff --git a/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.h b/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.h
index af5ae44..2b2e489 100644
--- a/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.h
+++ b/videodecoder/securevideo/baytrail/VideoDecoderAVCSecure.h
@@ -26,58 +26,26 @@
 #define VIDEO_DECODER_AVC_SECURE_H_
 
 #include "VideoDecoderAVC.h"
-
+#include "secvideoparser.h"
 
 class VideoDecoderAVCSecure : public VideoDecoderAVC {
 public:
     VideoDecoderAVCSecure(const char *mimeType);
     virtual ~VideoDecoderAVCSecure();
-
     virtual Decode_Status start(VideoConfigBuffer *buffer);
     virtual void stop(void);
-
-    // data in the decoded buffer is all encrypted.
     virtual Decode_Status decode(VideoDecodeBuffer *buffer);
 
+protected:
+    virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID*config);
+
 private:
-    enum {
-        MAX_SLICE_HEADER_SIZE  = 30,
-        MAX_NALU_HEADER_BUFFER = 8192,
-        MAX_NALU_NUMBER = 400,  // > 4096/12
-    };
-
-    // Information of Network Abstraction Layer Unit
-    struct NaluInfo {
-        int32_t naluOffset;                        // offset of NAL unit in the firewalled buffer
-        int32_t naluLen;                           // length of NAL unit
-        int32_t naluHeaderLen;                     // length of NAL unit header
-    };
-
-    struct NaluMetadata {
-        NaluInfo *naluInfo;
-        int32_t naluNumber;  // number of NAL units
-    };
-
-    struct NaluByteStream {
-        int32_t naluOffset;
-        int32_t naluLen;
-        int32_t streamPos;
-        uint8_t *byteStream;   // 4 bytes of naluCount, 4 bytes of naluOffset, 4 bytes of naulLen, 4 bytes of naluHeaderLen, followed by naluHeaderData
-        int32_t naluCount;
-    };
-
     virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
-    int32_t findNalUnitOffset(uint8_t *stream, int32_t offset, int32_t length);
-    Decode_Status copyNaluHeader(uint8_t *stream, NaluByteStream *naluStream);
-    Decode_Status parseAnnexBStream(uint8_t *stream, int32_t length, NaluByteStream *naluStream);
-
 private:
-    NaluMetadata mMetadata;
-    NaluByteStream mByteStream;
+    pavp_info_t mEncParam;
     uint8_t *mNaluHeaderBuffer;
-    uint8_t *mInputBuffer;
+    uint8_t *mSliceHeaderBuffer;
+    uint32_t mIsEncryptData;
 };
 
-
-
 #endif /* VIDEO_DECODER_AVC_SECURE_H_ */
diff --git a/videodecoder/securevideo/baytrail/secvideoparser.h b/videodecoder/securevideo/baytrail/secvideoparser.h
new file mode 100644
index 0000000..18f487d
--- /dev/null
+++ b/videodecoder/securevideo/baytrail/secvideoparser.h
@@ -0,0 +1,157 @@
+/* INTEL CONFIDENTIAL
+* Copyright (c) 2013 Intel Corporation.  All rights reserved.
+*
+* The source code contained or described herein and all documents
+* related to the source code ("Material") are owned by Intel
+* Corporation or its suppliers or licensors.  Title to the
+* Material remains with Intel Corporation or its suppliers and
+* licensors.  The Material contains trade secrets and proprietary
+* and confidential information of Intel or its suppliers and
+* licensors. The Material is protected by worldwide copyright and
+* trade secret laws and treaty provisions.  No part of the Material
+* may be used, copied, reproduced, modified, published, uploaded,
+* posted, transmitted, distributed, or disclosed in any way without
+* Intel's prior express written permission.
+*
+* No license under any patent, copyright, trade secret or other
+* intellectual property right is granted to or conferred upon you
+* by disclosure or delivery of the Materials, either expressly, by
+* implication, inducement, estoppel or otherwise. Any license
+* under such intellectual property rights must be express and
+* approved by Intel in writing.
+*/
+
+#ifndef SEC_VIDEO_PARSER_H_
+#define SEC_VIDEO_PARSER_H_
+
+#include <stdint.h>
+
+/* H264 start code values */
+typedef enum _h264_nal_unit_type
+{
+    h264_NAL_UNIT_TYPE_unspecified = 0,
+    h264_NAL_UNIT_TYPE_SLICE,
+    h264_NAL_UNIT_TYPE_DPA,
+    h264_NAL_UNIT_TYPE_DPB,
+    h264_NAL_UNIT_TYPE_DPC,
+    h264_NAL_UNIT_TYPE_IDR,
+    h264_NAL_UNIT_TYPE_SEI,
+    h264_NAL_UNIT_TYPE_SPS,
+    h264_NAL_UNIT_TYPE_PPS,
+    h264_NAL_UNIT_TYPE_Acc_unit_delimiter,
+    h264_NAL_UNIT_TYPE_EOSeq,
+    h264_NAL_UNIT_TYPE_EOstream,
+    h264_NAL_UNIT_TYPE_filler_data,
+    h264_NAL_UNIT_TYPE_SPS_extension,
+    h264_NAL_UNIT_TYPE_ACP = 19,
+    h264_NAL_UNIT_TYPE_Slice_extension = 20
+} h264_nal_unit_type_t;
+
+#define MAX_OP  16
+
+enum dec_ref_pic_marking_flags {
+    IDR_PIC_FLAG = 0,
+    NO_OUTPUT_OF_PRIOR_PICS_FLAG,
+    LONG_TERM_REFERENCE_FLAG,
+    ADAPTIVE_REF_PIC_MARKING_MODE_FLAG
+};
+
+typedef struct _dec_ref_pic_marking_t {
+    union {
+        uint8_t flags;
+        struct {
+            uint8_t idr_pic_flag:1;
+            uint8_t no_output_of_prior_pics_flag:1;
+            uint8_t long_term_reference_flag:1;
+            uint8_t adaptive_ref_pic_marking_mode_flag:1;
+        };
+    };
+    struct {
+        uint8_t memory_management_control_operation;
+        union {
+            struct {
+                uint8_t difference_of_pic_nums_minus1;
+            } op1;
+            struct {
+                uint8_t long_term_pic_num;
+            } op2;
+            struct {
+                uint8_t difference_of_pic_nums_minus1;
+                uint8_t long_term_frame_idx;
+            } op3;
+            struct {
+                uint8_t max_long_term_frame_idx_plus1;
+            } op4;
+            struct {
+                uint8_t long_term_frame_idx;
+            } op6;
+        };
+    } op[MAX_OP];
+} dec_ref_pic_marking_t;
+
+enum slice_header_flags {
+    FIELD_PIC_FLAG = 0,
+    BOTTOM_FIELD_FLAG
+};
+
+typedef struct _slice_header_t {
+    uint8_t nal_unit_type;
+    uint8_t pps_id;
+    uint8_t padding;    // TODO: padding needed because flags in secfw impl. is a big-endian uint16_t
+    union {
+        uint8_t flags;
+        struct {
+            uint8_t field_pic_flag:1;
+            uint8_t bottom_field_flag:1;
+        };
+    };
+    uint32_t first_mb_in_slice;
+    uint32_t frame_num;
+    uint16_t idr_pic_id;
+    uint16_t pic_order_cnt_lsb;
+    int32_t delta_pic_order_cnt[2];
+    int32_t delta_pic_order_cnt_bottom;
+} slice_header_t;
+
+typedef struct {
+    uint8_t type;
+    uint32_t offset;
+    uint8_t* data;
+    uint32_t length;
+    slice_header_t* slice_header;
+} nalu_info_t;
+
+typedef struct {
+    uint32_t iv[4];
+    uint32_t mode;
+    uint32_t app_id;
+} pavp_info_t;
+
+#define MAX_NUM_NALUS   20
+
+typedef struct {
+    uint8_t* data;
+    uint32_t length;
+    pavp_info_t* pavp;
+    dec_ref_pic_marking_t* dec_ref_pic_marking;
+    uint32_t num_nalus;
+    nalu_info_t nalus[MAX_NUM_NALUS];
+} frame_info_t;
+
+int parser_init(void);
+int parse_frame(uint8_t* frame, uint32_t frame_size, uint8_t* nalu_data, uint32_t* nalu_data_size);
+
+// DEBUG PRINTING
+void print_slice_header(slice_header_t* slice_header);
+void print_dec_ref_pic_marking(dec_ref_pic_marking_t* dec_ref_pic_marking);
+void print_data_bytes(uint8_t* data, uint32_t count);
+void print_nalu_data(uint8_t* nalu_data, uint32_t nalu_data_size);
+
+// BYTESWAPPING
+uint16_t byteswap_16(uint16_t word);
+uint32_t byteswap_32(uint32_t dword);
+void byteswap_slice_header(slice_header_t* slice_header);
+void byteswap_dec_ref_pic_marking(dec_ref_pic_marking_t* dec_ref_pic_marking);
+void byteswap_nalu_data(uint8_t* nalu_data, uint32_t nalu_data_size);
+
+#endif /* SEC_VIDEO_PARSER_H_ */
diff --git a/videodecoder/securevideo/baytrail/va_private.h b/videodecoder/securevideo/baytrail/va_private.h
new file mode 100644
index 0000000..067e334
--- /dev/null
+++ b/videodecoder/securevideo/baytrail/va_private.h
@@ -0,0 +1,77 @@
+/*===================== begin_copyright_notice ==================================
+
+INTEL CONFIDENTIAL
+Copyright 2009-2012
+Intel Corporation All Rights Reserved.
+
+The source code contained or described herein and all documents related to the
+source code ("Material") are owned by Intel Corporation or its suppliers or
+licensors. Title to the Material remains with Intel Corporation or its suppliers
+and licensors. The Material contains trade secrets and proprietary and confidential
+information of Intel or its suppliers and licensors. The Material is protected by
+worldwide copyright and trade secret laws and treaty provisions. No part of the
+Material may be used, copied, reproduced, modified, published, uploaded, posted,
+transmitted, distributed, or disclosed in any way without Intel's prior express
+written permission.
+
+No license under any patent, copyright, trade secret or other intellectual
+property right is granted to or conferred upon you by disclosure or delivery
+of the Materials, either expressly, by implication, inducement, estoppel
+or otherwise. Any license under such intellectual property rights must be
+express and approved by Intel in writing.
+
+File Name: va_private.h
+Abstract: libva private API head file
+
+Environment: Linux/Android
+
+Notes:
+
+======================= end_copyright_notice ==================================*/
+#ifndef __VA_PRIVATE_H__
+#define __VA_PRIVATE_H__
+#include <va/va.h>
+#define ENABLE_PAVP_LINUX                   1
+// Misc parameter for encoder
+#define  VAEncMiscParameterTypePrivate     -2
+// encryption parameters for PAVP
+#define  VAEncryptionParameterBufferType   -3
+
+typedef struct _VAEncMiscParameterPrivate
+{
+    unsigned int target_usage; // Valid values 1-7 for AVC & MPEG2.
+    unsigned int reserved[7];  // Reserved for future use.
+} VAEncMiscParameterPrivate;
+
+/*VAEncrytpionParameterBuffer*/
+typedef struct _VAEncryptionParameterBuffer
+{
+    //Not used currently
+    unsigned int encryptionSupport;
+    //Not used currently
+    unsigned int hostEncryptMode;
+    // For IV, Counter input
+    unsigned int pavpAesCounter[2][4];
+    // not used currently
+    unsigned int pavpIndex;
+    // PAVP mode, CTR, CBC, DEDE etc
+    unsigned int pavpCounterMode;
+    unsigned int pavpEncryptionType;
+    // not used currently
+    unsigned int pavpInputSize[2];
+    // not used currently
+    unsigned int pavpBufferSize[2];
+    // not used currently
+    VABufferID   pvap_buf;
+    // set to TRUE if protected media
+    unsigned int pavpHasBeenEnabled;
+    // not used currently
+    unsigned int IntermmediatedBufReq;
+    // not used currently
+    unsigned int uiCounterIncrement;
+    // AppId: PAVP session Index from application
+    unsigned int app_id;
+
+} VAEncryptionParameterBuffer;
+
+#endif