Implement slice header parsing for widevine playback

BZ: 146336

This patch implements slice header parsing for Widevine
playback on Merrifield platforms.

Change-Id: Ife3dd03fadbd40ecbd4d6070e0dd1de8d811d262
Signed-off-by: wfeng6 <wei.feng@intel.com>
diff --git a/mixvbp/vbp_manager/Android.mk b/mixvbp/vbp_manager/Android.mk
index e30a0e0..d60b7d6 100755
--- a/mixvbp/vbp_manager/Android.mk
+++ b/mixvbp/vbp_manager/Android.mk
@@ -60,4 +60,12 @@
 LOCAL_SRC_FILES += secvideo/baytrail/vbp_h264secure_parser.c
 endif
 
+PLATFORM_SUPPORT_USE_SLICE_HEADER_PARSING := merrifield
+
+ifneq ($(filter $(TARGET_BOARD_PLATFORM),$(PLATFORM_SUPPORT_USE_SLICE_HEADER_PARSING)),)
+LOCAL_CFLAGS += -DUSE_SLICE_HEADER_PARSING
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/secvideo/merrifield/
+LOCAL_SRC_FILES += secvideo/merrifield/vbp_h264secure_parser.c
+endif
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/mixvbp/vbp_manager/include/viddec_parser_ops.h b/mixvbp/vbp_manager/include/viddec_parser_ops.h
index f9629d5..77054b5 100755
--- a/mixvbp/vbp_manager/include/viddec_parser_ops.h
+++ b/mixvbp/vbp_manager/include/viddec_parser_ops.h
@@ -26,7 +26,7 @@
 typedef    uint32_t (*fn_gen_contrib_tags)(void *parent, uint32_t ignore_partial);
 typedef    uint32_t (*fn_gen_assoc_tags)(void *parent);
 typedef    void (*fn_flush_parser) (void *parent, void *ctxt);
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
 typedef    uint32_t (*fn_update_data)(void *parent, void *data, uint32_t size);
 #endif
 
@@ -42,7 +42,7 @@
     fn_gen_contrib_tags gen_contrib_tags;
     fn_gen_assoc_tags gen_assoc_tags;
     fn_flush_parser flush;
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
     fn_update_data update_data;
 #endif
 } viddec_parser_ops_t;
diff --git a/mixvbp/vbp_manager/secvideo/merrifield/vbp_h264secure_parser.c b/mixvbp/vbp_manager/secvideo/merrifield/vbp_h264secure_parser.c
new file mode 100755
index 0000000..3f3eeef
--- /dev/null
+++ b/mixvbp/vbp_manager/secvideo/merrifield/vbp_h264secure_parser.c
@@ -0,0 +1,1938 @@
+/* INTEL CONFIDENTIAL
+* Copyright (c) 2009, 2012 Intel Corporation.  All rights reserved.
+*
+* The source code contained or described herein and all documents
+* related to the source code ("Material") are owned by Intel
+* Corporation or its suppliers or licensors.  Title to the
+* Material remains with Intel Corporation or its suppliers and
+* licensors.  The Material contains trade secrets and proprietary
+* and confidential information of Intel or its suppliers and
+* licensors. The Material is protected by worldwide copyright and
+* trade secret laws and treaty provisions.  No part of the Material
+* may be used, copied, reproduced, modified, published, uploaded,
+* posted, transmitted, distributed, or disclosed in any way without
+* Intel's prior express written permission.
+*
+* No license under any patent, copyright, trade secret or other
+* intellectual property right is granted to or conferred upon you
+* by disclosure or delivery of the Materials, either expressly, by
+* implication, inducement, estoppel or otherwise. Any license
+* under such intellectual property rights must be express and
+* approved by Intel in writing.
+*
+*/
+
+
+#include <dlfcn.h>
+#include <va/va.h>
+#include "h264.h"
+#include "vbp_loader.h"
+#include "vbp_utils.h"
+#include "vbp_h264secure_parser.h"
+
+typedef struct vbp_h264_parser_private_t vbp_h264_parser_private;
+
+typedef enum
+{
+    H264_BS_LENGTH_PREFIXED,
+    H264_BS_SC_PREFIXED,
+    H264_BS_SINGLE_NAL
+} H264_BS_PATTERN;
+
+#define MAX_PARSED_SLICE_NUM 16
+#define TERMINATE_KEY 0xFFFFFFFF
+#define BUF_TOO_SMALL_KEY 0xFFFFFFFE
+#define SLICE_TOO_MAY_KEY 0xFFFFFFFD
+
+typedef struct _vbp_h264_sliceheader {
+    uint32 sliceHeaderKey;
+    VAParseSliceHeaderGroupBuffer parsedSliceHeader;
+    uint32 *reorder_cmd;
+    int16 *weight;
+    uint32 *pic_marking;
+} vbp_h264_sliceheader;
+
+typedef struct _vbp_h264_sliceheadergroup {
+    uint32  sliceHeaderNum;
+    vbp_h264_sliceheader  sliceHeaders[MAX_PARSED_SLICE_NUM];
+} vbp_h264_sliceheadergroup;
+
+struct vbp_h264_parser_private_t
+{
+    /* number of bytes used to encode length of NAL payload.  If parser does not receive configuration data
+    and NAL_length_size is equal to zero when bitstream parsing begins, we assume bitstream is in AnnexB
+    byte stream format. */
+    int NAL_length_size;
+
+    /* indicate if stream is length prefixed */
+    int length_prefix_verified;
+
+    H264_BS_PATTERN bitstream_pattern;
+};
+
+/* default scaling list table */
+static uint8 Default_4x4_Intra[16] =
+{
+    6,13,20,28,
+    13,20,28,32,
+    20,28,32,37,
+    28,32,37,42
+};
+
+static uint8 Default_4x4_Inter[16] =
+{
+    10,14,20,24,
+    14,20,24,27,
+    20,24,27,30,
+    24,27,30,34
+};
+
+static uint8 Default_8x8_Intra[64] =
+{
+    6,10,13,16,18,23,25,27,
+    10,11,16,18,23,25,27,29,
+    13,16,18,23,25,27,29,31,
+    16,18,23,25,27,29,31,33,
+    18,23,25,27,29,31,33,36,
+    23,25,27,29,31,33,36,38,
+    25,27,29,31,33,36,38,40,
+    27,29,31,33,36,38,40,42
+};
+
+static uint8 Default_8x8_Inter[64] =
+{
+    9,13,15,17,19,21,22,24,
+    13,13,17,19,21,22,24,25,
+    15,17,19,21,22,24,25,27,
+    17,19,21,22,24,25,27,28,
+    19,21,22,24,25,27,28,30,
+    21,22,24,25,27,28,30,32,
+    22,24,25,27,28,30,32,33,
+    24,25,27,28,30,32,33,35
+};
+
+static uint8 quant_flat[16] =
+{
+    16,16,16,16,
+    16,16,16,16,
+    16,16,16,16,
+    16,16,16,16
+};
+
+static uint8 quant8_flat[64] =
+{
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16,
+    16,16,16,16,16,16,16,16
+};
+
+static uint8* UseDefaultList[8] =
+{
+    Default_4x4_Intra, Default_4x4_Intra, Default_4x4_Intra,
+    Default_4x4_Inter, Default_4x4_Inter, Default_4x4_Inter,
+    Default_8x8_Intra,
+    Default_8x8_Inter
+};
+
+static uint8 h264_aspect_ratio_table[][2] =
+{
+    {0, 0},
+    {1, 1},
+    {12, 11},
+    {10, 11},
+    {16, 11},
+    {40, 33},
+    {24, 11},
+    {20, 11},
+    {32, 11},
+    {80, 33},
+    {18, 11},
+    {15, 11},
+    {64, 33},
+    {160, 99},
+    {4, 3},
+    {3, 2},
+    {2, 1},
+    {0, 0}
+};
+
+
+/**
+ *
+ */
+uint32 vbp_init_parser_entries_h264secure(vbp_context *pcontext)
+{
+    if (NULL == pcontext->parser_ops)
+    {
+        return VBP_PARM;
+    }
+
+    pcontext->parser_ops->init = dlsym(pcontext->fd_parser, "viddec_h264secure_init");
+    if (NULL == pcontext->parser_ops->init)
+    {
+        ETRACE ("Failed to set entry point.");
+        return VBP_LOAD;
+    }
+
+    pcontext->parser_ops->parse_sc = viddec_parse_sc;
+
+    pcontext->parser_ops->parse_syntax = dlsym(pcontext->fd_parser, "viddec_h264secure_parse");
+    if (NULL == pcontext->parser_ops->parse_syntax)
+    {
+        ETRACE ("Failed to set entry point.");
+        return VBP_LOAD;
+    }
+
+    pcontext->parser_ops->get_cxt_size = dlsym(pcontext->fd_parser, "viddec_h264secure_get_context_size");
+    if (NULL == pcontext->parser_ops->get_cxt_size)
+    {
+        ETRACE ("Failed to set entry point.");
+        return VBP_LOAD;
+    }
+
+    pcontext->parser_ops->is_wkld_done = NULL;
+    pcontext->parser_ops->flush = NULL;
+    pcontext->parser_ops->update_data = dlsym(pcontext->fd_parser, "viddec_h264secure_update");
+    if (NULL == pcontext->parser_ops->update_data)
+    {
+        ETRACE ("Failed to set entry point.");
+        return VBP_LOAD;
+    }
+/*
+    pcontext->parser_ops->flush = dlsym(pcontext->fd_parser, "viddec_h264secure_flush");
+    if (NULL == pcontext->parser_ops->flush)
+    {
+        ETRACE ("Failed to set entry point.");
+        return VBP_LOAD;
+    }
+*/
+    /* entry point not needed */
+    pcontext->parser_ops->is_frame_start = NULL;
+    return VBP_OK;
+}
+
+
+/**
+ *
+ */
+uint32 vbp_allocate_query_data_h264secure(vbp_context *pcontext)
+{
+    if (NULL != pcontext->query_data)
+    {
+        return VBP_PARM;
+    }
+
+    pcontext->query_data = NULL;
+    vbp_data_h264 *query_data = NULL;
+
+    query_data = vbp_malloc_set0(vbp_data_h264, 1);
+    if (NULL == query_data)
+    {
+        goto cleanup;
+    }
+
+    /* assign the pointer */
+    pcontext->query_data = (void *)query_data;
+
+    query_data->pic_data = vbp_malloc_set0(vbp_picture_data_h264, MAX_NUM_PICTURES);
+    if (NULL == query_data->pic_data)
+    {
+        goto cleanup;
+    }
+
+    int i;
+    for (i = 0; i < MAX_NUM_PICTURES; i++)
+    {
+        query_data->pic_data[i].pic_parms = vbp_malloc_set0(VAPictureParameterBufferH264, 1);
+        if (NULL == query_data->pic_data[i].pic_parms)
+        {
+            goto cleanup;
+        }
+        query_data->pic_data[i].num_slices = 0;
+        query_data->pic_data[i].slc_data = vbp_malloc_set0(vbp_slice_data_h264, MAX_NUM_SLICES);
+        if (NULL == query_data->pic_data[i].slc_data)
+        {
+            goto cleanup;
+        }
+    }
+
+    query_data->IQ_matrix_buf = vbp_malloc_set0(VAIQMatrixBufferH264, 1);
+    if (NULL == query_data->IQ_matrix_buf)
+    {
+        goto cleanup;
+    }
+
+    query_data->codec_data = vbp_malloc_set0(vbp_codec_data_h264, 1);
+    if (NULL == query_data->codec_data)
+    {
+        goto cleanup;
+    }
+
+    pcontext->parser_private = NULL;
+    vbp_h264_parser_private *parser_private = NULL;
+
+    parser_private = vbp_malloc_set0(vbp_h264_parser_private, 1);
+    if (NULL == parser_private)
+    {
+        goto cleanup;
+    }
+
+    /* assign the pointer */
+    pcontext->parser_private = (void *)parser_private;
+
+    /* init the pointer */
+    parser_private->NAL_length_size = 0;
+
+    parser_private->length_prefix_verified = 0;
+
+    parser_private->bitstream_pattern = H264_BS_SC_PREFIXED;
+
+    query_data->pic_parse_buffer = vbp_malloc_set0(VAParsePictureParameterBuffer,1);
+    if (NULL == query_data->pic_parse_buffer)
+    {
+        goto cleanup;
+    }
+
+    return VBP_OK;
+
+cleanup:
+    vbp_free_query_data_h264secure(pcontext);
+
+    return VBP_MEM;
+}
+
+uint32 vbp_free_query_data_h264secure(vbp_context *pcontext)
+{
+    if (NULL != pcontext->parser_private)
+    {
+        free(pcontext->parser_private);
+        pcontext->parser_private = NULL;
+    }
+
+    if (NULL == pcontext->query_data)
+    {
+        return VBP_OK;
+    }
+
+    int i;
+    vbp_data_h264 *query_data;
+    query_data = (vbp_data_h264 *)pcontext->query_data;
+
+    if (query_data->pic_data)
+    {
+        for (i = 0; i < MAX_NUM_PICTURES; i++)
+        {
+            if (query_data->pic_data[i].slc_data)
+            {
+                free(query_data->pic_data[i].slc_data);
+                query_data->pic_data[i].slc_data = NULL;
+            }
+            if (query_data->pic_data[i].pic_parms)
+            {
+                free(query_data->pic_data[i].pic_parms);
+                query_data->pic_data[i].pic_parms = NULL;
+            }
+        }
+        free(query_data->pic_data);
+        query_data->pic_data = NULL;
+    }
+    if (query_data->IQ_matrix_buf)
+    {
+        free(query_data->IQ_matrix_buf);
+        query_data->IQ_matrix_buf = NULL;
+    }
+    if (query_data->codec_data)
+    {
+        free(query_data->codec_data);
+        query_data->codec_data = NULL;
+    }
+    if (query_data->pic_parse_buffer)
+    {
+        free(query_data->pic_parse_buffer);
+        query_data->pic_parse_buffer = NULL;
+    }
+    free(query_data);
+    pcontext->query_data = NULL;
+
+    return VBP_OK;
+}
+
+
+static inline uint16_t vbp_utils_ntohs(uint8_t* p)
+{
+    uint16_t i = ((*p) << 8) + ((*(p+1)));
+    return i;
+}
+
+static inline uint32_t vbp_utils_ntohl(uint8_t* p)
+{
+    uint32_t i = ((*p) << 24) + ((*(p+1)) << 16) + ((*(p+2)) << 8) + ((*(p+3)));
+    return i;
+}
+
+
+static void vbp_set_VAPicture_h264secure(
+    int curr_picture_structure,
+    int bottom_field,
+    frame_store* store,
+    VAPictureH264* pic)
+{
+    if (FRAME == curr_picture_structure)
+    {
+        if (FRAME != viddec_h264_get_dec_structure(store))
+        {
+            WTRACE("Reference picture structure is not frame for current frame picture!");
+        }
+        pic->flags = 0;
+        pic->TopFieldOrderCnt = store->top_field.poc;
+        pic->BottomFieldOrderCnt = store->bottom_field.poc;
+    }
+    else
+    {
+        if (FRAME == viddec_h264_get_dec_structure(store))
+        {
+            WTRACE("reference picture structure is frame for current field picture!");
+        }
+        if (bottom_field)
+        {
+            pic->flags = VA_PICTURE_H264_BOTTOM_FIELD;
+            pic->TopFieldOrderCnt = store->top_field.poc;
+            pic->BottomFieldOrderCnt = store->bottom_field.poc;
+        }
+        else
+        {
+            pic->flags = VA_PICTURE_H264_TOP_FIELD;
+            pic->TopFieldOrderCnt = store->top_field.poc;
+            pic->BottomFieldOrderCnt = store->bottom_field.poc;
+        }
+    }
+}
+
+static void vbp_set_slice_ref_list_h264secure(
+    struct h264_viddec_parser* h264_parser,
+    VASliceParameterBufferH264 *slc_parms)
+{
+    VTRACE("vbp_set_slice_ref_list_h264secure +++");
+    int i, j;
+    int num_ref_idx_active = 0;
+    h264_Slice_Header_t* slice_header = &(h264_parser->info.SliceHeader);
+    uint8_t* p_list = NULL;
+    VAPictureH264* refPicListX = NULL;
+    frame_store* fs = NULL;
+
+    /* initialize ref picture list, set picture id and flags to invalid. */
+
+    VTRACE("slice_header->slice_type = %d", slice_header->slice_type);
+    for (i = 0; i < 2; i++)
+    {
+        refPicListX = (i == 0) ? &(slc_parms->RefPicList0[0]) : &(slc_parms->RefPicList1[0]);
+        for (j = 0; j < 32; j++)
+        {
+            refPicListX->picture_id = VA_INVALID_SURFACE;
+            refPicListX->frame_idx = 0;
+            refPicListX->flags = VA_PICTURE_H264_INVALID;
+            refPicListX->TopFieldOrderCnt = 0;
+            refPicListX->BottomFieldOrderCnt = 0;
+            refPicListX++;
+        }
+    }
+
+    for (i = 0; i < 2; i++)
+    {
+        refPicListX = (i == 0) ? &(slc_parms->RefPicList0[0]) : &(slc_parms->RefPicList1[0]);
+
+        if ((i == 0) &&
+            ((h264_PtypeB == slice_header->slice_type) ||
+             (h264_PtypeP == slice_header->slice_type)))
+        {
+            num_ref_idx_active = slice_header->num_ref_idx_l0_active;
+            if (slice_header->sh_refpic_l0.ref_pic_list_reordering_flag)
+            {
+                p_list = h264_parser->info.slice_ref_list0;
+            }
+            else
+            {
+                p_list = h264_parser->info.dpb.listX_0;
+            }
+        }
+        else if ((i == 1) && (h264_PtypeB == slice_header->slice_type))
+        {
+            VTRACE("num_ref_idx_l1_active = %d", slice_header->num_ref_idx_l1_active);
+            num_ref_idx_active = slice_header->num_ref_idx_l1_active;
+            if (slice_header->sh_refpic_l1.ref_pic_list_reordering_flag)
+            {
+                p_list = h264_parser->info.slice_ref_list1;
+            }
+            else
+            {
+                p_list = h264_parser->info.dpb.listX_1;
+            }
+        }
+        else
+        {
+            num_ref_idx_active = 0;
+            p_list = NULL;
+        }
+
+
+        for (j = 0; j < num_ref_idx_active; j++)
+        {
+            fs = &(h264_parser->info.dpb.fs[(p_list[j] & 0x1f)]);
+
+            /* bit 5 indicates if reference picture is bottom field */
+            vbp_set_VAPicture_h264secure(
+                h264_parser->info.img.structure,
+                (p_list[j] & 0x20) >> 5,
+                fs,
+                refPicListX);
+
+            refPicListX->frame_idx = fs->frame_num;
+            refPicListX->flags |= viddec_h264_get_is_long_term(fs) ? VA_PICTURE_H264_LONG_TERM_REFERENCE : VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+            refPicListX++;
+        }
+    }
+
+    VTRACE("vbp_set_slice_ref_list_h264secure ---");
+}
+
+static void vbp_set_pre_weight_table_h264secure(
+    struct h264_viddec_parser* h264_parser,
+    VASliceParameterBufferH264 *slc_parms)
+{
+    h264_Slice_Header_t* slice_header = &(h264_parser->info.SliceHeader);
+    int i, j;
+    if ((((h264_PtypeP == slice_header->slice_type) ||
+          (h264_PtypeB == slice_header->slice_type)) &&
+          h264_parser->info.active_PPS.weighted_pred_flag) ||
+         ((h264_PtypeB == slice_header->slice_type) &&
+         (1 == h264_parser->info.active_PPS.weighted_bipred_idc)))
+    {
+        slc_parms->luma_log2_weight_denom = slice_header->sh_predwttbl.luma_log2_weight_denom;
+        slc_parms->chroma_log2_weight_denom = slice_header->sh_predwttbl.chroma_log2_weight_denom;
+        slc_parms->luma_weight_l0_flag = slice_header->sh_predwttbl.luma_weight_l0_flag;
+        slc_parms->chroma_weight_l0_flag = slice_header->sh_predwttbl.chroma_weight_l0_flag;
+        slc_parms->luma_weight_l1_flag = slice_header->sh_predwttbl.luma_weight_l1_flag;
+        slc_parms->chroma_weight_l1_flag = slice_header->sh_predwttbl.chroma_weight_l1_flag;
+
+        for (i = 0; i < 32; i++)
+        {
+            slc_parms->luma_weight_l0[i] = slice_header->sh_predwttbl.luma_weight_l0[i];
+            slc_parms->luma_offset_l0[i] = slice_header->sh_predwttbl.luma_offset_l0[i];
+
+            slc_parms->luma_weight_l1[i] = slice_header->sh_predwttbl.luma_weight_l1[i];
+            slc_parms->luma_offset_l1[i] = slice_header->sh_predwttbl.luma_offset_l1[i];
+
+            for (j = 0; j < 2; j++)
+            {
+                slc_parms->chroma_weight_l0[i][j] = slice_header->sh_predwttbl.chroma_weight_l0[i][j];
+                slc_parms->chroma_offset_l0[i][j] = slice_header->sh_predwttbl.chroma_offset_l0[i][j];
+                slc_parms->chroma_weight_l1[i][j] = slice_header->sh_predwttbl.chroma_weight_l1[i][j];
+                slc_parms->chroma_offset_l1[i][j] = slice_header->sh_predwttbl.chroma_offset_l1[i][j];
+            }
+        }
+    }
+    else
+    {
+        /* default weight table */
+        slc_parms->luma_log2_weight_denom = 5;
+        slc_parms->chroma_log2_weight_denom = 5;
+        slc_parms->luma_weight_l0_flag = 0;
+        slc_parms->luma_weight_l1_flag = 0;
+        slc_parms->chroma_weight_l0_flag = 0;
+        slc_parms->chroma_weight_l1_flag = 0;
+        for (i = 0; i < 32; i++)
+        {
+            slc_parms->luma_weight_l0[i] = 0;
+            slc_parms->luma_offset_l0[i] = 0;
+            slc_parms->luma_weight_l1[i] = 0;
+            slc_parms->luma_offset_l1[i] = 0;
+
+            for (j = 0; j < 2; j++)
+            {
+                slc_parms->chroma_weight_l0[i][j] = 0;
+                slc_parms->chroma_offset_l0[i][j] = 0;
+                slc_parms->chroma_weight_l1[i][j] = 0;
+                slc_parms->chroma_offset_l1[i][j] = 0;
+            }
+        }
+    }
+}
+
+
+static void vbp_set_reference_frames_h264secure(
+    struct h264_viddec_parser *parser,
+    VAPictureParameterBufferH264* pic_parms)
+{
+    int buffer_idx;
+    int frame_idx;
+    frame_store* store = NULL;
+    h264_DecodedPictureBuffer* dpb = &(parser->info.dpb);
+    /* initialize reference frames */
+    for (frame_idx = 0; frame_idx < 16; frame_idx++)
+    {
+        pic_parms->ReferenceFrames[frame_idx].picture_id = VA_INVALID_SURFACE;
+        pic_parms->ReferenceFrames[frame_idx].frame_idx = 0;
+        pic_parms->ReferenceFrames[frame_idx].flags = VA_PICTURE_H264_INVALID;
+        pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = 0;
+        pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = 0;
+    }
+    pic_parms->num_ref_frames = 0;
+
+    frame_idx = 0;
+
+    /* ITRACE("short term frame in dpb %d", dpb->ref_frames_in_buffer);  */
+    /* set short term reference frames */
+    for (buffer_idx = 0; buffer_idx < dpb->ref_frames_in_buffer; buffer_idx++)
+    {
+        if (frame_idx >= 16 || buffer_idx >= 16)
+        {
+            WTRACE("Frame index is out of bound.");
+            break;
+        }
+
+        store = &dpb->fs[dpb->fs_ref_idc[buffer_idx]];
+        /* if (store->is_used == 3 && store->frame.used_for_reference == 3) */
+        if (viddec_h264_get_is_used(store))
+        {
+            pic_parms->ReferenceFrames[frame_idx].frame_idx = store->frame_num;
+            pic_parms->ReferenceFrames[frame_idx].flags = VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+            if (FRAME == parser->info.img.structure)
+            {
+                pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = store->top_field.poc;
+                pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = store->bottom_field.poc;
+            }
+            else
+            {
+                pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = store->top_field.poc;
+                pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = store->bottom_field.poc;
+                if (store->top_field.used_for_reference && store->bottom_field.used_for_reference)
+                {
+                    /* if both fields are used for reference, just set flag to be frame (0) */
+                }
+                else
+                {
+                    if (store->top_field.used_for_reference)
+                        pic_parms->ReferenceFrames[frame_idx].flags |= VA_PICTURE_H264_TOP_FIELD;
+                    if (store->bottom_field.used_for_reference)
+                        pic_parms->ReferenceFrames[frame_idx].flags |= VA_PICTURE_H264_BOTTOM_FIELD;
+                }
+            }
+        }
+        frame_idx++;
+    }
+
+    /* set long term reference frames */
+    for (buffer_idx = 0; buffer_idx < dpb->ltref_frames_in_buffer; buffer_idx++)
+    {
+        if (frame_idx >= 16 || buffer_idx >= 16)
+        {
+            WTRACE("Frame index is out of bound.");
+            break;
+        }
+        store = &dpb->fs[dpb->fs_ltref_idc[buffer_idx]];
+        if (!viddec_h264_get_is_long_term(store))
+        {
+            WTRACE("long term frame is not marked as long term.");
+        }
+        /*if (store->is_used == 3 && store->is_long_term && store->frame.used_for_reference == 3) */
+        if (viddec_h264_get_is_used(store))
+        {
+            pic_parms->ReferenceFrames[frame_idx].flags = VA_PICTURE_H264_LONG_TERM_REFERENCE;
+            if (FRAME == parser->info.img.structure)
+            {
+                pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = store->frame.poc;
+                pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = store->frame.poc;
+            }
+            else
+            {
+                pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = store->top_field.poc;
+                pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = store->bottom_field.poc;
+                if (store->top_field.used_for_reference && store->bottom_field.used_for_reference)
+                {
+                    /* if both fields are used for reference, just set flag to be frame (0)*/
+                }
+                else
+                {
+                    if (store->top_field.used_for_reference)
+                        pic_parms->ReferenceFrames[frame_idx].flags |= VA_PICTURE_H264_TOP_FIELD;
+                    if (store->bottom_field.used_for_reference)
+                        pic_parms->ReferenceFrames[frame_idx].flags |= VA_PICTURE_H264_BOTTOM_FIELD;
+                }
+            }
+        }
+        frame_idx++;
+    }
+
+    pic_parms->num_ref_frames = frame_idx;
+    if (frame_idx > parser->info.active_SPS.num_ref_frames)
+    {
+        WTRACE("actual num_ref_frames (%d) exceeds the value in the sequence header (%d).",
+               frame_idx, parser->info.active_SPS.num_ref_frames);
+    }
+}
+
+
+static void vbp_set_scaling_list_h264secure(
+    struct h264_viddec_parser *parser,
+    VAIQMatrixBufferH264* IQ_matrix_buf)
+{
+    int i;
+    int lists_to_set = 6 + 2 * (parser->info.active_PPS.transform_8x8_mode_flag ? 1 : 0);
+
+    if (parser->info.active_PPS.pic_scaling_matrix_present_flag)
+    {
+        for (i = 0; i < lists_to_set; i++)
+        {
+            if (parser->info.active_PPS.pic_scaling_list_present_flag[i])
+            {
+                if (((i < 6) && parser->info.active_PPS.UseDefaultScalingMatrix4x4Flag[i]) ||
+                        ((i >= 6) && parser->info.active_PPS.UseDefaultScalingMatrix8x8Flag[i-6]))
+                {
+                    /* use default scaling list */
+                    if (i < 6)
+                    {
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i], UseDefaultList[i], 16);
+                    }
+                    else
+                    {
+                        memcpy(IQ_matrix_buf->ScalingList8x8[i - 6], UseDefaultList[i], 64);
+                    }
+                }
+                else
+                {
+                    /* use PPS list */
+                    if (i < 6)
+                    {
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i], parser->info.active_PPS.ScalingList4x4[i], 16);
+                    }
+                    else
+                    {
+                        memcpy(IQ_matrix_buf->ScalingList8x8[i - 6], parser->info.active_PPS.ScalingList8x8[i - 6], 64);
+                    }
+                }
+            }
+            else /* pic_scaling_list not present */
+            {
+                if (parser->info.active_SPS.seq_scaling_matrix_present_flag)
+                {
+                    /* SPS matrix present - use fallback rule B */
+                    switch (i)
+                    {
+                    case 0:
+                    case 3:
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i],
+                               parser->info.active_SPS.seq_scaling_list_present_flag[i] ? parser->info.active_PPS.ScalingList4x4[i] : UseDefaultList[i],
+                               16);
+                        break;
+
+                    case 6:
+                    case 7:
+                        memcpy(IQ_matrix_buf->ScalingList8x8[i - 6],
+                               parser->info.active_SPS.seq_scaling_list_present_flag[i] ? parser->info.active_PPS.ScalingList8x8[i - 6] : UseDefaultList[i],
+                               64);
+                        break;
+
+                    case 1:
+                    case 2:
+                    case 4:
+                    case 5:
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i],
+                               IQ_matrix_buf->ScalingList4x4[i - 1],
+                               16);
+                        break;
+
+                    default:
+                        //g_warning("invalid scaling list index.");
+                        break;
+                    }
+                }
+                else /* seq_scaling_matrix not present */
+                {
+                    /* SPS matrix not present - use fallback rule A */
+                    switch (i)
+                    {
+                    case 0:
+                    case 3:
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i], UseDefaultList[i], 16);
+                        break;
+
+                    case 6:
+                    case 7:
+                        memcpy(IQ_matrix_buf->ScalingList8x8[i - 6], UseDefaultList[i], 64);
+                        break;
+
+                    case 1:
+                    case 2:
+                    case 4:
+                    case 5:
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i],
+                               IQ_matrix_buf->ScalingList4x4[i - 1],
+                               16);
+                        break;
+
+                    default:
+                        WTRACE("invalid scaling list index.");
+                        break;
+                    }
+                } /* end of seq_scaling_matrix not present */
+            } /* end of  pic_scaling_list not present */
+        } /* for loop for each index from 0 to 7 */
+    } /* end of pic_scaling_matrix present */
+    else
+    {
+        /* PPS matrix not present, use SPS information */
+        if (parser->info.active_SPS.seq_scaling_matrix_present_flag)
+        {
+            for (i = 0; i < lists_to_set; i++)
+            {
+                if (parser->info.active_SPS.seq_scaling_list_present_flag[i])
+                {
+                    if (((i < 6) && parser->info.active_SPS.UseDefaultScalingMatrix4x4Flag[i]) ||
+                            ((i >= 6) && parser->info.active_SPS.UseDefaultScalingMatrix8x8Flag[i - 6]))
+                    {
+                        /* use default scaling list */
+                        if (i < 6)
+                        {
+                            memcpy(IQ_matrix_buf->ScalingList4x4[i], UseDefaultList[i], 16);
+                        }
+                        else
+                        {
+                            memcpy(IQ_matrix_buf->ScalingList8x8[i - 6], UseDefaultList[i], 64);
+                        }
+                    }
+                    else
+                    {
+                        /* use SPS list */
+                        if (i < 6)
+                        {
+                            memcpy(IQ_matrix_buf->ScalingList4x4[i], parser->info.active_SPS.ScalingList4x4[i], 16);
+                        }
+                        else
+                        {
+                            memcpy(IQ_matrix_buf->ScalingList8x8[i - 6], parser->info.active_SPS.ScalingList8x8[i - 6], 64);
+                        }
+                    }
+                }
+                else
+                {
+                    /* SPS list not present - use fallback rule A */
+                    switch (i)
+                    {
+                    case 0:
+                    case 3:
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i], UseDefaultList[i], 16);
+                        break;
+
+                    case 6:
+                    case 7:
+                        memcpy(IQ_matrix_buf->ScalingList8x8[i - 6], UseDefaultList[i], 64);
+                        break;
+
+                    case 1:
+                    case 2:
+                    case 4:
+                    case 5:
+                        memcpy(IQ_matrix_buf->ScalingList4x4[i],
+                               IQ_matrix_buf->ScalingList4x4[i - 1],
+                               16);
+                        break;
+
+                    default:
+                        WTRACE("invalid scaling list index.");
+                        break;
+                    }
+                }
+            }
+        }
+        else
+        {
+            /* SPS matrix not present - use flat lists */
+            for (i = 0; i < 6; i++)
+            {
+                memcpy(IQ_matrix_buf->ScalingList4x4[i], quant_flat, 16);
+            }
+            for (i = 0; i < 2; i++)
+            {
+                memcpy(IQ_matrix_buf->ScalingList8x8[i], quant8_flat, 64);
+            }
+        }
+    }
+
+    if ((0 == parser->info.active_PPS.transform_8x8_mode_flag) &&
+            (parser->info.active_PPS.pic_scaling_matrix_present_flag ||
+             parser->info.active_SPS.seq_scaling_matrix_present_flag))
+    {
+        for (i = 0; i < 2; i++)
+        {
+            memcpy(IQ_matrix_buf->ScalingList8x8[i], quant8_flat, 64);
+        }
+    }
+}
+/**
+ * Fill query_data->pic_parse_buffer with the active SPS/PPS fields needed
+ * by the secure slice-header parsing path. Every copied field is traced so
+ * the parameter set used for a given frame can be reconstructed from logs.
+ *
+ * @param parser      h264 parser holding the currently active SPS/PPS
+ * @param query_data  output; pic_parse_buffer must be allocated by caller
+ */
+static void vbp_set_pic_parse_buffer_h264secure(
+    struct h264_viddec_parser *parser,
+     vbp_data_h264 *query_data)
+{
+    VTRACE("vbp_set_pic_parse_buffer_h264secure starts");
+    VAParsePictureParameterBuffer *buf = query_data->pic_parse_buffer;
+
+    buf->flags.bits.frame_mbs_only_flag = parser->info.active_SPS.sps_disp.frame_mbs_only_flag;
+    VTRACE("frame_mbs_only_flag = %d", buf->flags.bits.frame_mbs_only_flag);
+
+    buf->flags.bits.pic_order_present_flag = parser->info.active_PPS.pic_order_present_flag;
+    VTRACE("pic_order_present_flag = %d", buf->flags.bits.pic_order_present_flag);
+
+    buf->flags.bits.delta_pic_order_always_zero_flag = parser->info.active_SPS.delta_pic_order_always_zero_flag;
+    VTRACE("delta_pic_order_always_zero_flag = %d", buf->flags.bits.delta_pic_order_always_zero_flag);
+
+    buf->flags.bits.redundant_pic_cnt_present_flag = parser->info.active_PPS.redundant_pic_cnt_present_flag;
+    VTRACE("redundant_pic_cnt_present_flag = %d", buf->flags.bits.redundant_pic_cnt_present_flag);
+
+    buf->flags.bits.weighted_pred_flag = parser->info.active_PPS.weighted_pred_flag;
+    VTRACE("weighted_pred_flag = %d", buf->flags.bits.weighted_pred_flag);
+
+    buf->flags.bits.entropy_coding_mode_flag = parser->info.active_PPS.entropy_coding_mode_flag;
+    VTRACE("entropy_coding_mode_flag = %d", buf->flags.bits.entropy_coding_mode_flag);
+
+    buf->flags.bits.deblocking_filter_control_present_flag = parser->info.active_PPS.deblocking_filter_control_present_flag;
+    VTRACE("deblocking_filter_control_present_flag = %d", buf->flags.bits.deblocking_filter_control_present_flag);
+
+    buf->flags.bits.weighted_bipred_idc = parser->info.active_PPS.weighted_bipred_idc;
+    VTRACE("weighted_bipred_idc = %d", buf->flags.bits.weighted_bipred_idc);
+
+    buf->num_slice_groups_minus1 = parser->info.active_PPS.num_slice_groups_minus1;
+    VTRACE("num_slice_groups_minus1 = %d", buf->num_slice_groups_minus1);
+
+    buf->slice_group_map_type = parser->info.active_PPS.slice_group_map_type;
+    VTRACE("slice_group_map_type = %d", buf->slice_group_map_type);
+
+    /* slice_group_change_cycle is not parsed here; always reported as 0 */
+    buf->log2_slice_group_change_cycle = 0;
+
+    buf->chroma_format_idc = parser->info.active_SPS.sps_disp.chroma_format_idc;
+    VTRACE("chroma_format_idc = %d", buf->chroma_format_idc);
+
+    buf->log2_max_pic_order_cnt_lsb_minus4 = parser->info.active_SPS.log2_max_pic_order_cnt_lsb_minus4;
+    VTRACE("log2_max_pic_order_cnt_lsb_minus4 = %d", buf->log2_max_pic_order_cnt_lsb_minus4);
+
+    buf->pic_order_cnt_type = parser->info.active_SPS.pic_order_cnt_type;
+    VTRACE("pic_order_cnt_type = %d", buf->pic_order_cnt_type);
+
+    buf->log2_max_frame_num_minus4 = parser->info.active_SPS.log2_max_frame_num_minus4;
+    VTRACE("log2_max_frame_num_minus4 = %d", buf->log2_max_frame_num_minus4);
+
+
+    buf->residual_colour_transform_flag = parser->info.active_SPS.residual_colour_transform_flag;
+    VTRACE("residual_colour_transform_flag = %d", buf->residual_colour_transform_flag);
+
+    /* NOTE(review): the buffer fields are spelled "idc" but carry the PPS
+     * num_ref_idx_l*_active - 1 values — confirm against the buffer struct */
+    buf->num_ref_idc_l0_active_minus1= parser->info.active_PPS.num_ref_idx_l0_active-1;
+    VTRACE("num_ref_idc_l0_active_minus1 = %d", buf->num_ref_idc_l0_active_minus1);
+
+    buf->num_ref_idc_l1_active_minus1= parser->info.active_PPS.num_ref_idx_l1_active-1;
+    VTRACE("num_ref_idc_l1_active_minus1 = %d", buf->num_ref_idc_l1_active_minus1);
+
+    VTRACE("vbp_set_pic_parse_buffer_h264secure ends");
+}
+
+/**
+ * Refresh query_data->codec_data from the parser's active SPS/PPS.
+ *
+ * Derives:
+ *  - new_sps / new_pps: set when the parameter-set id or the frame size has
+ *    changed since the previous call (old values are saved up front);
+ *  - has_sps / has_pps: a parameter-set id of 0xff marks "not yet seen";
+ *  - crop rectangle and sample aspect ratio from the SPS/VUI fields;
+ *  - has_slice: mirrors parser->info.has_slice for this buffer.
+ * Also refreshes picture width/height (in MBs) on pic_data[0]'s pic_parms.
+ */
+static void vbp_set_codec_data_h264secure(
+    struct h264_viddec_parser *parser,
+     vbp_data_h264 *query_data)
+{
+    VTRACE("vbp_set_codec_data_h264secure +++");
+    vbp_codec_data_h264* codec_data = query_data->codec_data;
+
+    /* The following variables are used to detect if there is new SPS or PPS */
+    uint8 seq_parameter_set_id = codec_data->seq_parameter_set_id;
+    uint8 pic_parameter_set_id = codec_data->pic_parameter_set_id;
+    int frame_width = codec_data->frame_width;
+    int frame_height = codec_data->frame_height;
+
+    /* parameter id */
+    codec_data->seq_parameter_set_id = parser->info.active_SPS.seq_parameter_set_id;
+    codec_data->pic_parameter_set_id = parser->info.active_PPS.pic_parameter_set_id;
+
+    /* profile and level */
+    codec_data->profile_idc = parser->info.active_SPS.profile_idc;
+    codec_data->level_idc = parser->info.active_SPS.level_idc;
+
+
+    /*constraint flag sets (h.264 Spec v2009)*/
+    codec_data->constraint_set0_flag = (parser->info.active_SPS.constraint_set_flags & 0x10) >> 4;
+    codec_data->constraint_set1_flag = (parser->info.active_SPS.constraint_set_flags & 0x8) >> 3;
+    codec_data->constraint_set2_flag = (parser->info.active_SPS.constraint_set_flags & 0x4) >> 2;
+    codec_data->constraint_set3_flag = (parser->info.active_SPS.constraint_set_flags & 0x2) >> 1;
+    codec_data->constraint_set4_flag = parser->info.active_SPS.constraint_set_flags & 0x1;
+
+    /* reference frames */
+    codec_data->num_ref_frames = parser->info.active_SPS.num_ref_frames;
+
+    if (!parser->info.active_SPS.sps_disp.frame_mbs_only_flag &&
+        !parser->info.active_SPS.sps_disp.mb_adaptive_frame_field_flag)
+    {
+        /* no longer necessary: two fields share the same interlaced surface */
+        /* codec_data->num_ref_frames *= 2; */
+    }
+
+    codec_data->gaps_in_frame_num_value_allowed_flag = parser->info.active_SPS.gaps_in_frame_num_value_allowed_flag;
+
+    /* frame coding */
+    codec_data->frame_mbs_only_flag = parser->info.active_SPS.sps_disp.frame_mbs_only_flag;
+    codec_data->mb_adaptive_frame_field_flag = parser->info.active_SPS.sps_disp.mb_adaptive_frame_field_flag;
+
+    /* frame dimension, in pixels (16 per macroblock; doubled height for fields) */
+    codec_data->frame_width = (parser->info.active_SPS.sps_disp.pic_width_in_mbs_minus1 + 1 ) * 16;
+
+    codec_data->frame_height = (2 - parser->info.active_SPS.sps_disp.frame_mbs_only_flag) *
+                               (parser->info.active_SPS.sps_disp.pic_height_in_map_units_minus1 + 1) * 16;
+
+    /* cropping information, per H.264 spec 7.4.2.1.1 crop-unit derivation */
+    codec_data->crop_left = 0;
+    codec_data->crop_right = 0;
+    codec_data->crop_top = 0;
+    codec_data->crop_bottom = 0;
+    if(parser->info.active_SPS.sps_disp.frame_cropping_flag) {
+        int CropUnitX = 0, CropUnitY = 0, SubWidthC = 0, SubHeightC = 0;
+        int ChromaArrayType = 0;
+        if(parser->info.active_SPS.sps_disp.separate_colour_plane_flag == 0) {
+            if(parser->info.active_SPS.sps_disp.chroma_format_idc == 1) {
+                SubWidthC = 2;
+                SubHeightC = 2;
+            } else if( parser->info.active_SPS.sps_disp.chroma_format_idc == 2) {
+                SubWidthC = 2;
+                SubHeightC = 1;
+            } else if( parser->info.active_SPS.sps_disp.chroma_format_idc == 3) {
+                SubWidthC = 1;
+                SubHeightC = 1;
+            }
+            ChromaArrayType = parser->info.active_SPS.sps_disp.chroma_format_idc;
+        }
+
+        if(ChromaArrayType == 0) {
+            /* monochrome or separate colour planes: crop in luma samples */
+            CropUnitX = 1;
+            CropUnitY = 2 - parser->info.active_SPS.sps_disp.frame_mbs_only_flag;
+        } else {
+            CropUnitX = SubWidthC;
+            CropUnitY = SubHeightC * ( 2 - parser->info.active_SPS.sps_disp.frame_mbs_only_flag);
+        }
+
+        codec_data->crop_left = CropUnitX * parser->info.active_SPS.sps_disp.frame_crop_rect_left_offset;
+        codec_data->crop_right = CropUnitX * parser->info.active_SPS.sps_disp.frame_crop_rect_right_offset;
+        codec_data->crop_top = CropUnitY * parser->info.active_SPS.sps_disp.frame_crop_rect_top_offset;
+        codec_data->crop_bottom = CropUnitY * parser->info.active_SPS.sps_disp.frame_crop_rect_bottom_offset;
+    }
+    /* aspect ratio: idc < 17 uses the table, 255 = Extended_SAR from VUI */
+    if (parser->info.active_SPS.sps_disp.vui_seq_parameters.aspect_ratio_info_present_flag)
+    {
+        codec_data->aspect_ratio_idc =
+            parser->info.active_SPS.sps_disp.vui_seq_parameters.aspect_ratio_idc;
+
+        if (codec_data->aspect_ratio_idc < 17)
+        {
+            codec_data->sar_width = h264_aspect_ratio_table[codec_data->aspect_ratio_idc][0];
+            codec_data->sar_height = h264_aspect_ratio_table[codec_data->aspect_ratio_idc][1];
+        }
+        else if (codec_data->aspect_ratio_idc == 255)
+        {
+            codec_data->sar_width =
+                parser->info.active_SPS.sps_disp.vui_seq_parameters.sar_width;
+
+            codec_data->sar_height =
+                parser->info.active_SPS.sps_disp.vui_seq_parameters.sar_height;
+        }
+        else
+        {
+            codec_data->sar_width = 0;
+            codec_data->sar_height = 0;
+        }
+    }
+    else
+    {
+        // unspecified
+        codec_data->aspect_ratio_idc = 0;
+        codec_data->sar_width = 0;
+        codec_data->sar_height = 0;
+    }
+
+    /* video format */
+    if (parser->info.active_SPS.sps_disp.vui_seq_parameters.video_signal_type_present_flag)
+    {
+        codec_data->video_format =
+            parser->info.active_SPS.sps_disp.vui_seq_parameters.video_format;
+    }
+    else
+    {
+        // Unspecified video format
+        codec_data->video_format = 5;
+    }
+
+    codec_data->video_full_range_flag =
+        parser->info.active_SPS.sps_disp.vui_seq_parameters.video_full_range_flag;
+
+
+    if (parser->info.active_SPS.sps_disp.vui_seq_parameters.colour_description_present_flag)
+    {
+        codec_data->matrix_coefficients =
+            parser->info.active_SPS.sps_disp.vui_seq_parameters.matrix_coefficients;
+    }
+    else
+    {
+        // Unspecified
+        codec_data->matrix_coefficients = 2;
+    }
+
+    codec_data->bit_rate = parser->info.active_SPS.sps_disp.vui_seq_parameters.bit_rate_value;
+
+    /* picture order type and count */
+    codec_data->log2_max_pic_order_cnt_lsb_minus4 = parser->info.active_SPS.log2_max_pic_order_cnt_lsb_minus4;
+    codec_data->pic_order_cnt_type = parser->info.active_SPS.pic_order_cnt_type;
+
+
+    /* update sps and pps status */
+    query_data->new_sps = (seq_parameter_set_id != parser->info.active_PPS.seq_parameter_set_id) ? 1 : 0;
+    query_data->new_pps = (pic_parameter_set_id != parser->info.active_PPS.pic_parameter_set_id) ? 1 : 0;
+
+    /* an id of 0xff means the corresponding parameter set has not been seen yet */
+    query_data->has_sps = parser->info.active_SPS.seq_parameter_set_id != 0xff;
+    query_data->has_pps = parser->info.active_PPS.seq_parameter_set_id != 0xff;
+    VTRACE("parser->info.active_SPS.seq_parameter_set_id = %d", parser->info.active_SPS.seq_parameter_set_id);
+    VTRACE("parser->info.active_PPS.seq_parameter_set_id = %d", parser->info.active_PPS.seq_parameter_set_id);
+    VTRACE("has_sps = %d, has_pps %d", query_data->has_sps, query_data->has_pps);
+
+    /* a resolution change implies new parameter sets even if the ids match */
+    if ( frame_width != codec_data->frame_width || frame_height != codec_data->frame_height)
+    {
+        query_data->new_sps = 1;
+        query_data->new_pps = 1;
+    }
+    VAPictureParameterBufferH264 *pic_parms;
+    pic_parms = query_data->pic_data[0].pic_parms;
+    pic_parms->picture_width_in_mbs_minus1 = parser->info.active_SPS.sps_disp.pic_width_in_mbs_minus1;
+    /* frame height in MBS */
+    pic_parms->picture_height_in_mbs_minus1 = (2 - parser->info.active_SPS.sps_disp.frame_mbs_only_flag) *
+            (parser->info.active_SPS.sps_disp.pic_height_in_map_units_minus1 + 1) - 1;
+    if (parser->info.has_slice) {
+        VTRACE("Found slice info in the buffer");
+        codec_data->has_slice= 1;
+    } else {
+        VTRACE("No slice info in the buffer");
+        codec_data->has_slice = 0;
+    }
+
+    VTRACE("vbp_set_codec_data_h264secure ---");
+}
+
+
+/**
+ * Start or continue a picture for the current slice.
+ *
+ * A slice with first_mb_in_slice == 0 opens a new picture (num_pictures is
+ * incremented); the picture's VAPictureParameterBufferH264 is then filled
+ * from the active SPS/PPS and the slice header. Reference frames are set by
+ * vbp_set_reference_frames_h264secure(); for an IDR NAL the reference list
+ * is invalidated and num_ref_frames forced to 0.
+ *
+ * @return VBP_OK on success, VBP_DATA when the picture count is invalid or
+ *         exceeds MAX_NUM_PICTURES.
+ */
+static uint32_t vbp_add_pic_data_h264secure(vbp_context *pcontext)
+{
+    viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
+    VTRACE("vbp_add_pic_data_h264secure +++");
+    vbp_data_h264 *query_data = (vbp_data_h264 *)pcontext->query_data;
+    struct h264_viddec_parser* parser = NULL;
+    vbp_picture_data_h264* pic_data = NULL;
+    VAPictureParameterBufferH264* pic_parms = NULL;
+
+    parser = (struct h264_viddec_parser *)cxt->codec_data;
+
+    if (0 == parser->info.SliceHeader.first_mb_in_slice)
+    {
+        /* a new picture is parsed */
+        query_data->num_pictures++;
+    }
+
+    if (query_data->num_pictures == 0)
+    {
+        /* partial frame */
+        query_data->num_pictures = 1;
+    }
+
+    if (query_data->num_pictures > MAX_NUM_PICTURES)
+    {
+        ETRACE("num of pictures exceeds the limit (%d).", MAX_NUM_PICTURES);
+        return VBP_DATA;
+    }
+
+    int pic_data_index = query_data->num_pictures - 1;
+    if (pic_data_index < 0)
+    {
+        WTRACE("MB address does not start from 0!");
+        return VBP_DATA;
+    }
+
+    pic_data = &(query_data->pic_data[pic_data_index]);
+    pic_parms = pic_data->pic_parms;
+
+    // relax this condition to support partial frame parsing
+
+    //if (parser->info.SliceHeader.first_mb_in_slice == 0)
+    {
+        /**
+        * picture parameter only needs to be set once,
+        * even multiple slices may be encoded
+        */
+
+        /* VAPictureParameterBufferH264 */
+        pic_parms->CurrPic.picture_id = VA_INVALID_SURFACE;
+        pic_parms->CurrPic.frame_idx = 0;
+        if (parser->info.img.field_pic_flag == 1)
+        {
+            if (parser->info.img.bottom_field_flag)
+            {
+                pic_parms->CurrPic.flags = VA_PICTURE_H264_BOTTOM_FIELD;
+            }
+            else
+            {
+                /* also OK set to 0 (from test suite) */
+                pic_parms->CurrPic.flags = VA_PICTURE_H264_TOP_FIELD;
+            }
+        }
+        else
+        {
+            pic_parms->CurrPic.flags = 0; /* frame picture */
+        }
+        pic_parms->CurrPic.TopFieldOrderCnt = parser->info.img.toppoc;
+        pic_parms->CurrPic.BottomFieldOrderCnt = parser->info.img.bottompoc;
+        pic_parms->CurrPic.frame_idx = parser->info.SliceHeader.frame_num;
+
+        /* don't care if current frame is used as long term reference */
+        if (parser->info.SliceHeader.nal_ref_idc != 0)
+        {
+            pic_parms->CurrPic.flags |= VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+        }
+
+        pic_parms->picture_width_in_mbs_minus1 = parser->info.active_SPS.sps_disp.pic_width_in_mbs_minus1;
+        pic_parms->picture_height_in_mbs_minus1 = (2 - parser->info.active_SPS.sps_disp.frame_mbs_only_flag) *
+                (parser->info.active_SPS.sps_disp.pic_height_in_map_units_minus1 + 1) - 1;
+        pic_parms->bit_depth_luma_minus8 = parser->info.active_SPS.bit_depth_luma_minus8;
+        pic_parms->bit_depth_chroma_minus8 = parser->info.active_SPS.bit_depth_chroma_minus8;
+        pic_parms->seq_fields.value = 0;
+        pic_parms->seq_fields.bits.chroma_format_idc = parser->info.active_SPS.sps_disp.chroma_format_idc;
+        pic_parms->seq_fields.bits.residual_colour_transform_flag = parser->info.active_SPS.residual_colour_transform_flag;
+        pic_parms->seq_fields.bits.frame_mbs_only_flag = parser->info.active_SPS.sps_disp.frame_mbs_only_flag;
+        pic_parms->seq_fields.bits.mb_adaptive_frame_field_flag = parser->info.active_SPS.sps_disp.mb_adaptive_frame_field_flag;
+        pic_parms->seq_fields.bits.direct_8x8_inference_flag = parser->info.active_SPS.sps_disp.direct_8x8_inference_flag;
+
+        /* new fields in libva 0.31 */
+        pic_parms->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = parser->info.active_SPS.gaps_in_frame_num_value_allowed_flag;
+        pic_parms->seq_fields.bits.log2_max_frame_num_minus4 = parser->info.active_SPS.log2_max_frame_num_minus4;
+        pic_parms->seq_fields.bits.pic_order_cnt_type = parser->info.active_SPS.pic_order_cnt_type;
+        pic_parms->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = parser->info.active_SPS.log2_max_pic_order_cnt_lsb_minus4;
+        pic_parms->seq_fields.bits.delta_pic_order_always_zero_flag =parser->info.active_SPS.delta_pic_order_always_zero_flag;
+
+
+        /* referenced from UMG_Moorstown_TestSuites */
+        pic_parms->seq_fields.bits.MinLumaBiPredSize8x8 = (parser->info.active_SPS.level_idc > 30) ? 1 : 0;
+
+        pic_parms->num_slice_groups_minus1 = parser->info.active_PPS.num_slice_groups_minus1;
+        pic_parms->slice_group_map_type = parser->info.active_PPS.slice_group_map_type;
+        pic_parms->slice_group_change_rate_minus1 = 0;
+        pic_parms->pic_init_qp_minus26 = parser->info.active_PPS.pic_init_qp_minus26;
+        pic_parms->pic_init_qs_minus26 = 0;
+        pic_parms->chroma_qp_index_offset = parser->info.active_PPS.chroma_qp_index_offset;
+        pic_parms->second_chroma_qp_index_offset = parser->info.active_PPS.second_chroma_qp_index_offset;
+
+        pic_parms->pic_fields.value = 0;
+        pic_parms->pic_fields.bits.entropy_coding_mode_flag = parser->info.active_PPS.entropy_coding_mode_flag;
+        pic_parms->pic_fields.bits.weighted_pred_flag = parser->info.active_PPS.weighted_pred_flag;
+        pic_parms->pic_fields.bits.weighted_bipred_idc = parser->info.active_PPS.weighted_bipred_idc;
+        pic_parms->pic_fields.bits.transform_8x8_mode_flag = parser->info.active_PPS.transform_8x8_mode_flag;
+
+        /* new LibVA fields in v0.31*/
+        pic_parms->pic_fields.bits.pic_order_present_flag = parser->info.active_PPS.pic_order_present_flag;
+        pic_parms->pic_fields.bits.deblocking_filter_control_present_flag = parser->info.active_PPS.deblocking_filter_control_present_flag;
+        pic_parms->pic_fields.bits.redundant_pic_cnt_present_flag = parser->info.active_PPS.redundant_pic_cnt_present_flag;
+        pic_parms->pic_fields.bits.reference_pic_flag = parser->info.SliceHeader.nal_ref_idc != 0;
+
+        /* all slices in the picture have the same field_pic_flag */
+        pic_parms->pic_fields.bits.field_pic_flag = parser->info.SliceHeader.field_pic_flag;
+        pic_parms->pic_fields.bits.constrained_intra_pred_flag = parser->info.active_PPS.constrained_intra_pred_flag;
+
+        pic_parms->frame_num = parser->info.SliceHeader.frame_num;
+    }
+
+
+    /* set reference frames, and num_ref_frames */
+    vbp_set_reference_frames_h264secure(parser, pic_parms);
+    if (parser->info.nal_unit_type == h264_NAL_UNIT_TYPE_IDR)
+    {
+        int frame_idx;
+        for (frame_idx = 0; frame_idx < 16; frame_idx++)
+        {
+            pic_parms->ReferenceFrames[frame_idx].picture_id = VA_INVALID_SURFACE;
+            pic_parms->ReferenceFrames[frame_idx].frame_idx = 0;
+            pic_parms->ReferenceFrames[frame_idx].flags = VA_PICTURE_H264_INVALID;
+            pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = 0;
+            pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = 0;
+        }
+        /* num of reference frame is 0 if current picture is IDR */
+        pic_parms->num_ref_frames = 0;
+    }
+    else
+    {
+        /* actual num_ref_frames is set in vbp_set_reference_frames_h264 */
+    }
+
+    VTRACE("vbp_add_pic_data_h264secure ---");
+    return VBP_OK;
+}
+
+/**
+ * Append one slice entry to the current picture's slc_data[] using the
+ * slice header that was just parsed.
+ *
+ * Fixes vs. previous revision:
+ *  - the MAX_NUM_SLICES bound is now checked BEFORE indexing slc_data[];
+ *    the old code wrote the entry first and checked afterwards, producing
+ *    an out-of-bounds store when num_slices already equaled MAX_NUM_SLICES;
+ *  - the MBAFF condition uses logical && instead of bitwise & (same result
+ *    for 0/1 flags, but states the intent);
+ *  - unused locals (bit, byte, is_emul) removed.
+ *
+ * @param pcontext  parser context; parser_cxt/query_data must be valid
+ * @param key       slice key for the secure decoder; passed to MSVDX via
+ *                  slc_parms->slice_data_bit_offset
+ * @return VBP_OK on success, VBP_DATA if no picture exists yet or the
+ *         per-picture slice limit is reached.
+ */
+static uint32_t vbp_add_slice_data_h264secure(vbp_context *pcontext, uint32 key)
+{
+    VTRACE("vbp_add_slice_data_h264secure +++");
+    viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
+
+    vbp_data_h264 *query_data = (vbp_data_h264 *)pcontext->query_data;
+    VASliceParameterBufferH264 *slc_parms = NULL;
+    vbp_slice_data_h264 *slc_data = NULL;
+    struct h264_viddec_parser* h264_parser = NULL;
+    h264_Slice_Header_t* slice_header = NULL;
+    vbp_picture_data_h264* pic_data = NULL;
+
+    h264_parser = (struct h264_viddec_parser *)cxt->codec_data;
+    int pic_data_index = query_data->num_pictures - 1;
+    VTRACE("pic_data_index = %d", pic_data_index);
+
+    if (pic_data_index < 0)
+    {
+        ETRACE("invalid picture data index.");
+        return VBP_DATA;
+    }
+
+    pic_data = &(query_data->pic_data[pic_data_index]);
+    VTRACE("pic_data->num_slices = %d", pic_data->num_slices);
+
+    /* bound check must precede the slc_data[] write (see function comment) */
+    if (pic_data->num_slices >= MAX_NUM_SLICES)
+    {
+        ETRACE("number of slices per picture exceeds the limit (%d).", MAX_NUM_SLICES);
+        return VBP_DATA;
+    }
+
+    slc_data = &(pic_data->slc_data[pic_data->num_slices]);
+    slc_data->buffer_addr = cxt->parse_cubby.buf;
+    slc_parms = &(slc_data->slc_parms);
+
+    slc_data->nal_unit_type = h264_parser->info.nal_unit_type;
+
+    /* No longer used, MSVDX firmware will maintain it */
+    slc_data->slice_offset = 0;
+    slc_parms->slice_data_offset = 0;
+    slc_parms->slice_data_size = 0;
+
+    /* whole slice is in this buffer */
+    slc_parms->slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
+
+    // The slice key is stored in slice_data_bit_offset and passed to MSVDX
+    slc_parms->slice_data_bit_offset = key;
+
+    slice_header = &(h264_parser->info.SliceHeader);
+    slc_parms->first_mb_in_slice = slice_header->first_mb_in_slice;
+
+    /* in an MBAFF frame picture, first_mb_in_slice addresses MB pairs */
+    if (h264_parser->info.active_SPS.sps_disp.mb_adaptive_frame_field_flag &&
+            (!(h264_parser->info.SliceHeader.field_pic_flag)))
+    {
+        slc_parms->first_mb_in_slice /= 2;
+    }
+
+    slc_parms->slice_type = slice_header->slice_type;
+
+    slc_parms->direct_spatial_mv_pred_flag = slice_header->direct_spatial_mv_pred_flag;
+
+    /* reference list sizes depend on the slice type: I none, P L0, B L0+L1 */
+    slc_parms->num_ref_idx_l0_active_minus1 = 0;
+    slc_parms->num_ref_idx_l1_active_minus1 = 0;
+    if (slice_header->slice_type == h264_PtypeI)
+    {
+        /* intra slice: no reference lists */
+    }
+    else if (slice_header->slice_type == h264_PtypeP)
+    {
+        slc_parms->num_ref_idx_l0_active_minus1 = slice_header->num_ref_idx_l0_active - 1;
+    }
+    else if (slice_header->slice_type == h264_PtypeB)
+    {
+        slc_parms->num_ref_idx_l0_active_minus1 = slice_header->num_ref_idx_l0_active - 1;
+        slc_parms->num_ref_idx_l1_active_minus1 = slice_header->num_ref_idx_l1_active - 1;
+    }
+    else
+    {
+        WTRACE("slice type %d is not supported.", slice_header->slice_type);
+    }
+
+    slc_parms->cabac_init_idc = slice_header->cabac_init_idc;
+    slc_parms->slice_qp_delta = slice_header->slice_qp_delta;
+    slc_parms->disable_deblocking_filter_idc = slice_header->disable_deblocking_filter_idc;
+    slc_parms->slice_alpha_c0_offset_div2 = slice_header->slice_alpha_c0_offset_div2;
+    slc_parms->slice_beta_offset_div2 = slice_header->slice_beta_offset_div2;
+
+    vbp_set_pre_weight_table_h264secure(h264_parser, slc_parms);
+    vbp_set_slice_ref_list_h264secure(h264_parser, slc_parms);
+
+    pic_data->num_slices++;
+
+    if (pic_data->num_slices > 1)
+    {
+        ITRACE("number of slices per picture is %d.", pic_data->num_slices);
+    }
+
+    VTRACE("vbp_add_slice_data_h264secure ---");
+    return VBP_OK;
+}
+
+/**
+* parse decoder configuration data
+*/
+/**
+* parse decoder configuration data
+*
+* Parses an AVCDecoderConfigurationRecord (MPEG-4 Part 15) from
+* cxt->parse_cubby and records every SPS/PPS payload as an item in cxt->list.
+* If the buffer is actually start-code prefixed, parsing is delegated to
+* vbp_parse_start_code_h264secure() instead.
+*
+* Fix vs. previous revision: cxt->list.num_items is now bounded by
+* MAX_IBUFS_PER_SC in both the SPS and PPS loops (a record may declare up to
+* 255 PPS entries, which could overflow the fixed-size item list); this
+* matches the guard already used in vbp_parse_start_code_h264secure().
+*
+* @return VBP_OK on success, VBP_DATA when the buffer is truncated or the
+*         item list would overflow.
+*/
+uint32 vbp_parse_init_data_h264secure(vbp_context* pcontext)
+{
+    /* parsing AVCDecoderConfigurationRecord structure (see MPEG-4 part 15 spec) */
+
+    uint8 configuration_version = 0;
+    uint8 AVC_profile_indication = 0;
+    uint8 profile_compatibility = 0;
+    uint8 AVC_level_indication = 0;
+    uint8 length_size_minus_one = 0;
+    uint8 num_of_sequence_parameter_sets = 0;
+    uint8 num_of_picture_parameter_sets = 0;
+    uint16 sequence_parameter_set_length = 0;
+    uint16 picture_parameter_set_length = 0;
+
+    int i = 0;
+    viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
+
+    vbp_h264_parser_private *parser_private = (vbp_h264_parser_private *)pcontext->parser_private;
+    //Enable emulation prevention
+    cxt->getbits.is_emul_reqd = 1;
+
+    /* check if configuration data is start code prefix */
+    viddec_sc_parse_cubby_cxt_t cubby = cxt->parse_cubby;
+    viddec_parser_ops_t *ops = pcontext->parser_ops;
+    int ret = ops->parse_sc((void *)&cubby,
+                            NULL, /* context, not used */
+                            &(cxt->sc_prefix_info));
+    if (ret == 1)
+    {
+        WTRACE("configuration data is start-code prefixed.\n");
+        parser_private->bitstream_pattern = H264_BS_SC_PREFIXED;
+        return vbp_parse_start_code_h264secure(pcontext);
+    }
+
+    uint8* cur_data = cxt->parse_cubby.buf;
+
+    if (cxt->parse_cubby.size < 6)
+    {
+        /* need at least 6 bytes to start parsing the structure, see spec 15 */
+        return VBP_DATA;
+    }
+
+    configuration_version = *cur_data++;
+    AVC_profile_indication = *cur_data++;
+    profile_compatibility = *cur_data++;
+    AVC_level_indication = *cur_data++;
+
+    /* ITRACE("Level indication: %d", AVC_level_indication);*/
+    /* 2 bits of length_size_minus_one, 6 bits of reserved (11111) */
+    length_size_minus_one = (*cur_data) & 0x3;
+
+    if (length_size_minus_one != 3)
+    {
+        WTRACE("length size (%d) is not equal to 4.", length_size_minus_one + 1);
+    }
+
+    parser_private->NAL_length_size = length_size_minus_one + 1;
+
+    cur_data++;
+
+    /* 3 bits of reserved (111) and 5 bits of num_of_sequence_parameter_sets */
+    num_of_sequence_parameter_sets = (*cur_data) & 0x1f;
+    if (num_of_sequence_parameter_sets > 1)
+    {
+        WTRACE("num_of_sequence_parameter_sets is %d.", num_of_sequence_parameter_sets);
+    }
+    if (num_of_sequence_parameter_sets > MAX_NUM_SPS)
+    {
+        /* this would never happen as MAX_NUM_SPS = 32 */
+        WTRACE("num_of_sequence_parameter_sets (%d) exceeds the limit (%d).", num_of_sequence_parameter_sets, MAX_NUM_SPS);
+    }
+    cur_data++;
+
+    cxt->list.num_items = 0;
+    for (i = 0; i < num_of_sequence_parameter_sets; i++)
+    {
+        if (cur_data - cxt->parse_cubby.buf + 2 > cxt->parse_cubby.size)
+        {
+            /* need at least 2 bytes to parse sequence_parameter_set_length */
+            ETRACE("Not enough data to parse SPS length.");
+            return VBP_DATA;
+        }
+
+        /* 16 bits */
+        sequence_parameter_set_length = vbp_utils_ntohs(cur_data);
+
+        cur_data += 2;
+
+        if (cur_data - cxt->parse_cubby.buf + sequence_parameter_set_length > cxt->parse_cubby.size)
+        {
+            /* need at least sequence_parameter_set_length bytes for SPS */
+            ETRACE("Not enough data to parse SPS.");
+            return VBP_DATA;
+        }
+
+        /* never write past the fixed-size item list */
+        if (cxt->list.num_items >= MAX_IBUFS_PER_SC)
+        {
+            ETRACE("num of list items exceeds the limit (%d).", MAX_IBUFS_PER_SC);
+            return VBP_DATA;
+        }
+
+        cxt->list.data[cxt->list.num_items].stpos = cur_data - cxt->parse_cubby.buf;
+
+        /* end pos is exclusive */
+        cxt->list.data[cxt->list.num_items].edpos =
+            cxt->list.data[cxt->list.num_items].stpos + sequence_parameter_set_length;
+
+        cxt->list.num_items++;
+
+        cur_data += sequence_parameter_set_length;
+    }
+
+    if (cur_data - cxt->parse_cubby.buf + 1 > cxt->parse_cubby.size)
+    {
+        /* need at least one more byte to parse num_of_picture_parameter_sets */
+        ETRACE("Not enough data to parse number of PPS.");
+        return VBP_DATA;
+    }
+
+    num_of_picture_parameter_sets = *cur_data++;
+    if (num_of_picture_parameter_sets > 1)
+    {
+        /* g_warning("num_of_picture_parameter_sets is %d.", num_of_picture_parameter_sets); */
+    }
+
+    for (i = 0; i < num_of_picture_parameter_sets; i++)
+    {
+        if (cur_data - cxt->parse_cubby.buf + 2 > cxt->parse_cubby.size)
+        {
+            /* need at least 2 bytes to parse picture_parameter_set_length */
+            ETRACE("Not enough data to parse PPS length.");
+            return VBP_DATA;
+        }
+
+        /* 16 bits */
+        picture_parameter_set_length = vbp_utils_ntohs(cur_data);
+
+        cur_data += 2;
+
+        if (cur_data - cxt->parse_cubby.buf + picture_parameter_set_length > cxt->parse_cubby.size)
+        {
+            /* need at least picture_parameter_set_length bytes for PPS */
+            ETRACE("Not enough data to parse PPS.");
+            return VBP_DATA;
+        }
+
+        /* never write past the fixed-size item list */
+        if (cxt->list.num_items >= MAX_IBUFS_PER_SC)
+        {
+            ETRACE("num of list items exceeds the limit (%d).", MAX_IBUFS_PER_SC);
+            return VBP_DATA;
+        }
+
+        cxt->list.data[cxt->list.num_items].stpos = cur_data - cxt->parse_cubby.buf;
+
+        /* end pos is exclusive */
+        cxt->list.data[cxt->list.num_items].edpos =
+            cxt->list.data[cxt->list.num_items].stpos + picture_parameter_set_length;
+
+        cxt->list.num_items++;
+
+        cur_data += picture_parameter_set_length;
+    }
+
+    if ((cur_data - cxt->parse_cubby.buf) !=  cxt->parse_cubby.size)
+    {
+        /* cast: pointer difference is ptrdiff_t, printed with %d */
+        WTRACE("Not all initialization data is parsed. Size = %d, parsed = %d.",
+               cxt->parse_cubby.size, (int)(cur_data - cxt->parse_cubby.buf));
+    }
+
+    parser_private->bitstream_pattern = H264_BS_LENGTH_PREFIXED;
+    return VBP_OK;
+}
+
+/**
+ * Read a big-endian NAL-unit length prefix of *NAL_length_size bytes from p.
+ * An unsupported size is reported, *NAL_length_size is reset to 4, and the
+ * prefix is read as 4 bytes.
+ */
+static inline uint32_t vbp_get_NAL_length_h264(uint8_t* p, int *NAL_length_size)
+{
+    int prefix_bytes = *NAL_length_size;
+
+    if (prefix_bytes == 4)
+    {
+        return vbp_utils_ntohl(p);
+    }
+    if (prefix_bytes == 3)
+    {
+        return ((uint32_t)p[0] << 16) + ((uint32_t)p[1] << 8) + (uint32_t)p[2];
+    }
+    if (prefix_bytes == 2)
+    {
+        return vbp_utils_ntohs(p);
+    }
+    if (prefix_bytes == 1)
+    {
+        return *p;
+    }
+
+    WTRACE("invalid NAL_length_size: %d.", prefix_bytes);
+    /* default to 4 bytes for length */
+    *NAL_length_size = 4;
+    return vbp_utils_ntohl(p);
+}
+
+/**
+ * H.264 elementary stream does not have start codes;
+ * instead, it is comprised of the size of each NAL unit followed by the
+ * NAL unit payload. See MPEG-4 Part 15 (sample format).
+ */
+
+/* Start code prefix is 001 which is 3 bytes. */
+#define H264_SC_SIZE 3
+uint32 vbp_parse_start_code_h264secure(vbp_context *pcontext)
+{
+    viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
+    vbp_h264_parser_private *parser_private = (vbp_h264_parser_private *)pcontext->parser_private;
+
+    /* reset query data for the new sample buffer */
+    vbp_data_h264* query_data = (vbp_data_h264*)pcontext->query_data;
+    int i;
+
+    for (i = 0; i < MAX_NUM_PICTURES; i++)
+    {
+        query_data->pic_data[i].num_slices = 0;
+    }
+    query_data->num_pictures = 0;
+
+    cxt->list.num_items = 0;
+
+    /* reset start position of first item to 0 in case there is only one item */
+    cxt->list.data[0].stpos = 0;
+
+    /* start code emulation prevention byte is present in NAL */
+    cxt->getbits.is_emul_reqd = 1;
+
+    if (parser_private->bitstream_pattern == H264_BS_LENGTH_PREFIXED)
+    {
+        viddec_sc_parse_cubby_cxt_t* cubby = NULL;
+        int32_t size_left = 0;
+        int32_t size_parsed = 0;
+        int32_t NAL_length = 0;
+
+        cubby = &(cxt->parse_cubby);
+
+        size_left = cubby->size;
+
+        while (size_left >= parser_private->NAL_length_size)
+        {
+            NAL_length = vbp_get_NAL_length_h264(cubby->buf + size_parsed, &parser_private->NAL_length_size);
+            if (NAL_length <= 0 || NAL_length > size_left - parser_private->NAL_length_size)
+            {
+                ETRACE("Invalid NAL_length parsed.");
+                break;
+            }
+
+            size_parsed += parser_private->NAL_length_size;
+            cxt->list.data[cxt->list.num_items].stpos = size_parsed;
+            size_parsed += NAL_length; /* skip NAL bytes */
+            /* end position is exclusive */
+            cxt->list.data[cxt->list.num_items].edpos = size_parsed;
+            cxt->list.num_items++;
+            if (cxt->list.num_items >= MAX_IBUFS_PER_SC)
+            {
+                ETRACE("num of list items exceeds the limit (%d).", MAX_IBUFS_PER_SC);
+                break;
+            }
+
+            size_left = cubby->size - size_parsed;
+        }
+
+        if (size_left != 0 && parser_private->length_prefix_verified == 0)
+        {
+            WTRACE("Elementary stream is not aligned (%d).", size_left);
+
+            /* attempt to correct length prefix to start-code prefix only once, if it succeeds, we will
+                    * alway treat bit stream as start-code prefixed; otherwise, treat bit stream as length prefixed
+                    */
+            parser_private->length_prefix_verified = 1;
+            viddec_sc_parse_cubby_cxt_t temp_cubby = cxt->parse_cubby;
+
+            viddec_parser_ops_t *ops = pcontext->parser_ops;
+            int ret = ops->parse_sc((void *)&temp_cubby,
+                                    NULL, /* context, not used */
+                                    &(cxt->sc_prefix_info));
+
+            /* found start code */
+            if (ret == 1)
+            {
+                WTRACE("Stream was supposed to be length prefixed, but actually is start-code prefixed.");
+                parser_private->NAL_length_size = 0;
+                parser_private->bitstream_pattern = H264_BS_SC_PREFIXED;
+                /* reset parsing data */
+                for (i = 0; i < MAX_NUM_PICTURES; i++)
+                {
+                    query_data->pic_data[i].num_slices = 0;
+                }
+                query_data->num_pictures = 0;
+                cxt->list.num_items = 0;
+            }
+        }
+    }
+
+
+    if (parser_private->bitstream_pattern == H264_BS_SC_PREFIXED)
+    {
+        viddec_sc_parse_cubby_cxt_t cubby;
+        /*  memory copy without updating cxt->parse_cubby */
+        cubby = cxt->parse_cubby;
+        viddec_parser_ops_t *ops = pcontext->parser_ops;
+        int ret = 0;
+
+        while (1)
+        {
+            ret = ops->parse_sc((void *)&cubby,
+                                NULL, /* context, not used */
+                                &(cxt->sc_prefix_info));
+            if (ret == 1)
+            {
+                if (cxt->list.num_items == 0)
+                {
+                    cxt->list.data[0].stpos = cubby.sc_end_pos;
+                }
+                else
+                {
+                    cxt->list.data[cxt->list.num_items].stpos =
+                        cubby.sc_end_pos + cxt->list.data[cxt->list.num_items - 1].stpos;
+                    cxt->list.data[cxt->list.num_items - 1].edpos = cxt->list.data[cxt->list.num_items].stpos - H264_SC_SIZE;
+                }
+
+                cubby.phase = 0;
+                cubby.buf = cxt->parse_cubby.buf +
+                            cxt->list.data[cxt->list.num_items].stpos;
+
+                cubby.size = cxt->parse_cubby.size -
+                             cxt->list.data[cxt->list.num_items].stpos;
+
+                cxt->list.num_items++;
+                if (cxt->list.num_items >= MAX_IBUFS_PER_SC)
+                {
+                    WTRACE("Num items exceeds the limit!");
+                    /* not fatal, just stop parsing */
+                    break;
+                }
+            }
+            else
+            {
+                if (cxt->list.num_items == 0)
+                {
+                    cxt->list.num_items = 1;
+                    parser_private->bitstream_pattern = H264_BS_SINGLE_NAL;
+                    WTRACE("Stream was supposed to be SC prefixed, but actually contains a single NAL.");
+                }
+                cxt->list.data[cxt->list.num_items - 1].edpos = cxt->parse_cubby.size;
+                break;
+            }
+        }
+
+    }
+
+    if (parser_private->bitstream_pattern == H264_BS_SINGLE_NAL)
+    {
+        cxt->list.num_items = 1;
+        cxt->list.data[0].stpos = 0;
+        cxt->list.data[0].edpos = cxt->parse_cubby.size;
+    }
+
+
+    return VBP_OK;
+}
+
+/**
+*
+* process parsing result after a NAL unit is parsed
+*
+*/
+uint32 vbp_process_parsing_result_h264secure( vbp_context *pcontext, int i)
+{
+    /* NOTE(review): i is a list index but is bounded by MAX_NUM_SLICES --
+     * confirm the list size and the slice limit are meant to match. */
+    if (i >= MAX_NUM_SLICES)
+    {
+        return VBP_PARM;
+    }
+
+    struct h264_viddec_parser* parser = NULL;
+    parser = (struct h264_viddec_parser *)&( pcontext->parser_cxt->codec_data[0]);
+    vbp_data_h264* query_data = (vbp_data_h264 *)pcontext->query_data;
+
+    /* trace which NAL unit type was just parsed */
+    switch (parser->info.nal_unit_type)
+    {
+    case h264_NAL_UNIT_TYPE_SLICE:
+        VTRACE("slice header is parsed.");
+        break;
+
+    case  h264_NAL_UNIT_TYPE_IDR:
+        VTRACE("IDR header is parsed.");
+        break;
+    case h264_NAL_UNIT_TYPE_SEI:
+        VTRACE("SEI header is parsed.");
+        break;
+
+    case h264_NAL_UNIT_TYPE_SPS:
+        VTRACE("SPS header is parsed.");
+        break;
+
+    case h264_NAL_UNIT_TYPE_PPS:
+        VTRACE("PPS header is parsed.");
+        break;
+
+    case h264_NAL_UNIT_TYPE_Acc_unit_delimiter:
+        VTRACE("ACC unit delimiter is parsed.");
+        break;
+
+    case h264_NAL_UNIT_TYPE_EOSeq:
+        VTRACE("EOSeq is parsed.");
+        break;
+
+    case h264_NAL_UNIT_TYPE_EOstream:
+        VTRACE("EOStream is parsed");
+        break;
+
+    default:
+        WTRACE("unknown header %d is parsed.", parser->info.nal_unit_type);
+        break;
+    }
+
+    /* the picture buffer is full (and this is not a field picture):
+     * tell the caller to stop feeding NAL units for this sample.
+     * Note: the original returned (error == VBP_OK ? VBP_MULTI : error)
+     * but error was always VBP_OK here, so VBP_MULTI is returned directly. */
+    if (query_data->num_pictures == MAX_NUM_PICTURES && parser->info.img.field_pic_flag != 1)
+    {
+        WTRACE("more than one frame in the buffer is found(%d)", query_data->num_pictures);
+        return VBP_MULTI;
+    }
+    return VBP_OK;
+}
+
+/*
+*
+* fill query data structure after sample buffer is parsed
+*
+*/
+uint32 vbp_populate_query_data_h264secure(vbp_context *pcontext)
+{
+    /* Fill the consumer-visible query data from the parser state.
+     * (The original declared an unused local `private` from
+     * pcontext->parser_private; it was dead code and has been removed.) */
+    vbp_data_h264 *query_data = NULL;
+    struct h264_viddec_parser *parser = NULL;
+
+    parser = (struct h264_viddec_parser *)pcontext->parser_cxt->codec_data;
+    query_data = (vbp_data_h264 *)pcontext->query_data;
+
+    vbp_set_codec_data_h264secure(parser, query_data);
+
+    vbp_set_pic_parse_buffer_h264secure(parser, query_data);
+
+    /* buffer number */
+    query_data->buf_number = buffer_counter;
+
+    /* VAIQMatrixBufferH264 (scaling lists) */
+    vbp_set_scaling_list_h264secure(parser, query_data->IQ_matrix_buf);
+
+    return VBP_OK;
+}
+
+uint32 vbp_update_data_h264secure(vbp_context *pcontext, void *newdata, uint32 size)
+{
+    /* Walk the buffer of pre-parsed slice-header records produced by the
+     * secure parser. Each record is: 4-byte key, fixed-size
+     * VAParseSliceHeaderGroupBuffer, then variable-size payload
+     * (reorder commands, prediction weights, memory-management ops).
+     * parsedSliceHeader.size covers the whole record after the key. */
+    uint32 error = VBP_OK;
+    uint32 offset = 0;
+    uint32 key = 0;
+    uint32 i,j;
+    uint32 weight_pos = 0;
+    vbp_h264_sliceheader* sliceheader_p;
+    uint32 reordercmdnum = 0;
+    vbp_h264_sliceheader sliceheader;
+    sliceheader_p = &sliceheader;
+    memset(sliceheader_p, 0, sizeof(vbp_h264_sliceheader));
+    uint32 slice_num = 0;
+    vbp_data_h264* query_data = (vbp_data_h264*)pcontext->query_data;
+
+    /* reset per-sample parsing results before re-populating them */
+    for (i = 0; i < MAX_NUM_PICTURES; i++)
+    {
+        query_data->pic_data[i].num_slices = 0;
+    }
+    query_data->num_pictures = 0;
+
+    /* bound the key read: the original read 4 bytes whenever offset < size,
+     * which could read past the end of the buffer */
+    while (offset + sizeof(uint32) <= size) {
+        memcpy(&key, (uint8_t *)newdata+offset, sizeof(uint32));
+        VTRACE("key = %x", key);
+
+        if ((key == TERMINATE_KEY) || (key == BUF_TOO_SMALL_KEY) || (key == SLICE_TOO_MAY_KEY)) {
+            break;
+        }
+
+        slice_num++;
+        offset += sizeof(uint32);
+        VTRACE("offset = %d", offset);
+
+        /* the fixed-size header portion must fit inside the buffer */
+        if (offset + sizeof(VAParseSliceHeaderGroupBuffer) > size)
+        {
+            ETRACE("Slice header record is truncated.");
+            return VBP_DATA;
+        }
+
+        sliceheader_p->sliceHeaderKey = key;
+        memcpy((void*)&sliceheader_p->parsedSliceHeader,
+            (uint8_t *)newdata+offset,
+            sizeof(VAParseSliceHeaderGroupBuffer));
+
+        reordercmdnum = sliceheader_p->parsedSliceHeader.num_reorder_cmds[0] +
+                        sliceheader_p->parsedSliceHeader.num_reorder_cmds[1];
+
+        /* BUGFIX: weight_pos must be recomputed per slice; the original
+         * accumulated it across iterations, mispositioning pic_marking for
+         * every slice after the first */
+        weight_pos = 0;
+        for (i = 0; i < 2; i++) {
+            for (j = 0; j < 32; j++) {
+                /* each bit in [i][0] contributes 2 int16 weight entries,
+                 * each bit in [i][1] contributes 4 */
+                if ((sliceheader_p->parsedSliceHeader.weights_present[i][0] >> j) & 0x01) {
+                    weight_pos += 2;
+                }
+                if ((sliceheader_p->parsedSliceHeader.weights_present[i][1] >> j) & 0x01) {
+                    weight_pos += 4;
+                }
+            }
+        }
+        /* locate the variable-size sections trailing the fixed header */
+        sliceheader_p->reorder_cmd = (uint32_t *)((uint8_t *)newdata + offset + sizeof(VAParseSliceHeaderGroupBuffer));
+        sliceheader_p->weight      = (int16_t *)((uint8_t *)sliceheader_p->reorder_cmd + reordercmdnum * sizeof(uint32));
+
+        sliceheader_p->pic_marking = (uint32_t *)((uint8_t *)sliceheader_p->weight + weight_pos);
+        offset += sliceheader_p->parsedSliceHeader.size;
+        error = pcontext->parser_ops->update_data(pcontext->parser_cxt,
+                                                sliceheader_p, sizeof(vbp_h264_sliceheader));
+        if (error != VBP_OK)
+        {
+            ETRACE("update_data error = 0x%x",error);
+            return error;
+        }
+
+        error = vbp_add_pic_data_h264secure(pcontext);
+        if (error != VBP_OK)
+        {
+            ETRACE("vbp_add_pic_data_h264secure error = 0x%x",error);
+            return error;
+        }
+
+        error = vbp_add_slice_data_h264secure(pcontext,key);
+        if (error != VBP_OK)
+        {
+            ETRACE("vbp_add_slice_data_h264secure error = 0x%x",error);
+            return error;
+        }
+    }
+    if (key != TERMINATE_KEY)
+    {
+        ETRACE("Don't find a terminated key 0xFFFFFF!");
+        return VBP_DATA;
+    } else {
+        if (slice_num < 1) {
+            ETRACE("Don't find a valid slice header!");
+            return VBP_DATA;
+        }
+    }
+    error = vbp_populate_query_data_h264secure(pcontext);
+
+    if (error != VBP_OK)
+    {
+        ETRACE("vbp_populate_query_data_h264secure error = 0x%x",error);
+        return error;
+    }
+
+    return error;
+}
+
diff --git a/mixvbp/vbp_manager/secvideo/merrifield/vbp_h264secure_parser.h b/mixvbp/vbp_manager/secvideo/merrifield/vbp_h264secure_parser.h
new file mode 100755
index 0000000..4b08360
--- /dev/null
+++ b/mixvbp/vbp_manager/secvideo/merrifield/vbp_h264secure_parser.h
@@ -0,0 +1,73 @@
+/* INTEL CONFIDENTIAL
+* Copyright (c) 2009 Intel Corporation.  All rights reserved.
+*
+* The source code contained or described herein and all documents
+* related to the source code ("Material") are owned by Intel
+* Corporation or its suppliers or licensors.  Title to the
+* Material remains with Intel Corporation or its suppliers and
+* licensors.  The Material contains trade secrets and proprietary
+* and confidential information of Intel or its suppliers and
+* licensors. The Material is protected by worldwide copyright and
+* trade secret laws and treaty provisions.  No part of the Material
+* may be used, copied, reproduced, modified, published, uploaded,
+* posted, transmitted, distributed, or disclosed in any way without
+* Intel's prior express written permission.
+*
+* No license under any patent, copyright, trade secret or other
+* intellectual property right is granted to or conferred upon you
+* by disclosure or delivery of the Materials, either expressly, by
+* implication, inducement, estoppel or otherwise. Any license
+* under such intellectual property rights must be express and
+* approved by Intel in writing.
+*
+*/
+
+
+#ifndef VBP_H264SECURE_PARSER_H
+#define VBP_H264SECURE_PARSER_H
+
+/*
+ * setup parser's entry points
+ */
+uint32 vbp_init_parser_entries_h264secure(vbp_context *pcontext);
+
+/*
+ * allocate query data
+ */
+uint32 vbp_allocate_query_data_h264secure(vbp_context *pcontext);
+
+/*
+ * free query data
+ */
+uint32 vbp_free_query_data_h264secure(vbp_context *pcontext);
+
+/*
+ * parse initialization data
+ */
+uint32 vbp_parse_init_data_h264secure(vbp_context *pcontext);
+
+/*
+ * parse start code. Only support lenght prefixed mode. Start
+ * code prefixed is not supported.
+ */
+uint32 vbp_parse_start_code_h264secure(vbp_context *pcontext);
+
+/*
+ * process parsing result
+ */
+uint32 vbp_process_parsing_result_h264secure(vbp_context *pcontext, int list_index);
+
+/*
+ * query parsing result
+ */
+uint32 vbp_populate_query_data_h264secure(vbp_context *pcontext);
+
+/*
+ * update the parsing result with extra data
+ */
+
+uint32 vbp_update_data_h264secure(vbp_context *pcontext, void *newdata, uint32 size);
+
+
+#endif /*VBP_H264SECURE_PARSER_H*/
+
diff --git a/mixvbp/vbp_manager/vbp_loader.c b/mixvbp/vbp_manager/vbp_loader.c
index 972ab2d..ab5914f 100755
--- a/mixvbp/vbp_manager/vbp_loader.c
+++ b/mixvbp/vbp_manager/vbp_loader.c
@@ -174,7 +174,7 @@
     return error;
 }
 
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
 uint32 vbp_update(Handle hcontext, void *newdata, uint32 size, void **data)
 {
     vbp_context *pcontext;
diff --git a/mixvbp/vbp_manager/vbp_loader.h b/mixvbp/vbp_manager/vbp_loader.h
index ad4b106..46da5d7 100755
--- a/mixvbp/vbp_manager/vbp_loader.h
+++ b/mixvbp/vbp_manager/vbp_loader.h
@@ -50,6 +50,9 @@
 #ifndef uint32
 typedef unsigned int uint32;
 #endif
+#ifndef int16
+typedef short int16;
+#endif
 
 typedef void *Handle;
 
@@ -171,6 +174,7 @@
 
     int bit_rate;
 
+    int has_slice;
 } vbp_codec_data_h264;
 
 typedef struct _vbp_slice_data_h264
@@ -226,6 +230,10 @@
 
     vbp_codec_data_h264* codec_data;
 
+#ifdef USE_SLICE_HEADER_PARSING
+    VAParsePictureParameterBuffer* pic_parse_buffer;
+#endif
+
 } vbp_data_h264;
 
 /*
@@ -406,7 +414,7 @@
 #ifdef USE_HW_VP8
     VBP_VP8,
 #endif
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
     VBP_H264SECURE,
 #endif
 };
@@ -459,8 +467,7 @@
  */
 uint32 vbp_flush(Handle hcontent);
 
-
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
 /*
  * update the the vbp context using the new data
  * @param hcontext: handle to VBP context.
diff --git a/mixvbp/vbp_manager/vbp_utils.c b/mixvbp/vbp_manager/vbp_utils.c
index 0da65b5..edd94c1 100755
--- a/mixvbp/vbp_manager/vbp_utils.c
+++ b/mixvbp/vbp_manager/vbp_utils.c
@@ -35,7 +35,7 @@
 #ifdef USE_HW_VP8
 #include "vbp_vp8_parser.h"
 #endif
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
 #include "vbp_h264secure_parser.h"
 #endif
 
@@ -131,7 +131,7 @@
         break;
 #endif
 
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
     case VBP_H264SECURE:
         parser_name = "libmixvbp_h264secure.so";
         break;
@@ -177,11 +177,11 @@
 #ifdef USE_HW_VP8
         SET_FUNC_POINTER(VBP_VP8, vp8);
 #endif
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
         SET_FUNC_POINTER(VBP_H264SECURE, h264secure);
 #endif
     }
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
     if (pcontext->parser_type == VBP_H264SECURE) {
         pcontext->func_update_data = vbp_update_data_h264secure;
     }
@@ -562,8 +562,7 @@
     return VBP_OK;
 }
 
-
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
 /**
  *
  * provide query data back to the consumer
diff --git a/mixvbp/vbp_manager/vbp_utils.h b/mixvbp/vbp_manager/vbp_utils.h
index 455951c..1f54ae6 100755
--- a/mixvbp/vbp_manager/vbp_utils.h
+++ b/mixvbp/vbp_manager/vbp_utils.h
@@ -64,7 +64,7 @@
 typedef uint32 (*function_parse_start_code)(vbp_context* cxt);
 typedef uint32 (*function_process_parsing_result)(vbp_context* cxt, int i);
 typedef uint32 (*function_populate_query_data)(vbp_context* cxt);
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
 typedef uint32 (*function_update_data)(vbp_context* cxt, void *newdata, uint32 size);
 #endif
 
@@ -101,7 +101,7 @@
     function_parse_start_code func_parse_start_code;
     function_process_parsing_result func_process_parsing_result;
     function_populate_query_data func_populate_query_data;
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
     function_update_data func_update_data;
 #endif
 };
diff --git a/mixvbp/vbp_plugin/h264/Android.mk b/mixvbp/vbp_plugin/h264/Android.mk
index d667227..c27b102 100755
--- a/mixvbp/vbp_plugin/h264/Android.mk
+++ b/mixvbp/vbp_plugin/h264/Android.mk
@@ -68,3 +68,37 @@
 include $(BUILD_SHARED_LIBRARY)
 
 endif
+
+include $(CLEAR_VARS)
+PLATFORM_SUPPORT_SLICE_HEADER_PARSER := merrifield
+
+ifneq ($(filter $(TARGET_BOARD_PLATFORM),$(PLATFORM_SUPPORT_SLICE_HEADER_PARSER)),)
+LOCAL_SRC_FILES := \
+        h264parse.c \
+        h264parse_bsd.c \
+        h264parse_math.c \
+        h264parse_mem.c \
+        h264parse_sei.c \
+        h264parse_pps.c \
+        h264parse_sps.c \
+        h264parse_dpb.c \
+        h264parse_sh.c \
+        secvideo/merrifield/viddec_h264secure_parse.c \
+        mix_vbp_h264_stubs.c
+
+LOCAL_CFLAGS := -DUSE_SLICE_HEADER_PARSING
+
+LOCAL_C_INCLUDES :=   \
+    $(LOCAL_PATH)/include   \
+    $(MIXVBP_DIR)/include   \
+    $(MIXVBP_DIR)/vbp_manager/include   \
+    $(MIXVBP_DIR)/vbp_manager/h264/include
+
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE := libmixvbp_h264secure
+LOCAL_SHARED_LIBRARIES := libmixvbp liblog
+
+include $(BUILD_SHARED_LIBRARY)
+
+endif
+
diff --git a/mixvbp/vbp_plugin/h264/h264parse_dpb.c b/mixvbp/vbp_plugin/h264/h264parse_dpb.c
index e0b7c8f..883057a 100644
--- a/mixvbp/vbp_plugin/h264/h264parse_dpb.c
+++ b/mixvbp/vbp_plugin/h264/h264parse_dpb.c
@@ -2778,7 +2778,7 @@
                 h264_dpb_unmark_for_reference(p_dpb, p_dpb->active_fs->fs_idc);
                 h264_dpb_remove_ref_list(p_dpb, p_dpb->active_fs->fs_idc);
                 //h264_send_new_display_frame(0x01); //send ignore_frame signal to Host
-#ifndef USE_AVC_SHORT_FORMAT
+#if (!defined USE_AVC_SHORT_FORMAT && !defined USE_SLICE_HEADER_PARSING)
                 ///  Add into drop-out list for all frms in dpb without display
                 if (!(viddec_h264_get_is_non_existent(p_dpb->active_fs)))   {
                     if ( viddec_h264_get_is_output(&(p_dpb->fs[p_dpb->fs_dpb_idc[idx]])) ) {			//// This frame has been displayed but not released
@@ -3000,7 +3000,7 @@
     h264_dpb_set_active_fs(p_dpb, fs_idc);
     viddec_h264_set_is_frame_used(p_dpb->active_fs, 0);
 
-#ifndef USE_AVC_SHORT_FORMAT
+#if (!defined USE_AVC_SHORT_FORMAT && !defined USE_SLICE_HEADER_PARSING)
     //add to support frame relocation interface to host
     if (!(viddec_h264_get_is_non_existent(p_dpb->active_fs)))
     {
@@ -3355,7 +3355,7 @@
     if (viddec_h264_get_is_non_existent(p_dpb->active_fs) == 0)
     {
         *existing = 1;
-#ifndef USE_AVC_SHORT_FORMAT
+#if (!defined USE_AVC_SHORT_FORMAT && !defined USE_SLICE_HEADER_PARSING)
         p_dpb->frame_id_need_to_be_displayed[p_dpb->frame_numbers_need_to_be_displayed]=p_dpb->active_fs->fs_idc;
         p_dpb->frame_numbers_need_to_be_displayed++;
 #endif
diff --git a/mixvbp/vbp_plugin/h264/include/h264.h b/mixvbp/vbp_plugin/h264/include/h264.h
index f0da7ed..aefd3b7 100755
--- a/mixvbp/vbp_plugin/h264/include/h264.h
+++ b/mixvbp/vbp_plugin/h264/include/h264.h
@@ -982,6 +982,8 @@
         uint8_t			last_I_frame_idc;
         uint8_t			sei_b_state_ready;
         uint8_t			gop_err_flag;
+
+        uint8_t         has_slice;
     } h264_Info;
 
 
diff --git a/mixvbp/vbp_plugin/h264/secvideo/merrifield/viddec_h264secure_parse.c b/mixvbp/vbp_plugin/h264/secvideo/merrifield/viddec_h264secure_parse.c
new file mode 100755
index 0000000..2e5ac06
--- /dev/null
+++ b/mixvbp/vbp_plugin/h264/secvideo/merrifield/viddec_h264secure_parse.c
@@ -0,0 +1,987 @@
+#include "viddec_parser_ops.h"
+
+#include "viddec_pm.h"
+
+#include "h264.h"
+#include "h264parse.h"
+#include "h264parse.h"
+#include "h264parse_dpb.h"
+#include <vbp_trace.h>
+
+/*
+ * Slice-header fields pre-parsed by the secure slice-header producer.
+ * NOTE(review): this layout mirrors the record laid out before
+ * VAParseSliceHeaderGroupBuffer payloads (see vbp_update_data_h264secure
+ * in the manager) -- field order/sizes must not change independently.
+ */
+typedef struct _ParsedSliceHeaderH264Secure
+{
+    /* byte count of this record after its 4-byte key, including the
+       variable-size payload; used to advance to the next record */
+    unsigned int size;
+
+    unsigned char nal_ref_idc;
+    unsigned char nal_unit_type;
+    unsigned char slice_type;
+    unsigned char redundant_pic_cnt;
+
+    unsigned short first_mb_in_slice;
+    char slice_qp_delta;
+    char slice_qs_delta;
+
+    unsigned char luma_log2_weight_denom;
+    unsigned char chroma_log2_weight_denom;
+    unsigned char cabac_init_idc;
+    unsigned char pic_order_cnt_lsb;
+
+    unsigned char pic_parameter_set_id;
+    unsigned short idr_pic_id;
+    unsigned char colour_plane_id;
+
+    char slice_alpha_c0_offset_div2;
+    char slice_beta_offset_div2;
+    unsigned char slice_group_change_cycle;
+    unsigned char disable_deblocking_filter_idc;
+
+    unsigned int frame_num;
+    int delta_pic_order_cnt_bottom;
+    int delta_pic_order_cnt[2];
+
+    /* number of ref-pic-list reordering commands for list 0 / list 1 */
+    unsigned char num_reorder_cmds[2];
+    unsigned char num_ref_active_minus1[2];
+
+    /* per-list bitmasks; each set bit in [i][0] contributes 2 int16
+       weight entries and each set bit in [i][1] contributes 4 (see the
+       weight_pos computation in vbp_update_data_h264secure) */
+    unsigned int weights_present[2][2];
+
+    unsigned short num_mem_man_ops;
+
+    union {
+        struct {
+            unsigned field_pic_flag                     : 1;
+            unsigned bottom_field_flag                  : 1;
+            unsigned num_ref_idx_active_override_flag   : 1;
+            unsigned direct_spatial_mv_pred_flag        : 1;
+            unsigned no_output_of_prior_pics_flag       : 1;
+            unsigned long_term_reference_flag           : 1;
+            unsigned idr_flag                           : 1;
+            unsigned anchor_pic_flag                    : 1;
+            unsigned inter_view_flag                    : 1;
+        } bits;
+
+        unsigned short value;
+    } flags;
+    unsigned short view_id;
+    unsigned char priority_id;
+    unsigned char temporal_id;
+} ParsedSliceHeaderH264Secure;
+
+
+/* One parsed slice header plus pointers into its variable-size payload.
+ * The three pointers reference sections of the caller-supplied buffer
+ * (no ownership is taken): reorder commands, prediction weight entries,
+ * and memory-management (dec_ref_pic_marking) operations. */
+typedef struct _vbp_h264_sliceheader {
+    uint32_t sliceHeaderKey;
+    ParsedSliceHeaderH264Secure parsedSliceHeader;
+    uint32_t *reorder_cmd;
+    int16_t *weight;
+    uint32_t *pic_marking;
+} vbp_h264_sliceheader;
+
+
+/* Init function which can be called to intialized local context on open and flush and preserve*/
+/* Parser init hook, invoked on open and on flush.
+ * When preserve is non-zero, SPS/PPS (sequence and GOP) state is kept
+ * and only the per-picture info is re-initialized. */
+void viddec_h264secure_init(void *ctxt, uint32_t *persist_mem, uint32_t preserve)
+{
+    struct h264_viddec_parser *context = ctxt;
+
+    if (preserve == 0)
+    {
+        /* full reset: discard sequence and GOP level state as well */
+        h264_init_sps_pps(context, persist_mem);
+    }
+
+    /* picture level info is always re-initialized */
+    h264_init_Info_under_sps_pps_level(&context->info);
+}
+
+
+/* ------------------------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------------------------ */
+/*
+ * Parse one NAL unit for the secure (slice-header-parsing) path.
+ * Reads the NAL header, checks for a frame boundary, then dispatches on
+ * nal_unit_type. Slice/IDR NALs only set pInfo->has_slice here -- the
+ * actual slice header arrives pre-parsed via the update_data path.
+ * Returns an h264_Status code.
+ */
+uint32_t viddec_h264secure_parse(void *parent, void *ctxt)
+{
+    struct h264_viddec_parser* parser = ctxt;
+
+    h264_Info * pInfo = &(parser->info);
+
+    h264_Status status = H264_STATUS_ERROR;
+
+
+    uint8_t nal_ref_idc = 0;
+
+    ///// Parse NAL Unit header
+    pInfo->img.g_new_frame = 0;
+    pInfo->push_to_cur = 1;
+    pInfo->is_current_workload_done =0;
+    pInfo->nal_unit_type = 0;
+
+    h264_Parse_NAL_Unit(parent, pInfo, &nal_ref_idc);
+
+    ///// Check frame boundary for non-VCL delimiter
+    h264_check_previous_frame_end(pInfo);
+
+    pInfo->has_slice = 0;
+
+    //////// Parse valid NAL unit
+    switch ( pInfo->nal_unit_type )
+    {
+    case h264_NAL_UNIT_TYPE_IDR:
+        if (pInfo->got_start) {
+            pInfo->img.recovery_point_found |= 1;
+        }
+
+        pInfo->sei_rp_received = 0;
+
+        /* fallthrough: an IDR NAL is also a coded slice */
+    case h264_NAL_UNIT_TYPE_SLICE:
+        pInfo->has_slice = 1;
+        break;
+
+    ///// * Main profile doesn't support Data Partition, skipped.... *////
+    case h264_NAL_UNIT_TYPE_DPA:
+    case h264_NAL_UNIT_TYPE_DPB:
+    case h264_NAL_UNIT_TYPE_DPC:
+        //OS_INFO("***********************DP feature, not supported currently*******************\n");
+        status = H264_STATUS_NOTSUPPORT;
+        break;
+
+        //// * Parsing SEI info *////
+    case h264_NAL_UNIT_TYPE_SEI:
+        status = H264_STATUS_OK;
+
+        //OS_INFO("*****************************SEI**************************************\n");
+        if (pInfo->sps_valid) {
+            //h264_user_data_t user_data; /// Replace with tmp buffer while porting to FW
+            pInfo->number_of_first_au_info_nal_before_first_slice++;
+            /// parsing the SEI info
+            status = h264_Parse_Supplemental_Enhancement_Information_Message(parent, pInfo);
+        }
+
+        //h264_rbsp_trailing_bits(pInfo);
+        break;
+    case h264_NAL_UNIT_TYPE_SPS:
+    {
+        //OS_INFO("*****************************SPS**************************************\n");
+        ///
+        /// Can not define local SPS since the Current local stack size limitation!
+        /// Could be changed after the limitation gone
+        ///
+        VTRACE("h264_NAL_UNIT_TYPE_SPS +++");
+        uint8_t  old_sps_id=0;
+        vui_seq_parameters_t_not_used vui_seq_not_used;
+
+        old_sps_id = pInfo->active_SPS.seq_parameter_set_id;
+        h264_memset(&(pInfo->active_SPS), 0x0, sizeof(seq_param_set_used));
+
+        VTRACE("old_sps_id = %d", old_sps_id);
+        status = h264_Parse_SeqParameterSet(parent, pInfo, &(pInfo->active_SPS), &vui_seq_not_used, (int32_t *)pInfo->TMP_OFFSET_REFFRM_PADDR_GL);
+        if (status == H264_STATUS_OK) {
+            VTRACE("pInfo->active_SPS.seq_parameter_set_id = %d", pInfo->active_SPS.seq_parameter_set_id);
+            /* persist the newly parsed SPS so it can be restored later */
+            h264_Parse_Copy_Sps_To_DDR(pInfo, &(pInfo->active_SPS), pInfo->active_SPS.seq_parameter_set_id);
+            pInfo->sps_valid = 1;
+
+            if (1==pInfo->active_SPS.pic_order_cnt_type) {
+                h264_Parse_Copy_Offset_Ref_Frames_To_DDR(pInfo,(int32_t *)pInfo->TMP_OFFSET_REFFRM_PADDR_GL,pInfo->active_SPS.seq_parameter_set_id);
+            }
+        }
+        ///// Restore the active SPS if new arrival's id changed
+        if (old_sps_id>=MAX_NUM_SPS) {
+            /* no valid previous SPS: invalidate the active one */
+            h264_memset(&(pInfo->active_SPS), 0x0, sizeof(seq_param_set_used));
+            pInfo->active_SPS.seq_parameter_set_id = 0xff;
+        }
+        else {
+            if (old_sps_id!=pInfo->active_SPS.seq_parameter_set_id)  {
+                h264_Parse_Copy_Sps_From_DDR(pInfo, &(pInfo->active_SPS), old_sps_id);
+            }
+            else  {
+                //h264_memset(&(pInfo->active_SPS), 0x0, sizeof(seq_param_set));
+              //  h264_Parse_Copy_Sps_From_DDR(pInfo, &(pInfo->active_SPS), old_sps_id);
+                VTRACE("old_sps_id==pInfo->active_SPS.seq_parameter_set_id");
+               // pInfo->active_SPS.seq_parameter_set_id = 0xff;
+            }
+        }
+
+        pInfo->number_of_first_au_info_nal_before_first_slice++;
+        VTRACE("h264_NAL_UNIT_TYPE_SPS ---");
+    }
+    break;
+    case h264_NAL_UNIT_TYPE_PPS:
+    {
+        //OS_INFO("*****************************PPS**************************************\n");
+        VTRACE("h264_NAL_UNIT_TYPE_PPS +++");
+        uint32_t old_sps_id = pInfo->active_SPS.seq_parameter_set_id;
+        uint32_t old_pps_id = pInfo->active_PPS.pic_parameter_set_id;
+        VTRACE("old_sps_id = %d, old_pps_id = %d", old_sps_id, old_pps_id);
+
+        h264_memset(&pInfo->active_PPS, 0x0, sizeof(pic_param_set));
+        pInfo->number_of_first_au_info_nal_before_first_slice++;
+
+        if (h264_Parse_PicParameterSet(parent, pInfo, &pInfo->active_PPS)== H264_STATUS_OK)
+        {
+            /* activate the SPS referenced by the new PPS */
+            h264_Parse_Copy_Sps_From_DDR(pInfo, &(pInfo->active_SPS), pInfo->active_PPS.seq_parameter_set_id);
+            VTRACE("pInfo->active_PPS.seq_parameter_set_id = %d", pInfo->active_PPS.seq_parameter_set_id);
+            VTRACE("pInfo->active_SPS.seq_parameter_set_id = %d", pInfo->active_SPS.seq_parameter_set_id);
+            if (old_sps_id != pInfo->active_SPS.seq_parameter_set_id)
+            {
+                pInfo->Is_SPS_updated = 1;
+            }
+            if (pInfo->active_SPS.seq_parameter_set_id != 0xff) {
+                h264_Parse_Copy_Pps_To_DDR(pInfo, &pInfo->active_PPS, pInfo->active_PPS.pic_parameter_set_id);
+                pInfo->got_start = 1;
+                if (pInfo->sei_information.recovery_point)
+                {
+                    pInfo->img.recovery_point_found |= 2;
+
+                    //// Enable the RP recovery if no IDR ---Cisco
+                    if ((pInfo->img.recovery_point_found & 1)==0)
+                        pInfo->sei_rp_received = 1;
+                }
+            }
+            else
+            {
+                /* referenced SPS invalid: roll back to the previous one */
+                h264_Parse_Copy_Sps_From_DDR(pInfo, &(pInfo->active_SPS), old_sps_id);
+            }
+        } else {
+            /* PPS parse failed: restore the previously active SPS/PPS */
+            if (old_sps_id<MAX_NUM_SPS)
+                h264_Parse_Copy_Sps_From_DDR(pInfo, &(pInfo->active_SPS), old_sps_id);
+            if (old_pps_id<MAX_NUM_PPS)
+                h264_Parse_Copy_Pps_From_DDR(pInfo, &(pInfo->active_PPS), old_pps_id);
+        }
+        VTRACE("pInfo->active_PPS.seq_parameter_set_id = %d", pInfo->active_PPS.seq_parameter_set_id);
+        VTRACE("pInfo->active_SPS.seq_parameter_set_id = %d", pInfo->active_SPS.seq_parameter_set_id);
+        VTRACE("h264_NAL_UNIT_TYPE_PPS ---");
+    } //// End of PPS parsing
+    break;
+
+
+    case h264_NAL_UNIT_TYPE_EOSeq:
+    case h264_NAL_UNIT_TYPE_EOstream:
+
+        h264_init_dpb(&(pInfo->dpb));
+
+        pInfo->is_current_workload_done=1;
+
+        status = H264_STATUS_OK;
+        pInfo->number_of_first_au_info_nal_before_first_slice++;
+        break;
+
+    case h264_NAL_UNIT_TYPE_Acc_unit_delimiter:
+        ///// primary_pic_type
+        {
+            uint32_t code = 0xff;
+            int32_t ret = 0;
+            ret = viddec_pm_get_bits(parent, (uint32_t *)&(code), 3);
+
+            if (ret != -1) {
+                //if(pInfo->got_start && (code == 0))
+                //{
+                //pInfo->img.recovery_point_found |= 4;
+                //}
+                pInfo->primary_pic_type_plus_one = (uint8_t)(code)+1;
+                status = H264_STATUS_OK;
+            }
+            pInfo->number_of_first_au_info_nal_before_first_slice++;
+            break;
+        }
+
+    case h264_NAL_UNIT_TYPE_Reserved1:
+    case h264_NAL_UNIT_TYPE_Reserved2:
+    case h264_NAL_UNIT_TYPE_Reserved3:
+    case h264_NAL_UNIT_TYPE_Reserved4:
+    case h264_NAL_UNIT_TYPE_Reserved5:
+        status = H264_STATUS_OK;
+        pInfo->number_of_first_au_info_nal_before_first_slice++;
+        break;
+
+    case h264_NAL_UNIT_TYPE_filler_data:
+        status = H264_STATUS_OK;
+        break;
+    case h264_NAL_UNIT_TYPE_ACP:
+        break;
+    case h264_NAL_UNIT_TYPE_SPS_extension:
+    case h264_NAL_UNIT_TYPE_unspecified:
+    case h264_NAL_UNIT_TYPE_unspecified2:
+        status = H264_STATUS_OK;
+        //nothing
+        break;
+    default:
+        status = H264_STATUS_OK;
+        break;
+    }
+
+    //pInfo->old_nal_unit_type = pInfo->nal_unit_type;
+    /* NOTE(review): old_nal_unit_type appears to feed frame-boundary
+     * detection (h264_check_previous_frame_end) -- confirm which types
+     * must be remembered here. */
+    switch ( pInfo->nal_unit_type )
+    {
+    case h264_NAL_UNIT_TYPE_IDR:
+    case h264_NAL_UNIT_TYPE_SLICE:
+    case h264_NAL_UNIT_TYPE_Acc_unit_delimiter:
+    case h264_NAL_UNIT_TYPE_SPS:
+    case h264_NAL_UNIT_TYPE_PPS:
+    case h264_NAL_UNIT_TYPE_SEI:
+    case h264_NAL_UNIT_TYPE_EOSeq:
+    case h264_NAL_UNIT_TYPE_EOstream:
+    case h264_NAL_UNIT_TYPE_Reserved1:
+    case h264_NAL_UNIT_TYPE_Reserved2:
+    case h264_NAL_UNIT_TYPE_Reserved3:
+    case h264_NAL_UNIT_TYPE_Reserved4:
+    case h264_NAL_UNIT_TYPE_Reserved5:
+    {
+        pInfo->old_nal_unit_type = pInfo->nal_unit_type;
+        break;
+    }
+    default:
+        break;
+    }
+
+    return status;
+}
+
+/* ------------------------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------------------------ */
+
+/* Report the memory footprint required by the secure h264 parser:
+ * the parser context itself plus the persistent SPS/PPS storage. */
+void viddec_h264secure_get_context_size(viddec_parser_memory_sizes_t *size)
+{
+    size->context_size = sizeof(struct h264_viddec_parser);
+
+    /* persistent area: every SPS and PPS set, plus the picture-order-count
+     * reference offset tables (one table per SPS and one scratch copy) */
+    uint32_t persist = MAX_NUM_SPS * sizeof(seq_param_set_all);
+    persist += MAX_NUM_PPS * sizeof(pic_param_set);
+    persist += (MAX_NUM_SPS + 1) * sizeof(int32_t) * MAX_NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE;
+    size->persist_size = persist;
+}
+
+
+
+/*--------------------------------------------------------------------------------------------------*/
+//
+// The syntax elements reordering_of_pic_nums_idc, abs_diff_pic_num_minus1, and long_term_pic_num
+// specify the change from the initial reference picture lists to the reference picture lists to be used
+// for decoding the slice
+
+// reordering_of_pic_nums_idc:
+// 0: abs_diff_pic_num_minus1 is present and corresponds to a difference to subtract from a picture number prediction value
+// 1: abs_diff_pic_num_minus1 is present and corresponds to a difference to add to a picture number prediction value
+// 2: long_term_pic_num is present and specifies the long-term picture number for a reference picture
+// 3: End loop for reordering of the initial reference picture list
+//
+/*--------------------------------------------------------------------------------------------------*/
+
+/*
+ * h264secure_Parse_Ref_Pic_List_Reordering()
+ *
+ * Rebuild the reference-picture-list reordering data (lists 0 and 1) of
+ * SliceHeader from the pre-parsed commands carried in newdata (a
+ * vbp_h264_sliceheader).  Each 32-bit entry of reorder_cmd[] packs
+ * reordering_of_pic_nums_idc in the top 8 bits and its argument
+ * (abs_diff_pic_num_minus1 or long_term_pic_num) in the low 24 bits.
+ * List-1 commands are stored immediately after the list-0 commands in
+ * the same reorder_cmd[] array.
+ *
+ * Returns H264_STATUS_OK on success, or H264_SliceHeader_ERROR when a
+ * command count exceeds MAX_NUM_REF_FRAMES.
+ */
+h264_Status h264secure_Parse_Ref_Pic_List_Reordering(h264_Info* pInfo, void *newdata, h264_Slice_Header_t *SliceHeader)
+{
+    //h264_Slice_Header_t* SliceHeader = &pInfo->SliceHeader;
+    int32_t reorder= -1;
+    uint32_t code;
+    vbp_h264_sliceheader* sliceheader_p = (vbp_h264_sliceheader*) newdata;
+
+    // List-0 reordering can exist for any slice type except I and SI.
+    if ((SliceHeader->slice_type != h264_PtypeI) && (SliceHeader->slice_type != h264_PtypeSI))
+    {
+        SliceHeader->sh_refpic_l0.ref_pic_list_reordering_flag = (uint8_t)(sliceheader_p->parsedSliceHeader.num_reorder_cmds[0] > 0);
+        VTRACE("sliceheader_p->parsedSliceHeader.num_reorder_cmds[0] = %d",
+            sliceheader_p->parsedSliceHeader.num_reorder_cmds[0]);
+        if (SliceHeader->sh_refpic_l0.ref_pic_list_reordering_flag)
+        {
+            if(sliceheader_p->parsedSliceHeader.num_reorder_cmds[0] > MAX_NUM_REF_FRAMES) {
+                return H264_SliceHeader_ERROR;
+            }
+            for (reorder = 0; reorder < sliceheader_p->parsedSliceHeader.num_reorder_cmds[0]; reorder++) {
+                // idc in the top byte, 24-bit argument in the rest.
+                code = sliceheader_p->reorder_cmd[reorder];
+                SliceHeader->sh_refpic_l0.reordering_of_pic_nums_idc[reorder] = code >> 24;
+                VTRACE("SliceHeader->sh_refpic_l0.reordering_of_pic_nums_idc[%d] = %d", 
+                    reorder,
+                    SliceHeader->sh_refpic_l0.reordering_of_pic_nums_idc[reorder]
+                    );
+                if ((SliceHeader->sh_refpic_l0.reordering_of_pic_nums_idc[reorder] == 0) || (SliceHeader->sh_refpic_l0.reordering_of_pic_nums_idc[reorder] == 1))
+                {
+                    SliceHeader->sh_refpic_l0.list_reordering_num[reorder].abs_diff_pic_num_minus1 = code & 0xFFFFFF;
+                    VTRACE("abs_diff_pic_num_minus1 = %d", SliceHeader->sh_refpic_l0.list_reordering_num[reorder].abs_diff_pic_num_minus1);
+                }
+                else if (SliceHeader->sh_refpic_l0.reordering_of_pic_nums_idc[reorder] == 2)
+                {
+                    SliceHeader->sh_refpic_l0.list_reordering_num[reorder].long_term_pic_num = code & 0xFFFFFF;
+                    VTRACE("long_term_pic_num = %d", SliceHeader->sh_refpic_l0.list_reordering_num[reorder].long_term_pic_num);
+                }
+                // idc == 3 terminates the command list (H.264 7.3.3.1).
+                if (SliceHeader->sh_refpic_l0.reordering_of_pic_nums_idc[reorder] == 3)
+                {
+                    VTRACE("break here");
+                    break;
+                }
+            }
+            // Force the end-of-loop marker after the last consumed command.
+            // NOTE(review): when the loop runs to completion, reorder equals
+            // num_reorder_cmds[0] (up to MAX_NUM_REF_FRAMES); this assumes the
+            // idc array has room for the terminator entry -- confirm bounds.
+            SliceHeader->sh_refpic_l0.reordering_of_pic_nums_idc[reorder] = 3;
+        }
+    }
+
+    // List-1 reordering only exists for B slices.
+    if (SliceHeader->slice_type == h264_PtypeB)
+    {
+        SliceHeader->sh_refpic_l1.ref_pic_list_reordering_flag = (uint8_t)(sliceheader_p->parsedSliceHeader.num_reorder_cmds[1] > 0);
+        VTRACE("sliceheader_p->parsedSliceHeader.num_reorder_cmds[1] = %d",
+            sliceheader_p->parsedSliceHeader.num_reorder_cmds[1]);
+        if (SliceHeader->sh_refpic_l1.ref_pic_list_reordering_flag)
+        {
+            if (sliceheader_p->parsedSliceHeader.num_reorder_cmds[1] > MAX_NUM_REF_FRAMES) {
+                return H264_SliceHeader_ERROR;
+            }
+            for (reorder = 0; reorder < sliceheader_p->parsedSliceHeader.num_reorder_cmds[1]; reorder++) {
+                // List-1 commands follow the num_reorder_cmds[0] list-0 entries.
+                code = *(sliceheader_p->reorder_cmd + sliceheader_p->parsedSliceHeader.num_reorder_cmds[0] + reorder);
+                SliceHeader->sh_refpic_l1.reordering_of_pic_nums_idc[reorder] = code >> 24;
+                if ((SliceHeader->sh_refpic_l1.reordering_of_pic_nums_idc[reorder] == 0) || (SliceHeader->sh_refpic_l1.reordering_of_pic_nums_idc[reorder] == 1))
+                {
+                    SliceHeader->sh_refpic_l1.list_reordering_num[reorder].abs_diff_pic_num_minus1 = code & 0xFFFFFF;
+                }
+                else if (SliceHeader->sh_refpic_l1.reordering_of_pic_nums_idc[reorder] == 2)
+                {
+                    SliceHeader->sh_refpic_l1.list_reordering_num[reorder].long_term_pic_num = code & 0xFFFFFF;
+                }
+                if (SliceHeader->sh_refpic_l1.reordering_of_pic_nums_idc[reorder] == 3)
+                {
+                    break;
+                }
+            }
+            // Same terminator convention (and the same bounds caveat) as list 0.
+            SliceHeader->sh_refpic_l1.reordering_of_pic_nums_idc[reorder] = 3;
+        }
+    }
+    return H264_STATUS_OK;
+}
+
+/*
+ * h264secure_Parse_Pred_Weight_Table()
+ *
+ * Rebuild the explicit weighted-prediction table of SliceHeader from the
+ * pre-parsed data in newdata (a vbp_h264_sliceheader).  "Weight present"
+ * information arrives as per-list bitmasks (weights_present[list][0] =
+ * luma, weights_present[list][1] = chroma; bit i corresponds to reference
+ * index i), while the actual weight/offset values are consumed
+ * sequentially from the flat weight[] array via weightidx.  Entries
+ * without explicit weights receive the spec defaults
+ * (weight = 1 << log2_weight_denom, offset = 0).
+ *
+ * Always returns H264_STATUS_OK.
+ */
+h264_Status h264secure_Parse_Pred_Weight_Table(h264_Info* pInfo, void *newdata, h264_Slice_Header_t *SliceHeader)
+{
+    uint32_t i =0, j=0;
+    uint8_t flag;
+    uint32_t weightidx = 0;   // running cursor into sliceheader_p->weight[]
+    vbp_h264_sliceheader* sliceheader_p = (vbp_h264_sliceheader*) newdata;
+
+    SliceHeader->sh_predwttbl.luma_log2_weight_denom = sliceheader_p->parsedSliceHeader.luma_log2_weight_denom;
+
+    // chroma denom is only coded when chroma is present (chroma_format_idc != 0).
+    if (pInfo->active_SPS.sps_disp.chroma_format_idc != 0)
+    {
+        SliceHeader->sh_predwttbl.chroma_log2_weight_denom = sliceheader_p->parsedSliceHeader.chroma_log2_weight_denom;
+    }
+    // List-0 weights, one entry per active reference index.
+    for (i=0; i< SliceHeader->num_ref_idx_l0_active; i++)
+    {
+        flag = ((sliceheader_p->parsedSliceHeader.weights_present[0][0] >> i) & 0x01);
+        SliceHeader->sh_predwttbl.luma_weight_l0_flag = flag;
+        if (SliceHeader->sh_predwttbl.luma_weight_l0_flag)
+        {
+            SliceHeader->sh_predwttbl.luma_weight_l0[i] = sliceheader_p->weight[weightidx++];
+            SliceHeader->sh_predwttbl.luma_offset_l0[i] = sliceheader_p->weight[weightidx++];
+        }
+        else
+        {
+            // Spec default when no explicit luma weight is signalled.
+            SliceHeader->sh_predwttbl.luma_weight_l0[i] = (1 << SliceHeader->sh_predwttbl.luma_log2_weight_denom);
+            SliceHeader->sh_predwttbl.luma_offset_l0[i] = 0;
+        }
+
+        if (pInfo->active_SPS.sps_disp.chroma_format_idc != 0)
+        {
+            flag = ((sliceheader_p->parsedSliceHeader.weights_present[0][1] >> i) & 0x01);
+            SliceHeader->sh_predwttbl.chroma_weight_l0_flag = flag;
+            if (SliceHeader->sh_predwttbl.chroma_weight_l0_flag)
+            {
+                // j indexes the two chroma components (Cb, Cr).
+                for (j=0; j <2; j++)
+                {
+                    SliceHeader->sh_predwttbl.chroma_weight_l0[i][j] = sliceheader_p->weight[weightidx++];
+                    SliceHeader->sh_predwttbl.chroma_offset_l0[i][j] = sliceheader_p->weight[weightidx++];
+                }
+            }
+            else
+            {
+                for (j=0; j <2; j++)
+                {
+                    SliceHeader->sh_predwttbl.chroma_weight_l0[i][j] = (1 << SliceHeader->sh_predwttbl.chroma_log2_weight_denom);
+                    SliceHeader->sh_predwttbl.chroma_offset_l0[i][j] = 0;
+                }
+            }
+        }
+
+    }
+
+    // List-1 weights exist only for B slices.
+    if (SliceHeader->slice_type == h264_PtypeB)
+    {
+        for (i=0; i< SliceHeader->num_ref_idx_l1_active; i++)
+        {
+            flag = ((sliceheader_p->parsedSliceHeader.weights_present[1][0] >> i) & 0x01);
+            SliceHeader->sh_predwttbl.luma_weight_l1_flag = flag;
+            if (SliceHeader->sh_predwttbl.luma_weight_l1_flag)
+            {
+                SliceHeader->sh_predwttbl.luma_weight_l1[i] = sliceheader_p->weight[weightidx++];
+                SliceHeader->sh_predwttbl.luma_offset_l1[i] = sliceheader_p->weight[weightidx++];
+            }
+            else
+            {
+                SliceHeader->sh_predwttbl.luma_weight_l1[i] = (1 << SliceHeader->sh_predwttbl.luma_log2_weight_denom);
+                SliceHeader->sh_predwttbl.luma_offset_l1[i] = 0;
+            }
+
+            if (pInfo->active_SPS.sps_disp.chroma_format_idc != 0)
+            {
+                flag = ((sliceheader_p->parsedSliceHeader.weights_present[1][1] >> i) & 0x01);
+                SliceHeader->sh_predwttbl.chroma_weight_l1_flag = flag;
+                if (SliceHeader->sh_predwttbl.chroma_weight_l1_flag)
+                {
+                    for (j=0; j <2; j++)
+                    {
+                        SliceHeader->sh_predwttbl.chroma_weight_l1[i][j] = sliceheader_p->weight[weightidx++];
+                        SliceHeader->sh_predwttbl.chroma_offset_l1[i][j] = sliceheader_p->weight[weightidx++];
+                    }
+                }
+                else
+                {
+                    for (j=0; j <2; j++)
+                    {
+                        SliceHeader->sh_predwttbl.chroma_weight_l1[i][j] = (1 << SliceHeader->sh_predwttbl.chroma_log2_weight_denom);
+                        SliceHeader->sh_predwttbl.chroma_offset_l1[i][j] = 0;
+                    }
+                }
+            }
+
+        }
+    }
+
+    return H264_STATUS_OK;
+} ///// End of h264_Parse_Pred_Weight_Table
+
+/*
+ * h264secure_Parse_Dec_Ref_Pic_Marking()
+ *
+ * Rebuild the dec_ref_pic_marking() data of SliceHeader from newdata (a
+ * vbp_h264_sliceheader).  For IDR NAL units only the two marking flags
+ * are copied.  Otherwise the packed MMCO commands in pic_marking[] are
+ * expanded: each word carries the operation in the top byte and its
+ * argument in the low 24 bits; MMCO 3 consumes a second word holding
+ * long_term_frame_idx.
+ *
+ * Returns H264_STATUS_OK, or H264_STATUS_ERROR if more than
+ * NUM_MMCO_OPERATIONS commands are present.
+ */
+h264_Status h264secure_Parse_Dec_Ref_Pic_Marking(h264_Info* pInfo, void *newdata,h264_Slice_Header_t *SliceHeader)
+{
+    vbp_h264_sliceheader* sliceheader_p = (vbp_h264_sliceheader*) newdata;
+
+    uint8_t i = 0;      // number of MMCO commands consumed
+    uint32_t idx = 0;   // cursor into sliceheader_p->pic_marking[]
+    uint32_t code;
+    if (pInfo->nal_unit_type == h264_NAL_UNIT_TYPE_IDR)
+    {
+        SliceHeader->sh_dec_refpic.no_output_of_prior_pics_flag = (uint8_t)sliceheader_p->parsedSliceHeader.flags.bits.no_output_of_prior_pics_flag;
+        SliceHeader->sh_dec_refpic.long_term_reference_flag = (uint8_t)sliceheader_p->parsedSliceHeader.flags.bits.long_term_reference_flag;
+        pInfo->img.long_term_reference_flag = SliceHeader->sh_dec_refpic.long_term_reference_flag;
+    }
+    else
+    {
+        // Any MMCO command at all implies adaptive marking mode.
+        SliceHeader->sh_dec_refpic.adaptive_ref_pic_marking_mode_flag = (uint8_t)(sliceheader_p->parsedSliceHeader.num_mem_man_ops > 0);
+        VTRACE("SliceHeader->sh_dec_refpic.adaptive_ref_pic_marking_mode_flag = %d", SliceHeader->sh_dec_refpic.adaptive_ref_pic_marking_mode_flag);
+        ///////////////////////////////////////////////////////////////////////////////////////
+        //adaptive_ref_pic_marking_mode_flag Reference picture marking mode specified
+        //                              Sliding window reference picture marking mode: A marking mode
+        //                              providing a first-in first-out mechanism for short-term reference pictures.
+        //                              Adaptive reference picture marking mode: A reference picture
+        //                              marking mode providing syntax elements to specify marking of
+        //                              reference pictures as "unused for reference" and to assign long-term
+        //                              frame indices.
+        ///////////////////////////////////////////////////////////////////////////////////////
+
+        if (SliceHeader->sh_dec_refpic.adaptive_ref_pic_marking_mode_flag)
+        {
+            // Commands are consumed until the MMCO == 0 terminator
+            // (checked in the while condition, which also advances i).
+            do
+            {
+                if (i < NUM_MMCO_OPERATIONS)
+                {
+                    code = sliceheader_p->pic_marking[idx++];
+                    SliceHeader->sh_dec_refpic.memory_management_control_operation[i] = (uint8_t)(code >> 24);
+                    if ((SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 1) || (SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 3))
+                    {
+                        SliceHeader->sh_dec_refpic.difference_of_pic_num_minus1[i] = code & 0xFFFFFF;
+                    }
+
+                    if (SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 2)
+                    {
+                        SliceHeader->sh_dec_refpic.long_term_pic_num[i] = code & 0xFFFFFF;
+                    }
+
+                    if (SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 6)
+                    {
+                        SliceHeader->sh_dec_refpic.long_term_frame_idx[i] = code & 0xFFFFFF;
+                    }
+
+                    // MMCO 3 carries a second argument in the next word.
+                    if (SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 3) {
+                        SliceHeader->sh_dec_refpic.long_term_frame_idx[i] = sliceheader_p->pic_marking[idx++];
+                    }
+
+                    if (SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 4)
+                    {
+                        SliceHeader->sh_dec_refpic.max_long_term_frame_idx_plus1[i] = code & 0xFFFFFF;
+                    }
+
+                    // MMCO 5 resets the DPB; remember it for POC handling.
+                    if (SliceHeader->sh_dec_refpic.memory_management_control_operation[i] == 5)
+                    {
+                        pInfo->img.curr_has_mmco_5 = 1;
+                    }
+                }
+
+                // Too many commands: bail out before overrunning the arrays.
+                if (i >= NUM_MMCO_OPERATIONS) {
+                    return H264_STATUS_ERROR;
+                }
+
+            } while (SliceHeader->sh_dec_refpic.memory_management_control_operation[i++] != 0);
+        }
+    }
+
+    SliceHeader->sh_dec_refpic.dec_ref_pic_marking_count = i;
+
+    return H264_STATUS_OK;
+}
+
+
+/*
+ * h264secure_Update_Slice_Header()
+ *
+ * Populate SliceHeader from the firmware/TEE pre-parsed slice header in
+ * newdata (a vbp_h264_sliceheader) instead of parsing the bitstream:
+ * slice type, picture structure, POC fields, reference list sizes and
+ * reordering, prediction weights, marking commands, CABAC/QP/deblocking
+ * parameters, with range validation along the way.
+ *
+ * Returns H264_STATUS_OK on success, H264_STATUS_NOTSUPPORT on any
+ * unsupported or out-of-range field.
+ */
+uint32_t h264secure_Update_Slice_Header(h264_Info* pInfo, void *newdata, h264_Slice_Header_t *SliceHeader)
+{
+    h264_Status retStatus = H264_STATUS_OK;
+    uint8_t data;
+    vbp_h264_sliceheader* sliceheader_p = (vbp_h264_sliceheader*) newdata;
+    ///// first_mb_in_slice
+    SliceHeader->first_mb_in_slice = sliceheader_p->parsedSliceHeader.first_mb_in_slice;
+
+    ///// slice_type (values >= 5 alias 0..4 per the spec, hence % 5)
+    data = sliceheader_p->parsedSliceHeader.slice_type;
+    SliceHeader->slice_type = (data%5);
+    if (SliceHeader->slice_type > h264_PtypeI) {
+        retStatus = H264_STATUS_NOTSUPPORT;
+        return retStatus;
+    }
+
+    // Activate the SPS/PPS referenced by this slice.
+    // NOTE(review): this return value is overwritten below without being
+    // checked -- an activation failure is silently ignored; confirm intent.
+    SliceHeader->pic_parameter_id  = (uint8_t)sliceheader_p->parsedSliceHeader.pic_parameter_set_id;
+    retStatus = h264_active_par_set(pInfo, SliceHeader);
+
+    switch (pInfo->active_SPS.profile_idc)
+    {
+        case h264_ProfileBaseline:
+        case h264_ProfileMain:
+        case h264_ProfileExtended:
+            // These profiles have no 8x8 transform / scaling matrices.
+            pInfo->active_PPS.transform_8x8_mode_flag=0;
+            pInfo->active_PPS.pic_scaling_matrix_present_flag =0;
+            pInfo->active_PPS.second_chroma_qp_index_offset = pInfo->active_PPS.chroma_qp_index_offset;
+            /* fall through */
+        default:
+            break;
+    }
+
+    int32_t max_mb_num=0;
+
+    SliceHeader->frame_num = (int32_t)sliceheader_p->parsedSliceHeader.frame_num;
+
+    /// Picture structure: default to a full frame, refine below.
+    SliceHeader->structure = FRAME;
+    SliceHeader->field_pic_flag = 0;
+    SliceHeader->bottom_field_flag = 0;
+
+    if (!(pInfo->active_SPS.sps_disp.frame_mbs_only_flag))
+    {
+        /// field_pic_flag
+        SliceHeader->field_pic_flag = (uint8_t)sliceheader_p->parsedSliceHeader.flags.bits.field_pic_flag;
+
+        if (SliceHeader->field_pic_flag)
+        {
+            SliceHeader->bottom_field_flag = (uint8_t)sliceheader_p->parsedSliceHeader.flags.bits.bottom_field_flag;
+            SliceHeader->structure = SliceHeader->bottom_field_flag? BOTTOM_FIELD: TOP_FIELD;
+        }
+    }
+
+    ////// Check valid or not of first_mb_in_slice
+    if (SliceHeader->structure == FRAME) {
+        max_mb_num = pInfo->img.FrameHeightInMbs * pInfo->img.PicWidthInMbs;
+    } else {
+        max_mb_num = pInfo->img.FrameHeightInMbs * pInfo->img.PicWidthInMbs/2;
+    }
+
+
+    // MBAFF frames address MB pairs, so the MB address doubles.
+    // NOTE(review): this tests pInfo->SliceHeader (the previous slice), not
+    // the local SliceHeader being filled -- matches the legacy parser but
+    // looks suspicious; confirm against the non-secure code path.
+    if (pInfo->active_SPS.sps_disp.mb_adaptive_frame_field_flag & (!(pInfo->SliceHeader.field_pic_flag))) {
+        SliceHeader->first_mb_in_slice <<=1;
+    }
+
+    if (SliceHeader->first_mb_in_slice >= max_mb_num) {
+        retStatus = H264_STATUS_NOTSUPPORT;
+        return retStatus;
+    }
+
+
+    if (pInfo->nal_unit_type == h264_NAL_UNIT_TYPE_IDR)
+    {
+        SliceHeader->idr_pic_id = sliceheader_p->parsedSliceHeader.idr_pic_id;
+    }
+
+    // POC type 0: explicit LSB (+ optional bottom-field delta).
+    if (pInfo->active_SPS.pic_order_cnt_type == 0)
+    {
+        SliceHeader->pic_order_cnt_lsb = (uint32_t)sliceheader_p->parsedSliceHeader.pic_order_cnt_lsb;
+
+        if ((pInfo->active_PPS.pic_order_present_flag) && !(SliceHeader->field_pic_flag))
+        {
+            SliceHeader->delta_pic_order_cnt_bottom = sliceheader_p->parsedSliceHeader.delta_pic_order_cnt_bottom;
+        }
+        else
+        {
+            SliceHeader->delta_pic_order_cnt_bottom = 0;
+        }
+    }
+
+    // POC type 1 with non-zero deltas.
+    if ((pInfo->active_SPS.pic_order_cnt_type == 1) && !(pInfo->active_SPS.delta_pic_order_always_zero_flag))
+    {
+        SliceHeader->delta_pic_order_cnt[0] = sliceheader_p->parsedSliceHeader.delta_pic_order_cnt[0];
+        if ((pInfo->active_PPS.pic_order_present_flag) && !(SliceHeader->field_pic_flag))
+        {
+            SliceHeader->delta_pic_order_cnt[1] = sliceheader_p->parsedSliceHeader.delta_pic_order_cnt[1];
+        }
+    }
+
+    if (pInfo->active_PPS.redundant_pic_cnt_present_flag)
+    {
+        SliceHeader->redundant_pic_cnt = sliceheader_p->parsedSliceHeader.redundant_pic_cnt;
+        if (SliceHeader->redundant_pic_cnt > 127) {
+            retStatus = H264_STATUS_NOTSUPPORT;
+            return retStatus;
+        }
+    } else {
+        SliceHeader->redundant_pic_cnt = 0;
+    }
+
+    int32_t  slice_alpha_c0_offset, slice_beta_offset;
+
+    /// direct_spatial_mv_pred_flag
+    if (SliceHeader->slice_type == h264_PtypeB)
+    {
+        SliceHeader->direct_spatial_mv_pred_flag = (uint8_t)sliceheader_p->parsedSliceHeader.flags.bits.direct_spatial_mv_pred_flag;
+    }
+    else
+    {
+        SliceHeader->direct_spatial_mv_pred_flag = 0;
+    }
+    //
+    // Reset ref_idx to the PPS defaults and override if signalled.
+    //
+    SliceHeader->num_ref_idx_l0_active = pInfo->active_PPS.num_ref_idx_l0_active;
+    SliceHeader->num_ref_idx_l1_active = pInfo->active_PPS.num_ref_idx_l1_active;
+
+    if ((SliceHeader->slice_type == h264_PtypeP) || (SliceHeader->slice_type == h264_PtypeSP) || (SliceHeader->slice_type == h264_PtypeB))
+    {
+        SliceHeader->num_ref_idx_active_override_flag  = (uint8_t)sliceheader_p->parsedSliceHeader.flags.bits.num_ref_idx_active_override_flag;
+        if (SliceHeader->num_ref_idx_active_override_flag)
+        {
+            SliceHeader->num_ref_idx_l0_active = sliceheader_p->parsedSliceHeader.num_ref_active_minus1[0]+ 1;
+            if (SliceHeader->slice_type == h264_PtypeB)
+            {
+                SliceHeader->num_ref_idx_l1_active = sliceheader_p->parsedSliceHeader.num_ref_active_minus1[1]+1;
+            }
+        }
+    }
+
+    if (SliceHeader->slice_type != h264_PtypeB) {
+        SliceHeader->num_ref_idx_l1_active = 0;
+    }
+
+    if ((SliceHeader->num_ref_idx_l0_active > MAX_NUM_REF_FRAMES) || (SliceHeader->num_ref_idx_l1_active > MAX_NUM_REF_FRAMES))
+    {
+        retStatus = H264_STATUS_NOTSUPPORT;
+        return retStatus;
+    }
+
+    if (h264secure_Parse_Ref_Pic_List_Reordering(pInfo,newdata,SliceHeader) != H264_STATUS_OK)
+    {
+        retStatus = H264_STATUS_NOTSUPPORT;
+        return retStatus;
+    }
+
+
+    ////
+    //// Parse pred_weight_table but do not store it, because it will be reparsed in HW
+    ////
+    if (((pInfo->active_PPS.weighted_pred_flag) && ((SliceHeader->slice_type == h264_PtypeP) || (SliceHeader->slice_type == h264_PtypeSP))) || ((pInfo->active_PPS.weighted_bipred_idc == 1) && (SliceHeader->slice_type == h264_PtypeB)))
+    {
+        if (h264secure_Parse_Pred_Weight_Table(pInfo,newdata, SliceHeader) != H264_STATUS_OK)
+        {
+            retStatus = H264_STATUS_NOTSUPPORT;
+            return retStatus;
+        }
+    }
+
+
+
+    ////
+    //// Parse dec_ref_pic_marking if this is a reference slice
+    ////
+    if (SliceHeader->nal_ref_idc != 0)
+    {
+        if (h264secure_Parse_Dec_Ref_Pic_Marking(pInfo, newdata, SliceHeader) != H264_STATUS_OK)
+        {
+            retStatus = H264_STATUS_NOTSUPPORT;
+            return retStatus;
+        }
+    }
+
+    // cabac_init_idc is only coded for CABAC non-I/SI slices.
+    if ((pInfo->active_PPS.entropy_coding_mode_flag) && (SliceHeader->slice_type != h264_PtypeI) && (SliceHeader->slice_type != h264_PtypeSI))
+    {
+        SliceHeader->cabac_init_idc = sliceheader_p->parsedSliceHeader.cabac_init_idc;
+    }
+    else
+    {
+        SliceHeader->cabac_init_idc = 0;
+    }
+
+    if (SliceHeader->cabac_init_idc > 2)
+    {
+        retStatus = H264_STATUS_NOTSUPPORT;
+        return retStatus;
+    }
+
+    // SliceQP = 26 + pic_init_qp_minus26 + slice_qp_delta must stay in [0,51].
+    SliceHeader->slice_qp_delta = sliceheader_p->parsedSliceHeader.slice_qp_delta;
+
+    if ( (SliceHeader->slice_qp_delta > (25-pInfo->active_PPS.pic_init_qp_minus26)) || (SliceHeader->slice_qp_delta < -(26+pInfo->active_PPS.pic_init_qp_minus26)))
+    {
+        retStatus = H264_STATUS_NOTSUPPORT;
+        return retStatus;
+    }
+
+    if ((SliceHeader->slice_type == h264_PtypeSP)|| (SliceHeader->slice_type == h264_PtypeSI) )
+    {
+        if (SliceHeader->slice_type == h264_PtypeSP)
+        {
+            SliceHeader->sp_for_switch_flag  = 0;
+        }
+        SliceHeader->slice_qs_delta = sliceheader_p->parsedSliceHeader.slice_qs_delta;
+        if ( (SliceHeader->slice_qs_delta > (25-pInfo->active_PPS.pic_init_qs_minus26)) || (SliceHeader->slice_qs_delta < -(26+pInfo->active_PPS.pic_init_qs_minus26)) )
+        {
+            retStatus = H264_STATUS_NOTSUPPORT;
+            return retStatus;
+        }
+    }
+    if (pInfo->active_PPS.deblocking_filter_control_present_flag)
+    {
+        SliceHeader->disable_deblocking_filter_idc = sliceheader_p->parsedSliceHeader.disable_deblocking_filter_idc;
+        if (SliceHeader->disable_deblocking_filter_idc != 1)
+        {
+            // Deblocking offsets are coded as div2 values; range is +/-12.
+            SliceHeader->slice_alpha_c0_offset_div2 = sliceheader_p->parsedSliceHeader.slice_alpha_c0_offset_div2;
+            slice_alpha_c0_offset = SliceHeader->slice_alpha_c0_offset_div2 << 1;
+            if (slice_alpha_c0_offset < -12 || slice_alpha_c0_offset > 12)
+            {
+                retStatus = H264_STATUS_NOTSUPPORT;
+                return retStatus;
+            }
+
+            SliceHeader->slice_beta_offset_div2 = sliceheader_p->parsedSliceHeader.slice_beta_offset_div2;
+            slice_beta_offset = SliceHeader->slice_beta_offset_div2 << 1;
+            if (slice_beta_offset < -12 || slice_beta_offset > 12)
+            {
+                retStatus = H264_STATUS_NOTSUPPORT;
+                return retStatus;
+            }
+        }
+        else
+        {
+            SliceHeader->slice_alpha_c0_offset_div2 = 0;
+            SliceHeader->slice_beta_offset_div2 = 0;
+        }
+    }
+
+    retStatus = H264_STATUS_OK;
+    return retStatus;
+}
+/*
+ * viddec_h264secure_update()
+ *
+ * Parser-ops "update_data" entry point: feed one firmware-parsed slice
+ * header (data, a vbp_h264_sliceheader) into the H.264 parser state, then
+ * run the usual new-picture / new-frame bookkeeping (DPB store, old/new
+ * slice swap, POC decoding, reference list update).
+ *
+ * NOTE(review): 'size' is unused here; the slice-header structure is
+ * assumed complete -- confirm with the caller.
+ *
+ * Returns the status of h264secure_Update_Slice_Header().
+ */
+uint32_t viddec_h264secure_update(void *parent, void *data, uint32_t size)
+{
+    viddec_pm_cxt_t * parser_cxt = (viddec_pm_cxt_t *)parent;
+    struct h264_viddec_parser* parser = (struct h264_viddec_parser*) &parser_cxt->codec_data[0];
+    h264_Info * pInfo = &(parser->info);
+
+    h264_Status status = H264_STATUS_ERROR;
+    vbp_h264_sliceheader* sliceheader_p = (vbp_h264_sliceheader*) data;
+
+    pInfo->img.g_new_frame = 0;
+    pInfo->push_to_cur = 1;
+    pInfo->is_current_workload_done =0;
+    // NOTE(review): the zero assignment below is immediately overwritten.
+    pInfo->nal_unit_type = 0;
+    pInfo->nal_unit_type = sliceheader_p->parsedSliceHeader.nal_unit_type;
+
+    h264_Slice_Header_t next_SliceHeader;
+
+    /// Reset next slice header
+    h264_memset(&next_SliceHeader, 0x0, sizeof(h264_Slice_Header_t));
+    next_SliceHeader.nal_ref_idc = sliceheader_p->parsedSliceHeader.nal_ref_idc;
+
+    // An AUD-declared I-only access unit counts as a recovery point.
+    if ( (1==pInfo->primary_pic_type_plus_one)&&(pInfo->got_start))
+    {
+        pInfo->img.recovery_point_found |=4;
+    }
+    pInfo->primary_pic_type_plus_one = 0;
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Step 2: Parsing slice header
+    ////////////////////////////////////////////////////////////////////////////
+    /// PWT: weights are reparsed in HW, so no byte/bit span is tracked here.
+    pInfo->h264_pwt_start_byte_offset=0;
+    pInfo->h264_pwt_start_bit_offset=0;
+    pInfo->h264_pwt_end_byte_offset=0;
+    pInfo->h264_pwt_end_bit_offset=0;
+    pInfo->h264_pwt_enabled =0;
+    /// IDR flag
+    next_SliceHeader.idr_flag = (pInfo->nal_unit_type == h264_NAL_UNIT_TYPE_IDR);
+
+    /// Fill next_SliceHeader from the pre-parsed data.
+    status = h264secure_Update_Slice_Header(pInfo, sliceheader_p, &next_SliceHeader);
+
+    pInfo->sei_information.recovery_point = 0;
+    pInfo->img.current_slice_num++;
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Step 3: Processing if new picture coming
+    //  1) if it's the second field
+    //  2) if it's a new frame
+    ////////////////////////////////////////////////////////////////////////////
+    //AssignQuantParam(pInfo);
+    if (h264_is_new_picture_start(pInfo, next_SliceHeader, pInfo->SliceHeader))
+    {
+        //
+        ///----------------- New Picture.boundary detected--------------------
+        //
+        pInfo->img.g_new_pic++;
+
+        //
+        // Complete previous picture
+        h264_dpb_store_previous_picture_in_dpb(pInfo, 0, 0); //curr old
+
+        //
+        // Update slice structures:
+        h264_update_old_slice(pInfo, next_SliceHeader);  //cur->old; next->cur;
+
+        //
+        // 1) if resolution change: reset dpb
+        // 2) else: init frame store
+        h264_update_img_info(pInfo);  //img, dpb
+
+        //
+        ///----------------- New frame.boundary detected--------------------
+        //
+        pInfo->img.second_field = h264_is_second_field(pInfo);
+        if (pInfo->img.second_field == 0)
+        {
+            pInfo->img.g_new_frame = 1;
+            h264_dpb_update_queue_dangling_field(pInfo);
+            h264_dpb_gaps_in_frame_num_mem_management(pInfo);
+        }
+        /// Decoding POC
+        h264_hdr_decoding_poc (pInfo, 0, 0);
+        //
+        /// Init Frame Store for next frame
+        h264_dpb_init_frame_store (pInfo);
+        pInfo->img.current_slice_num = 1;
+
+        if (pInfo->SliceHeader.first_mb_in_slice != 0)
+        {
+            ////Come here means we have slice lost at the beginning, since no FMO support
+            pInfo->SliceHeader.sh_error |= (pInfo->SliceHeader.structure << 17);
+        }
+    }
+    else ///////////////////////////////////////////////////// If Not a picture start
+    {
+        /// Update slice structures: cur->old; next->cur;
+        h264_update_old_slice(pInfo, next_SliceHeader);
+        /// 1) if resolution change: reset dpb
+        /// 2) else: update img info
+        h264_update_img_info(pInfo);
+    }
+    //////////////////////////////////////////////////////////////
+    // Step 4: DPB reference list init and reordering
+    //////////////////////////////////////////////////////////////
+
+    //////////////////////////////////////////////// Update frame Type--- IDR/I/P/B for frame or field
+    h264_update_frame_type(pInfo);
+
+    h264_dpb_update_ref_lists( pInfo);
+
+    return status;
+}
+
+
diff --git a/videodecoder/Android.mk b/videodecoder/Android.mk
index 32abd4e..94d3605 100644
--- a/videodecoder/Android.mk
+++ b/videodecoder/Android.mk
@@ -34,6 +34,7 @@
     LOCAL_CFLAGS += -DUSE_AVC_SHORT_FORMAT -DUSE_GEN_HW
 endif
 
+
 PLATFORM_USE_HYBRID_DRIVER := \
     baytrail
 
@@ -41,6 +42,13 @@
     LOCAL_CFLAGS += -DUSE_HYBRID_DRIVER
 endif
 
+PLATFORM_SUPPORT_SLICE_HEADER_PARSER := \
+    merrifield
+
+ifneq ($(filter $(TARGET_BOARD_PLATFORM),$(PLATFORM_SUPPORT_SLICE_HEADER_PARSER)),)
+    LOCAL_CFLAGS += -DUSE_SLICE_HEADER_PARSING
+endif
+
 #LOCAL_LDLIBS += -lpthread
 
 LOCAL_SHARED_LIBRARIES := \
diff --git a/videodecoder/VideoDecoderAVC.cpp b/videodecoder/VideoDecoderAVC.cpp
old mode 100644
new mode 100755
index 49c2efd..c3f3bd4
--- a/videodecoder/VideoDecoderAVC.cpp
+++ b/videodecoder/VideoDecoderAVC.cpp
@@ -514,6 +514,7 @@
     // update DPB  from the reference list in each slice.
     for (uint32_t slice = 0; slice < picData->num_slices; slice++) {
         sliceParam = &(picData->slc_data[slice].slc_parms);
+
         for (int32_t list = 0; list < 2; list++) {
             refList = (list == 0) ? sliceParam->RefPicList0 :
                                     sliceParam->RefPicList1;
@@ -703,6 +704,8 @@
         mVideoFormatInfo.height = height;
     }
 
+
+
     // video_range has default value of 0.
     mVideoFormatInfo.videoRange = data->codec_data->video_full_range_flag;
 
diff --git a/videodecoder/VideoDecoderAVC.h b/videodecoder/VideoDecoderAVC.h
old mode 100644
new mode 100755
index efc9f97..880b659
--- a/videodecoder/VideoDecoderAVC.h
+++ b/videodecoder/VideoDecoderAVC.h
@@ -39,15 +39,15 @@
     virtual Decode_Status decode(VideoDecodeBuffer *buffer);
 
 protected:
-    Decode_Status decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
-    Decode_Status beginDecodingFrame(vbp_data_h264 *data);
-    Decode_Status continueDecodingFrame(vbp_data_h264 *data);
+    virtual Decode_Status decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+    virtual Decode_Status beginDecodingFrame(vbp_data_h264 *data);
+    virtual Decode_Status continueDecodingFrame(vbp_data_h264 *data);
     virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
     Decode_Status setReference(VASliceParameterBufferH264 *sliceParam);
     Decode_Status updateDPB(VAPictureParameterBufferH264 *picParam);
     Decode_Status updateReferenceFrames(vbp_picture_data_h264 *picData);
     void removeReferenceFromDPB(VAPictureParameterBufferH264 *picParam);
-    inline uint32_t getPOC(VAPictureH264 *pic); // Picture Order Count
+    uint32_t getPOC(VAPictureH264 *pic); // Picture Order Count
     inline VASurfaceID findSurface(VAPictureH264 *pic);
     inline VideoSurfaceBuffer* findSurfaceBuffer(VAPictureH264 *pic);
     inline VideoSurfaceBuffer* findRefSurfaceBuffer(VAPictureH264 *pic);
diff --git a/videodecoder/VideoDecoderBase.cpp b/videodecoder/VideoDecoderBase.cpp
old mode 100644
new mode 100755
index 0f4c297..7707fce
--- a/videodecoder/VideoDecoderBase.cpp
+++ b/videodecoder/VideoDecoderBase.cpp
@@ -98,6 +98,7 @@
     }
 
     if ((int32_t)mParserType != VBP_INVALID) {
+        ITRACE("mParserType = %d", mParserType);
         if (vbp_open(mParserType, &mParserHandle) != VBP_OK) {
             ETRACE("Failed to open VBP parser.");
             return DECODE_NO_PARSER;
@@ -1041,6 +1042,8 @@
     return DECODE_SUCCESS;
 }
 
+
+
 Decode_Status VideoDecoderBase::mapSurface(void) {
     VAStatus vaStatus = VA_STATUS_SUCCESS;
     VAImage image;
@@ -1288,7 +1291,7 @@
 }
 
 // This function should be called before start() to load different type of parsers
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
 Decode_Status VideoDecoderBase::setParserType(_vbp_parser_type type) {
     if ((int32_t)type != VBP_INVALID) {
         ITRACE("Parser Type = %d", (int32_t)type);
@@ -1316,6 +1319,18 @@
     return DECODE_SUCCESS;
 }
 
+Decode_Status VideoDecoderBase::queryBuffer(void** vbpData) {
+    if (mParserHandle == NULL) {
+        return DECODE_NO_PARSER;
+    }
+
+    uint32_t vbpStatus;
+    vbpStatus = vbp_query(mParserHandle, vbpData);
+    CHECK_VBP_STATUS("vbp_query");
+
+    return DECODE_SUCCESS;
+}
+
 Decode_Status VideoDecoderBase::getCodecSpecificConfigs(VAProfile profile, VAConfigID *config) {
     VAStatus vaStatus;
     VAConfigAttrib attrib;
diff --git a/videodecoder/VideoDecoderBase.h b/videodecoder/VideoDecoderBase.h
old mode 100644
new mode 100755
index 80dd518..ab9193e
--- a/videodecoder/VideoDecoderBase.h
+++ b/videodecoder/VideoDecoderBase.h
@@ -90,8 +90,9 @@
 
     virtual Decode_Status getRawDataFromSurface(VideoRenderBuffer *renderBuffer = NULL, uint8_t *pRawData = NULL, uint32_t *pSize = NULL, bool internal = true);
 
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT) || (defined USE_SLICE_HEADER_PARSING)
     Decode_Status updateBuffer(uint8_t *buffer, int32_t size, void** vbpData);
+    Decode_Status queryBuffer(void **vbpData);
     Decode_Status setParserType(_vbp_parser_type type);
     virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID *config);
 #endif
diff --git a/videodecoder/VideoDecoderHost.cpp b/videodecoder/VideoDecoderHost.cpp
index 973ab22..1c30fe8 100644
--- a/videodecoder/VideoDecoderHost.cpp
+++ b/videodecoder/VideoDecoderHost.cpp
@@ -25,9 +25,11 @@
 #include "VideoDecoderWMV.h"
 #include "VideoDecoderMPEG4.h"
 #include "VideoDecoderAVC.h"
+
 #ifdef USE_INTEL_SECURE_AVC
 #include "VideoDecoderAVCSecure.h"
 #endif
+
 #ifdef USE_HW_VP8
 #include "VideoDecoderVP8.h"
 #endif
@@ -71,6 +73,7 @@
         return (IVideoDecoder *)p;
     }
 #endif
+
     else {
         ETRACE("Unknown mime type: %s", mimeType);
     }
diff --git a/videodecoder/VideoDecoderTrace.h b/videodecoder/VideoDecoderTrace.h
old mode 100644
new mode 100755
diff --git a/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp
old mode 100644
new mode 100755
index ab7bc7e..671e8bd
--- a/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp
+++ b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp
@@ -21,206 +21,417 @@
 * approved by Intel in writing.
 *
 */
-
-#include "VideoDecoderAVCSecure.h"
+#include <va/va.h>
+#include "VideoDecoderBase.h"
+#include "VideoDecoderAVC.h"
 #include "VideoDecoderTrace.h"
-#include <string.h>
+#include "vbp_loader.h"
+#include "VideoDecoderAVCSecure.h"
 
-
-#define STARTCODE_00                0x00
-#define STARTCODE_01                0x01
+#define MAX_SLICEHEADER_BUFFER_SIZE 4096
 #define STARTCODE_PREFIX_LEN        3
 #define NALU_TYPE_MASK              0x1F
-
-
-// mask for little endian, to mast the second and fourth bytes in the byte stream
-#define STARTCODE_MASK0             0xFF000000 //0x00FF0000
-#define STARTCODE_MASK1             0x0000FF00  //0x000000FF
-
-
-typedef enum {
-    NAL_UNIT_TYPE_unspecified0 = 0,
-    NAL_UNIT_TYPE_SLICE,
-    NAL_UNIT_TYPE_DPA,
-    NAL_UNIT_TYPE_DPB,
-    NAL_UNIT_TYPE_DPC,
-    NAL_UNIT_TYPE_IDR,
-    NAL_UNIT_TYPE_SEI,
-    NAL_UNIT_TYPE_SPS,
-    NAL_UNIT_TYPE_PPS,
-    NAL_UNIT_TYPE_Acc_unit_delimiter,
-    NAL_UNIT_TYPE_EOSeq,
-    NAL_UNIT_TYPE_EOstream,
-    NAL_UNIT_TYPE_filler_data,
-    NAL_UNIT_TYPE_SPS_extension,
-    NAL_UNIT_TYPE_Reserved14,
-    NAL_UNIT_TYPE_Reserved15,
-    NAL_UNIT_TYPE_Reserved16,
-    NAL_UNIT_TYPE_Reserved17,
-    NAL_UNIT_TYPE_Reserved18,
-    NAL_UNIT_TYPE_ACP,
-    NAL_UNIT_TYPE_Reserved20,
-    NAL_UNIT_TYPE_Reserved21,
-    NAL_UNIT_TYPE_Reserved22,
-    NAL_UNIT_TYPE_Reserved23,
-    NAL_UNIT_TYPE_unspecified24,
-} NAL_UNIT_TYPE;
-
-#ifndef min
-#define min(X, Y)  ((X) <(Y) ? (X) : (Y))
-#endif
-
-
+#define MAX_NALU_HEADER_BUFFER      8192
 static const uint8_t startcodePrefix[STARTCODE_PREFIX_LEN] = {0x00, 0x00, 0x01};
 
+/* H264 start code values */
+typedef enum _h264_nal_unit_type
+{
+    h264_NAL_UNIT_TYPE_unspecified = 0,
+    h264_NAL_UNIT_TYPE_SLICE,
+    h264_NAL_UNIT_TYPE_DPA,
+    h264_NAL_UNIT_TYPE_DPB,
+    h264_NAL_UNIT_TYPE_DPC,
+    h264_NAL_UNIT_TYPE_IDR,
+    h264_NAL_UNIT_TYPE_SEI,
+    h264_NAL_UNIT_TYPE_SPS,
+    h264_NAL_UNIT_TYPE_PPS,
+    h264_NAL_UNIT_TYPE_Acc_unit_delimiter,
+    h264_NAL_UNIT_TYPE_EOSeq,
+    h264_NAL_UNIT_TYPE_EOstream,
+    h264_NAL_UNIT_TYPE_filler_data,
+    h264_NAL_UNIT_TYPE_SPS_extension,
+    h264_NAL_UNIT_TYPE_ACP = 19,
+    h264_NAL_UNIT_TYPE_Slice_extension = 20
+} h264_nal_unit_type_t;
 
 VideoDecoderAVCSecure::VideoDecoderAVCSecure(const char *mimeType)
-    : VideoDecoderAVC(mimeType),
-      mNaluHeaderBuffer(NULL),
-      mInputBuffer(NULL) {
-
-    memset(&mMetadata, 0, sizeof(NaluMetadata));
-    memset(&mByteStream, 0, sizeof(NaluByteStream));
-}
-
-VideoDecoderAVCSecure::~VideoDecoderAVCSecure() {
+    : VideoDecoderAVC(mimeType){
+    mFrameSize     = 0;
+    mFrameData     = NULL;
+    mIsEncryptData = 0;
+    mClearData     = NULL;
+    setParserType(VBP_H264SECURE);
+    mFrameIdx = 0;
 }
 
 Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) {
+    VTRACE("VideoDecoderAVCSecure::start");
     Decode_Status status = VideoDecoderAVC::start(buffer);
     if (status != DECODE_SUCCESS) {
         return status;
     }
 
-    mMetadata.naluInfo = new NaluInfo [MAX_NALU_NUMBER];
-    mByteStream.byteStream = new uint8_t [MAX_NALU_HEADER_BUFFER];
-    mNaluHeaderBuffer = new uint8_t [MAX_NALU_HEADER_BUFFER];
-
-    if (mMetadata.naluInfo == NULL ||
-        mByteStream.byteStream == NULL ||
-        mNaluHeaderBuffer == NULL) {
-        ETRACE("Failed to allocate memory.");
-        // TODO: release all allocated memory
+    mClearData = new uint8_t [MAX_NALU_HEADER_BUFFER];
+    if (mClearData == NULL) {
+        ETRACE("Failed to allocate memory for mClearData");
         return DECODE_MEMORY_FAIL;
     }
+
     return status;
 }
 
 void VideoDecoderAVCSecure::stop(void) {
+    VTRACE("VideoDecoderAVCSecure::stop");
     VideoDecoderAVC::stop();
 
-    if (mMetadata.naluInfo) {
-        delete [] mMetadata.naluInfo;
-        mMetadata.naluInfo = NULL;
-    }
-
-    if (mByteStream.byteStream) {
-        delete [] mByteStream.byteStream;
-        mByteStream.byteStream = NULL;
-    }
-
-    if (mNaluHeaderBuffer) {
-        delete [] mNaluHeaderBuffer;
-        mNaluHeaderBuffer = NULL;
+    if (mClearData) {
+        delete [] mClearData;
+        mClearData = NULL;
     }
 }
 
 Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
+    VTRACE("VideoDecoderAVCSecure::decode");
     Decode_Status status;
-    int32_t sizeAccumulated = 0;
-    int32_t sizeLeft = 0;
-    uint8_t *pByteStream = NULL;
-    NaluInfo *pNaluInfo = mMetadata.naluInfo;
-
-    if (buffer->flag & IS_SECURE_DATA) {
-        // NALU headers are appended to encrypted video bitstream
-        // |...encrypted video bitstream (16 bytes aligned)...| 4 bytes of header size |...NALU headers..|
-        pByteStream = buffer->data + buffer->size + 4;
-        sizeLeft = *(int32_t *)(buffer->data + buffer->size);
-        VTRACE("%s sizeLeft: %d buffer->size: %#x", __func__, sizeLeft, buffer->size);
-        mInputBuffer = buffer->data;
-    } else {
-        status = parseAnnexBStream(buffer->data, buffer->size, &mByteStream);
-        CHECK_STATUS("parseAnnexBStream");
-        pByteStream = mByteStream.byteStream;
-        sizeLeft = mByteStream.streamPos;
-        mInputBuffer = buffer->data;
-    }
-    if (sizeLeft < 4) {
-        ETRACE("Not enough data to read number of NALU.");
+    vbp_data_h264 *data = NULL;
+    if (buffer == NULL) {
         return DECODE_INVALID_DATA;
     }
 
-    // read number of NALU
-    memcpy(&(mMetadata.naluNumber), pByteStream, sizeof(int32_t));
-    pByteStream += 4;
-    sizeLeft -= 4;
+    int32_t clear_data_size = 0;
+    uint8_t *clear_data = NULL;
+    uint8_t naluType = 0;
 
-    if (mMetadata.naluNumber == 0) {
-        WTRACE("Number of NALU is ZERO!");
-        return DECODE_SUCCESS;
+    int32_t num_nalus;
+    int32_t nalu_offset;
+    int32_t offset;
+    uint8_t *data_src;
+    uint8_t *nalu_data;
+    uint32_t nalu_size;
+//    uint32_t testsize;
+//    uint8_t *testdata;
+    if (buffer->flag & IS_SECURE_DATA) {
+        VTRACE("Decoding protected video ...");
+        mIsEncryptData = 1;
+
+        mFrameData = buffer->data;
+        mFrameSize = buffer->size;
+        VTRACE("mFrameData = %p, mFrameSize = %d", mFrameData, mFrameSize);
+#if 0
+        testsize = *(uint32_t *)(buffer->data + buffer->size);
+        testdata = (uint8_t *)(buffer->data + buffer->size + sizeof(uint32_t));
+        for (int i = 0; i < testsize; i++) {
+            VTRACE("testdata[%d] = 0x%x", i, testdata[i]);
+        }
+#endif
+        num_nalus  = *(uint32_t *)(buffer->data + buffer->size + sizeof(uint32_t));
+        VTRACE("num_nalus = %d", num_nalus);
+        offset = 4;
+        for (int32_t i = 0; i < num_nalus; i++) {
+            VTRACE("%d nalu, offset = %d", i, offset);
+            data_src = buffer->data + buffer->size + sizeof(uint32_t) + offset;
+            nalu_size = *(uint32_t *)(data_src + 2 * sizeof(uint32_t));
+            nalu_size = (nalu_size + 0x03) & (~0x03);
+
+            nalu_data = data_src + 3 * sizeof(uint32_t);
+            naluType  = nalu_data[0] & NALU_TYPE_MASK;
+            offset += nalu_size + 3 * sizeof(uint32_t);
+            VTRACE("naluType = 0x%x", naluType);
+            VTRACE("nalu_size = %d, nalu_data = %p", nalu_size, nalu_data);
+
+            if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+                ETRACE("Slice NALU received!");
+                return DECODE_INVALID_DATA;
+            }
+
+            else if (naluType >= h264_NAL_UNIT_TYPE_SEI && naluType <= h264_NAL_UNIT_TYPE_PPS) {
+                memcpy(mClearData + clear_data_size,
+                    startcodePrefix,
+                    STARTCODE_PREFIX_LEN);
+                clear_data_size += STARTCODE_PREFIX_LEN;
+                memcpy(mClearData + clear_data_size,
+                    nalu_data,
+                    nalu_size);
+                clear_data_size += nalu_size;
+            } else {
+                ETRACE("Failure: DECODE_FRAME_DROPPED");
+                return DECODE_FRAME_DROPPED;
+            }
+        }
+        clear_data = mClearData;
+
+    } else {
+        VTRACE("Decoding clear video ...");
+        mIsEncryptData = 0;
+        mFrameSize = buffer->size;
+        mFrameData = buffer->data;
+        clear_data = buffer->data;
+        clear_data_size = buffer->size;
+    }
+    if (clear_data_size > 0) {
+        status =  VideoDecoderBase::parseBuffer(
+                clear_data,
+                clear_data_size,
+                false,
+                (void**)&data);
+        CHECK_STATUS("VideoDecoderBase::parseBuffer");
+    } else {
+        status =  VideoDecoderBase::queryBuffer((void**)&data);
+        CHECK_STATUS("VideoDecoderBase::queryBuffer");
     }
 
-    for (int32_t i = 0; i < mMetadata.naluNumber; i++) {
-        if (sizeLeft < 12) {
-            ETRACE("Not enough data to parse NALU offset, size, header length for NALU %d, left = %d", i, sizeLeft);
-            return DECODE_INVALID_DATA;
-        }
-        sizeLeft -= 12;
-        // read NALU offset
-        memcpy(&(pNaluInfo->naluOffset), pByteStream, sizeof(int32_t));
-        pByteStream += 4;
-
-        // read NALU size
-        memcpy(&(pNaluInfo->naluLen), pByteStream, sizeof(int32_t));
-        pByteStream += 4;
-
-        // read NALU header length
-        memcpy(&(pNaluInfo->naluHeaderLen), pByteStream, sizeof(int32_t));
-        pByteStream += 4;
-
-
-        if (sizeLeft < pNaluInfo->naluHeaderLen) {
-            ETRACE("Not enough data to copy NALU header for %d, left = %d, header len = %d", i, sizeLeft, pNaluInfo->naluHeaderLen);
-            return DECODE_INVALID_DATA;
-        }
-
-        sizeLeft -=  pNaluInfo->naluHeaderLen;
-
-        if (pNaluInfo->naluHeaderLen) {
-            // copy start code prefix to buffer
-            memcpy(mNaluHeaderBuffer + sizeAccumulated,
-                startcodePrefix,
-                STARTCODE_PREFIX_LEN);
-            sizeAccumulated += STARTCODE_PREFIX_LEN;
-
-            // copy NALU header
-            memcpy(mNaluHeaderBuffer + sizeAccumulated, pByteStream, pNaluInfo->naluHeaderLen);
-            pByteStream += pNaluInfo->naluHeaderLen;
-
-            sizeAccumulated += pNaluInfo->naluHeaderLen;
+    if (!mVAStarted) {
+        if (data->has_sps && data->has_pps) {
+            status = startVA(data);
+            CHECK_STATUS("startVA");
         } else {
-            WTRACE("header len is zero for NALU %d", i);
+            WTRACE("Can't start VA as either SPS or PPS is still not available.");
+            return DECODE_SUCCESS;
         }
-
-        // for next NALU
-        pNaluInfo++;
     }
+    status = decodeFrame(buffer, data);
 
-    buffer->data = mNaluHeaderBuffer;
-    buffer->size = sizeAccumulated;
-
-    return VideoDecoderAVC::decode(buffer);
+    return status;
 }
 
+Decode_Status VideoDecoderAVCSecure::decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
+    VTRACE("VideoDecoderAVCSecure::decodeFrame");
+    Decode_Status status;
+    VTRACE("data->has_sps = %d, data->has_pps = %d", data->has_sps, data->has_pps);
+
+#if 0
+    // Don't remove the following codes, it can be enabled for debugging DPB.
+    for (unsigned int i = 0; i < data->num_pictures; i++) {
+        VAPictureH264 &pic = data->pic_data[i].pic_parms->CurrPic;
+        VTRACE("%d: decoding frame %.2f, poc top = %d, poc bottom = %d, flags = %d,  reference = %d",
+                i,
+                buffer->timeStamp/1E6,
+                pic.TopFieldOrderCnt,
+                pic.BottomFieldOrderCnt,
+                pic.flags,
+                (pic.flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+                (pic.flags & VA_PICTURE_H264_LONG_TERM_REFERENCE));
+    }
+#endif
+
+    if (data->new_sps || data->new_pps) {
+        status = handleNewSequence(data);
+        CHECK_STATUS("handleNewSequence");
+    }
+
+    uint64_t lastPTS = mCurrentPTS;
+    mCurrentPTS = buffer->timeStamp;
+
+    // start decoding a new frame
+    status = acquireSurfaceBuffer();
+    CHECK_STATUS("acquireSurfaceBuffer");
+
+    if (mFrameSize > 0) {
+        status = parseSliceHeader(buffer, data);
+    }
+    if (status != DECODE_SUCCESS) {
+        endDecodingFrame(true);
+        return status;
+    }
+
+    status = beginDecodingFrame(data);
+    CHECK_STATUS("beginDecodingFrame");
+
+    // finish decoding the current frame
+    status = endDecodingFrame(false);
+    CHECK_STATUS("endDecodingFrame");
+
+    if (isNewFrame(data, lastPTS == mCurrentPTS) == 0) {
+        ETRACE("Can't handle interlaced frames yet");
+        return DECODE_FAIL;
+    }
+
+    return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::beginDecodingFrame(vbp_data_h264 *data) {
+    VTRACE("VideoDecoderAVCSecure::beginDecodingFrame");
+    Decode_Status status;
+    VAPictureH264 *picture = &(data->pic_data[0].pic_parms->CurrPic);
+    if ((picture->flags & VA_PICTURE_H264_SHORT_TERM_REFERENCE) ||
+        (picture->flags & VA_PICTURE_H264_LONG_TERM_REFERENCE)) {
+        mAcquiredBuffer->referenceFrame = true;
+    } else {
+        mAcquiredBuffer->referenceFrame = false;
+    }
+
+    if (picture->flags & VA_PICTURE_H264_TOP_FIELD) {
+        mAcquiredBuffer->renderBuffer.scanFormat = VA_BOTTOM_FIELD | VA_TOP_FIELD;
+    } else {
+        mAcquiredBuffer->renderBuffer.scanFormat = VA_FRAME_PICTURE;
+    }
+
+    mAcquiredBuffer->renderBuffer.flag = 0;
+    mAcquiredBuffer->renderBuffer.timeStamp = mCurrentPTS;
+    mAcquiredBuffer->pictureOrder = getPOC(picture);
+
+    status  = continueDecodingFrame(data);
+    return status;
+}
+
+Decode_Status VideoDecoderAVCSecure::continueDecodingFrame(vbp_data_h264 *data) {
+    VTRACE("VideoDecoderAVCSecure::continueDecodingFrame");
+    Decode_Status status;
+    vbp_picture_data_h264 *picData = data->pic_data;
+
+    if (mAcquiredBuffer == NULL || mAcquiredBuffer->renderBuffer.surface == VA_INVALID_SURFACE) {
+        ETRACE("mAcquiredBuffer is NULL. Implementation bug.");
+        return DECODE_FAIL;
+    }
+    VTRACE("data->num_pictures = %d", data->num_pictures);
+    for (uint32_t picIndex = 0; picIndex < data->num_pictures; picIndex++, picData++) {
+        if (picData == NULL || picData->pic_parms == NULL || picData->slc_data == NULL || picData->num_slices == 0) {
+            return DECODE_PARSER_FAIL;
+        }
+
+        if (picIndex > 0 &&
+            (picData->pic_parms->CurrPic.flags & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD)) == 0) {
+            ETRACE("Packed frame is not supported yet!");
+            return DECODE_FAIL;
+        }
+        VTRACE("picData->num_slices = %d", picData->num_slices);
+        for (uint32_t sliceIndex = 0; sliceIndex < picData->num_slices; sliceIndex++) {
+            status = decodeSlice(data, picIndex, sliceIndex);
+            if (status != DECODE_SUCCESS) {
+                endDecodingFrame(true);
+                // remove current frame from DPB as it can't be decoded.
+                removeReferenceFromDPB(picData->pic_parms);
+                return status;
+            }
+        }
+    }
+    return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::parseSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
+    Decode_Status status;
+    VAStatus vaStatus;
+
+    VABufferID sliceheaderbufferID;
+    VABufferID pictureparameterparsingbufferID;
+    VABufferID mSlicebufferID;
+
+    vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+    CHECK_VA_STATUS("vaBeginPicture");
+
+    vaStatus = vaCreateBuffer(
+        mVADisplay,
+        mVAContext,
+        VAParseSliceHeaderGroupBufferType,
+        MAX_SLICEHEADER_BUFFER_SIZE,
+        1,
+        NULL,
+        &sliceheaderbufferID);
+    CHECK_VA_STATUS("vaCreateSliceHeaderGroupBuffer");
+
+    void *sliceheaderbuf;
+    vaStatus = vaMapBuffer(
+        mVADisplay,
+        sliceheaderbufferID,
+        &sliceheaderbuf);
+    CHECK_VA_STATUS("vaMapBuffer");
+
+    memset(sliceheaderbuf, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+
+    vaStatus = vaUnmapBuffer(
+        mVADisplay,
+        sliceheaderbufferID);
+    CHECK_VA_STATUS("vaUnmapBuffer");
+
+
+    vaStatus = vaCreateBuffer(
+        mVADisplay,
+        mVAContext,
+        VASliceDataBufferType,
+        mFrameSize, //size
+        1,        //num_elements
+        mFrameData,
+        &mSlicebufferID);
+    CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+    data->pic_parse_buffer->frame_buf_id = mSlicebufferID;
+    data->pic_parse_buffer->slice_headers_buf_id = sliceheaderbufferID;
+    data->pic_parse_buffer->frame_size = mFrameSize;
+    data->pic_parse_buffer->slice_headers_size = MAX_SLICEHEADER_BUFFER_SIZE;
+
+#if 0
+
+    VTRACE("flags.bits.frame_mbs_only_flag = %d", data->pic_parse_buffer->flags.bits.frame_mbs_only_flag);
+    VTRACE("flags.bits.pic_order_present_flag = %d", data->pic_parse_buffer->flags.bits.pic_order_present_flag);
+    VTRACE("flags.bits.delta_pic_order_always_zero_flag = %d", data->pic_parse_buffer->flags.bits.delta_pic_order_always_zero_flag);
+    VTRACE("flags.bits.redundant_pic_cnt_present_flag = %d", data->pic_parse_buffer->flags.bits.redundant_pic_cnt_present_flag);
+    VTRACE("flags.bits.weighted_pred_flag = %d", data->pic_parse_buffer->flags.bits.weighted_pred_flag);
+    VTRACE("flags.bits.entropy_coding_mode_flag = %d", data->pic_parse_buffer->flags.bits.entropy_coding_mode_flag);
+    VTRACE("flags.bits.deblocking_filter_control_present_flag = %d", data->pic_parse_buffer->flags.bits.deblocking_filter_control_present_flag);
+    VTRACE("flags.bits.weighted_bipred_idc = %d", data->pic_parse_buffer->flags.bits.weighted_bipred_idc);
+
+    VTRACE("pic_parse_buffer->expected_pic_parameter_set_id = %d", data->pic_parse_buffer->expected_pic_parameter_set_id);
+    VTRACE("pic_parse_buffer->num_slice_groups_minus1 = %d", data->pic_parse_buffer->num_slice_groups_minus1);
+    VTRACE("pic_parse_buffer->chroma_format_idc = %d", data->pic_parse_buffer->chroma_format_idc);
+    VTRACE("pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4 = %d", data->pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4);
+    VTRACE("pic_parse_buffer->pic_order_cnt_type = %d", data->pic_parse_buffer->pic_order_cnt_type);
+    VTRACE("pic_parse_buffer->residual_colour_transform_flag = %d", data->pic_parse_buffer->residual_colour_transform_flag);
+    VTRACE("pic_parse_buffer->num_ref_idc_l0_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l0_active_minus1);
+    VTRACE("pic_parse_buffer->num_ref_idc_l1_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l1_active_minus1);
+#endif
+
+    vaStatus = vaCreateBuffer(
+        mVADisplay,
+        mVAContext,
+        VAParsePictureParameterBufferType,
+        sizeof(VAParsePictureParameterBuffer),
+        1,
+        data->pic_parse_buffer,
+        &pictureparameterparsingbufferID);
+    CHECK_VA_STATUS("vaCreatePictureParameterParsingBuffer");
+
+    vaStatus = vaRenderPicture(
+        mVADisplay,
+        mVAContext,
+        &pictureparameterparsingbufferID,
+        1);
+    CHECK_VA_STATUS("vaRenderPicture");
+
+    vaStatus = vaMapBuffer(
+        mVADisplay,
+        sliceheaderbufferID,
+        &sliceheaderbuf);
+    CHECK_VA_STATUS("vaMapBuffer");
+
+    status = updateSliceParameter(data,sliceheaderbuf);
+    CHECK_STATUS("updateSliceParameter");
+
+    vaStatus = vaUnmapBuffer(
+        mVADisplay,
+        sliceheaderbufferID);
+    CHECK_VA_STATUS("vaUnmapBuffer");
+
+    return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::updateSliceParameter(vbp_data_h264 *data, void *sliceheaderbuf) {
+    VTRACE("VideoDecoderAVCSecure::updateSliceParameter");
+    Decode_Status status;
+    status =  VideoDecoderBase::updateBuffer(
+            (uint8_t *)sliceheaderbuf,
+            MAX_SLICEHEADER_BUFFER_SIZE,
+            (void**)&data);
+    CHECK_STATUS("updateBuffer");
+    return DECODE_SUCCESS;
+}
 
 Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
-
     Decode_Status status;
     VAStatus vaStatus;
     uint32_t bufferIDCount = 0;
-    // maximum 4 buffers to render a slice: picture parameter, IQMatrix, slice parameter, slice data
-    VABufferID bufferIDs[4];
+    // maximum 3 buffers to render a slice: picture parameter, IQMatrix, slice parameter
+    VABufferID bufferIDs[3];
 
     vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
     vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
@@ -231,19 +442,8 @@
         // either condition indicates start of a new frame
         if (sliceParam->first_mb_in_slice != 0) {
             WTRACE("The first slice is lost.");
-            // TODO: handle the first slice lost
         }
-        if (mDecodingFrame) {
-            // interlace content, complete decoding the first field
-            vaStatus = vaEndPicture(mVADisplay, mVAContext);
-            CHECK_VA_STATUS("vaEndPicture");
-
-            // for interlace content, top field may be valid only after the second field is parsed
-            mAcquiredBuffer->pictureOrder= picParam->CurrPic.TopFieldOrderCnt;
-        }
-
-        // Check there is no reference frame loss before decoding a frame
-
+        VTRACE("Current frameidx = %d", mFrameIdx); mFrameIdx++;
         // Update  the reference frames and surface IDs for DPB and current frame
         status = updateDPB(picParam);
         CHECK_STATUS("updateDPB");
@@ -252,10 +452,6 @@
         status = updateReferenceFrames(picData);
         CHECK_STATUS("updateReferenceFrames");
 
-        vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
-        CHECK_VA_STATUS("vaBeginPicture");
-
-        // start decoding a frame
         mDecodingFrame = true;
 
         vaStatus = vaCreateBuffer(
@@ -284,33 +480,7 @@
     status = setReference(sliceParam);
     CHECK_STATUS("setReference");
 
-    // find which naluinfo is correlated to current slice
-    int naluIndex = 0;
-    uint32_t accumulatedHeaderLen = 0;
-    uint32_t headerLen = 0;
-    for (; naluIndex < mMetadata.naluNumber; naluIndex++)  {
-        headerLen = mMetadata.naluInfo[naluIndex].naluHeaderLen;
-        if (headerLen == 0) {
-            WTRACE("lenght of current NAL unit is 0.");
-            continue;
-        }
-        accumulatedHeaderLen += STARTCODE_PREFIX_LEN;
-        if (accumulatedHeaderLen + headerLen > sliceData->slice_offset) {
-            break;
-        }
-        accumulatedHeaderLen += headerLen;
-    }
-
-    if (sliceData->slice_offset != accumulatedHeaderLen) {
-        WTRACE("unexpected slice offset %d, accumulatedHeaderLen = %d", sliceData->slice_offset, accumulatedHeaderLen);
-    }
-
-    sliceParam->slice_data_size = mMetadata.naluInfo[naluIndex].naluLen;
-    uint32_t sliceOffset = mMetadata.naluInfo[naluIndex].naluOffset;
-    uint32_t slice_offset_shift =  sliceOffset % 16;
-    sliceParam->slice_data_offset += slice_offset_shift;
-    sliceData->slice_size = (sliceParam->slice_data_size + slice_offset_shift + 0xF) & ~0xF;
-
+    sliceParam->slice_data_size = mFrameSize;
     vaStatus = vaCreateBuffer(
         mVADisplay,
         mVAContext,
@@ -322,31 +492,6 @@
     CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
     bufferIDCount++;
 
-    // sliceData->slice_offset - accumulatedHeaderLen is the absolute offset to start codes of current NAL unit
-    // offset points to first byte of NAL unit
-
-    if (mInputBuffer != NULL) {
-        vaStatus = vaCreateBuffer(
-            mVADisplay,
-            mVAContext,
-            VASliceDataBufferType,
-            sliceData->slice_size,  //Slice size
-            1,                      // num_elements
-            mInputBuffer + sliceOffset - slice_offset_shift,
-            &bufferIDs[bufferIDCount]);
-    } else {
-        vaStatus = vaCreateBuffer(
-            mVADisplay,
-            mVAContext,
-            VAProtectedSliceDataBufferType,
-            sliceData->slice_size, //size
-            1,        //num_elements
-            (uint8_t*)sliceOffset, // IMR offset
-            &bufferIDs[bufferIDCount]);
-    }
-    CHECK_VA_STATUS("vaCreateSliceDataBuffer");
-    bufferIDCount++;
-
     vaStatus = vaRenderPicture(
         mVADisplay,
         mVAContext,
@@ -354,165 +499,25 @@
         bufferIDCount);
     CHECK_VA_STATUS("vaRenderPicture");
 
+    VABufferID slicebufferID;
+
+    vaStatus = vaCreateBuffer(
+        mVADisplay,
+        mVAContext,
+        VASliceDataBufferType,
+        mFrameSize, //size
+        1,        //num_elements
+        mFrameData,
+        &slicebufferID);
+    CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+    vaStatus = vaRenderPicture(
+        mVADisplay,
+        mVAContext,
+        &slicebufferID,
+        1);
+    CHECK_VA_STATUS("vaRenderPicture");
+
     return DECODE_SUCCESS;
+
 }
-
-
-// Parse byte string pattern "0x000001" (3 bytes)  in the current buffer.
-// Returns offset of position following  the pattern in the buffer if pattern is found or -1 if not found.
-int32_t VideoDecoderAVCSecure::findNalUnitOffset(uint8_t *stream, int32_t offset, int32_t length) {
-    uint8_t *ptr;
-    uint32_t left = 0, data = 0, phase = 0;
-    uint8_t mask1 = 0, mask2 = 0;
-
-    /* Meaning of phase:
-        0: initial status, "0x000001" bytes are not found so far;
-        1: one "0x00" byte is found;
-        2: two or more consecutive "0x00" bytes" are found;
-        3: "0x000001" patten is found ;
-        4: if there is one more byte after "0x000001";
-       */
-
-    left = length;
-    ptr = (uint8_t *) (stream + offset);
-    phase = 0;
-
-    // parse until there is more data and start code not found
-    while ((left > 0) && (phase < 3)) {
-        // Check if the address is 32-bit aligned & phase=0, if thats the case we can check 4 bytes instead of one byte at a time.
-        if (((((uint32_t)ptr) & 0x3) == 0) && (phase == 0)) {
-            while (left > 3) {
-                data = *((uint32_t *)ptr);
-                mask1 = (STARTCODE_00 != (data & STARTCODE_MASK0));
-                mask2 = (STARTCODE_00 != (data & STARTCODE_MASK1));
-                // If second byte and fourth byte are not zero's then we cannot have a start code here,
-                //  as we need two consecutive zero bytes for a start code pattern.
-                if (mask1 && mask2) {
-                    // skip 4 bytes and start over
-                    ptr += 4;
-                    left -=4;
-                    continue;
-                } else {
-                    break;
-                }
-            }
-        }
-
-        // At this point either data is not on a 32-bit boundary or phase > 0 so we look at one byte at a time
-        if (left > 0) {
-            if (*ptr == STARTCODE_00) {
-                phase++;
-                if (phase > 2) {
-                    // more than 2 consecutive '0x00' bytes is found
-                    phase = 2;
-                }
-            } else if ((*ptr == STARTCODE_01) && (phase == 2)) {
-                // start code is found
-                phase = 3;
-            } else {
-                // reset lookup
-                phase = 0;
-            }
-            ptr++;
-            left--;
-        }
-    }
-
-    if ((left > 0) && (phase == 3)) {
-        phase = 4;
-        // return offset of position following the pattern in the buffer which matches "0x000001" byte string
-        return (int32_t)(ptr - stream);
-    }
-    return -1;
-}
-
-
-Decode_Status VideoDecoderAVCSecure::copyNaluHeader(uint8_t *stream, NaluByteStream *naluStream) {
-    uint8_t naluType;
-    int32_t naluHeaderLen;
-
-    naluType = *(uint8_t *)(stream + naluStream->naluOffset);
-    naluType &= NALU_TYPE_MASK;
-    // first update nalu header length based on nalu type
-    if (naluType >= NAL_UNIT_TYPE_SLICE && naluType <= NAL_UNIT_TYPE_IDR) {
-        // coded slice, return only up to MAX_SLICE_HEADER_SIZE bytes
-        naluHeaderLen = min(naluStream->naluLen, MAX_SLICE_HEADER_SIZE);
-    } else if (naluType >= NAL_UNIT_TYPE_SEI && naluType <= NAL_UNIT_TYPE_PPS) {
-        //sps, pps, sei, etc, return the entire NAL unit in clear
-        naluHeaderLen = naluStream->naluLen;
-    } else {
-        return DECODE_FRAME_DROPPED;
-    }
-
-    memcpy(naluStream->byteStream + naluStream->streamPos, &(naluStream->naluOffset), sizeof(int32_t));
-    naluStream->streamPos += 4;
-
-    memcpy(naluStream->byteStream + naluStream->streamPos, &(naluStream->naluLen), sizeof(int32_t));
-    naluStream->streamPos += 4;
-
-    memcpy(naluStream->byteStream + naluStream->streamPos, &naluHeaderLen, sizeof(int32_t));
-    naluStream->streamPos += 4;
-
-    if (naluHeaderLen) {
-        memcpy(naluStream->byteStream + naluStream->streamPos, (uint8_t*)(stream + naluStream->naluOffset), naluHeaderLen);
-        naluStream->streamPos += naluHeaderLen;
-    }
-    return DECODE_SUCCESS;
-}
-
-
-// parse start-code prefixed stream, also knowns as Annex B byte stream, commonly used in AVI, ES, MPEG2 TS container
-Decode_Status VideoDecoderAVCSecure::parseAnnexBStream(uint8_t *stream, int32_t length, NaluByteStream *naluStream) {
-    int32_t naluOffset, offset, left;
-    NaluInfo *info;
-    uint32_t ret = DECODE_SUCCESS;
-
-    naluOffset = 0;
-    offset = 0;
-    left = length;
-
-    // leave 4 bytes to copy nalu count
-    naluStream->streamPos = 4;
-    naluStream->naluCount = 0;
-    memset(naluStream->byteStream, 0, MAX_NALU_HEADER_BUFFER);
-
-    for (; ;) {
-        naluOffset = findNalUnitOffset(stream, offset, left);
-        if (naluOffset == -1) {
-            break;
-        }
-
-        if (naluStream->naluCount == 0) {
-            naluStream->naluOffset = naluOffset;
-        } else {
-            naluStream->naluLen = naluOffset - naluStream->naluOffset - STARTCODE_PREFIX_LEN;
-            ret = copyNaluHeader(stream, naluStream);
-            if (ret != DECODE_SUCCESS && ret != DECODE_FRAME_DROPPED) {
-                LOGW("copyNaluHeader returned %d", ret);
-                return ret;
-            }
-            // starting position for next NALU
-            naluStream->naluOffset = naluOffset;
-        }
-
-        if (ret == DECODE_SUCCESS) {
-            naluStream->naluCount++;
-        }
-
-        // update next lookup position and length
-        offset = naluOffset + 1; // skip one byte of NAL unit type
-        left = length - offset;
-    }
-
-    if (naluStream->naluCount > 0) {
-        naluStream->naluLen = length - naluStream->naluOffset;
-        memcpy(naluStream->byteStream, &(naluStream->naluCount), sizeof(int32_t));
-        // ignore return value, either DECODE_SUCCESS or DECODE_FRAME_DROPPED
-        copyNaluHeader(stream, naluStream);
-        return DECODE_SUCCESS;
-    }
-
-    LOGW("number of valid NALU is 0!");
-    return DECODE_SUCCESS;
-}
-
diff --git a/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h
old mode 100644
new mode 100755
index af5ae44..6378243
--- a/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h
+++ b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h
@@ -22,62 +22,34 @@
 *
 */
 
-#ifndef VIDEO_DECODER_AVC_SECURE_H_
-#define VIDEO_DECODER_AVC_SECURE_H_
+#ifndef VIDEO_DECODER_AVC_SECURE_H
+#define VIDEO_DECODER_AVC_SECURE_H
 
+#include "VideoDecoderBase.h"
 #include "VideoDecoderAVC.h"
-
+#include "VideoDecoderDefs.h"
 
 class VideoDecoderAVCSecure : public VideoDecoderAVC {
 public:
     VideoDecoderAVCSecure(const char *mimeType);
-    virtual ~VideoDecoderAVCSecure();
-
     virtual Decode_Status start(VideoConfigBuffer *buffer);
     virtual void stop(void);
 
     // data in the decoded buffer is all encrypted.
     virtual Decode_Status decode(VideoDecodeBuffer *buffer);
-
-private:
-    enum {
-        MAX_SLICE_HEADER_SIZE  = 30,
-        MAX_NALU_HEADER_BUFFER = 8192,
-        MAX_NALU_NUMBER = 400,  // > 4096/12
-    };
-
-    // Information of Network Abstraction Layer Unit
-    struct NaluInfo {
-        int32_t naluOffset;                        // offset of NAL unit in the firewalled buffer
-        int32_t naluLen;                           // length of NAL unit
-        int32_t naluHeaderLen;                     // length of NAL unit header
-    };
-
-    struct NaluMetadata {
-        NaluInfo *naluInfo;
-        int32_t naluNumber;  // number of NAL units
-    };
-
-    struct NaluByteStream {
-        int32_t naluOffset;
-        int32_t naluLen;
-        int32_t streamPos;
-        uint8_t *byteStream;   // 4 bytes of naluCount, 4 bytes of naluOffset, 4 bytes of naulLen, 4 bytes of naluHeaderLen, followed by naluHeaderData
-        int32_t naluCount;
-    };
-
+protected:
+    virtual Decode_Status decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+    virtual Decode_Status continueDecodingFrame(vbp_data_h264 *data);
+    virtual Decode_Status beginDecodingFrame(vbp_data_h264 *data);
+    Decode_Status parseSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+    Decode_Status updateSliceParameter(vbp_data_h264 *data, void *sliceheaderbuf);
     virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
-    int32_t findNalUnitOffset(uint8_t *stream, int32_t offset, int32_t length);
-    Decode_Status copyNaluHeader(uint8_t *stream, NaluByteStream *naluStream);
-    Decode_Status parseAnnexBStream(uint8_t *stream, int32_t length, NaluByteStream *naluStream);
-
 private:
-    NaluMetadata mMetadata;
-    NaluByteStream mByteStream;
-    uint8_t *mNaluHeaderBuffer;
-    uint8_t *mInputBuffer;
+    int32_t     mIsEncryptData;
+    int32_t     mFrameSize;
+    uint8_t*    mFrameData;
+    uint8_t*    mClearData;
+    int32_t     mFrameIdx;
 };
 
-
-
-#endif /* VIDEO_DECODER_AVC_SECURE_H_ */
+#endif