Support Modular DRM for the Merrifield platform

BZ: 175259

Support Modular DRM for the Merrifield platform

Change-Id: Iaa56f19ea44f216183373dd3c45794773ed1b403
Signed-off-by: wfeng6 <wei.feng@intel.com>
diff --git a/mixvbp/vbp_manager/secvideo/merrifield/vbp_h264secure_parser.c b/mixvbp/vbp_manager/secvideo/merrifield/vbp_h264secure_parser.c
index 3f3eeef..60957a5 100755
--- a/mixvbp/vbp_manager/secvideo/merrifield/vbp_h264secure_parser.c
+++ b/mixvbp/vbp_manager/secvideo/merrifield/vbp_h264secure_parser.c
@@ -1123,10 +1123,10 @@
 }
 
 
-static uint32_t vbp_add_pic_data_h264secure(vbp_context *pcontext)
+static uint32_t vbp_update_pic_data_h264secure(vbp_context *pcontext)
 {
     viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
-    VTRACE("vbp_add_pic_data_h264secure +++");
+    VTRACE("vbp_update_pic_data_h264secure +++");
     vbp_data_h264 *query_data = (vbp_data_h264 *)pcontext->query_data;
     struct h264_viddec_parser* parser = NULL;
     vbp_picture_data_h264* pic_data = NULL;
@@ -1137,7 +1137,7 @@
     if (0 == parser->info.SliceHeader.first_mb_in_slice)
     {
         /* a new picture is parsed */
-        query_data->num_pictures++;
+        query_data->num_pictures = 1;
     }
 
     if (query_data->num_pictures == 0)
@@ -1272,11 +1272,281 @@
         /* actual num_ref_frames is set in vbp_set_reference_frames_h264 */
     }
 
-    VTRACE("vbp_add_pic_data_h264secure ---");
+    VTRACE("vbp_update_pic_data_h264secure ---");
     return VBP_OK;
 }
 
-static uint32_t vbp_add_slice_data_h264secure(vbp_context *pcontext, uint32 key)
+static uint32_t vbp_add_pic_data_h264secure(vbp_context *pcontext, int list_index)
+{
+    viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
+
+    vbp_data_h264 *query_data = (vbp_data_h264 *)pcontext->query_data;
+    struct h264_viddec_parser* parser = NULL;
+    vbp_picture_data_h264* pic_data = NULL;
+    VAPictureParameterBufferH264* pic_parms = NULL;
+
+    parser = (struct h264_viddec_parser *)cxt->codec_data;
+
+    if (0 == parser->info.SliceHeader.first_mb_in_slice)
+    {
+        /* a new picture is parsed */
+        query_data->num_pictures = 1;
+    }
+
+    if (query_data->num_pictures == 0)
+    {
+        /* partial frame */
+        query_data->num_pictures = 1;
+    }
+
+    if (query_data->num_pictures > MAX_NUM_PICTURES)
+    {
+        ETRACE("num of pictures exceeds the limit (%d).", MAX_NUM_PICTURES);
+        return VBP_DATA;
+    }
+
+    int pic_data_index = query_data->num_pictures - 1;
+    if (pic_data_index < 0)
+    {
+        WTRACE("MB address does not start from 0!");
+        return VBP_DATA;
+    }
+
+    pic_data = &(query_data->pic_data[pic_data_index]);
+    pic_parms = pic_data->pic_parms;
+
+    // relax this condition to support partial frame parsing
+
+    //if (parser->info.SliceHeader.first_mb_in_slice == 0)
+    {
+        /**
+        * picture parameter only needs to be set once,
+        * even multiple slices may be encoded
+        */
+
+        /* VAPictureParameterBufferH264 */
+        pic_parms->CurrPic.picture_id = VA_INVALID_SURFACE;
+        pic_parms->CurrPic.frame_idx = 0;
+        if (parser->info.img.field_pic_flag == 1)
+        {
+            if (parser->info.img.bottom_field_flag)
+            {
+                pic_parms->CurrPic.flags = VA_PICTURE_H264_BOTTOM_FIELD;
+            }
+            else
+            {
+                /* also OK set to 0 (from test suite) */
+                pic_parms->CurrPic.flags = VA_PICTURE_H264_TOP_FIELD;
+            }
+        }
+        else
+        {
+            pic_parms->CurrPic.flags = 0; /* frame picture */
+        }
+        pic_parms->CurrPic.TopFieldOrderCnt = parser->info.img.toppoc;
+        pic_parms->CurrPic.BottomFieldOrderCnt = parser->info.img.bottompoc;
+        pic_parms->CurrPic.frame_idx = parser->info.SliceHeader.frame_num;
+
+        /* don't care if current frame is used as long term reference */
+        if (parser->info.SliceHeader.nal_ref_idc != 0)
+        {
+            pic_parms->CurrPic.flags |= VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+        }
+
+        pic_parms->picture_width_in_mbs_minus1 = parser->info.active_SPS.sps_disp.pic_width_in_mbs_minus1;
+
+        /* frame height in MBS */
+        pic_parms->picture_height_in_mbs_minus1 = (2 - parser->info.active_SPS.sps_disp.frame_mbs_only_flag) *
+                (parser->info.active_SPS.sps_disp.pic_height_in_map_units_minus1 + 1) - 1;
+
+        pic_parms->bit_depth_luma_minus8 = parser->info.active_SPS.bit_depth_luma_minus8;
+        pic_parms->bit_depth_chroma_minus8 = parser->info.active_SPS.bit_depth_chroma_minus8;
+
+
+        pic_parms->seq_fields.value = 0;
+        pic_parms->seq_fields.bits.chroma_format_idc = parser->info.active_SPS.sps_disp.chroma_format_idc;
+        pic_parms->seq_fields.bits.residual_colour_transform_flag = parser->info.active_SPS.residual_colour_transform_flag;
+        pic_parms->seq_fields.bits.frame_mbs_only_flag = parser->info.active_SPS.sps_disp.frame_mbs_only_flag;
+        pic_parms->seq_fields.bits.mb_adaptive_frame_field_flag = parser->info.active_SPS.sps_disp.mb_adaptive_frame_field_flag;
+        pic_parms->seq_fields.bits.direct_8x8_inference_flag = parser->info.active_SPS.sps_disp.direct_8x8_inference_flag;
+
+        /* new fields in libva 0.31 */
+        pic_parms->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = parser->info.active_SPS.gaps_in_frame_num_value_allowed_flag;
+        pic_parms->seq_fields.bits.log2_max_frame_num_minus4 = parser->info.active_SPS.log2_max_frame_num_minus4;
+        pic_parms->seq_fields.bits.pic_order_cnt_type = parser->info.active_SPS.pic_order_cnt_type;
+        pic_parms->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = parser->info.active_SPS.log2_max_pic_order_cnt_lsb_minus4;
+        pic_parms->seq_fields.bits.delta_pic_order_always_zero_flag =parser->info.active_SPS.delta_pic_order_always_zero_flag;
+
+
+        /* referened from UMG_Moorstown_TestSuites */
+        pic_parms->seq_fields.bits.MinLumaBiPredSize8x8 = (parser->info.active_SPS.level_idc > 30) ? 1 : 0;
+
+        pic_parms->num_slice_groups_minus1 = parser->info.active_PPS.num_slice_groups_minus1;
+        pic_parms->slice_group_map_type = parser->info.active_PPS.slice_group_map_type;
+        pic_parms->slice_group_change_rate_minus1 = 0;
+        pic_parms->pic_init_qp_minus26 = parser->info.active_PPS.pic_init_qp_minus26;
+        pic_parms->pic_init_qs_minus26 = 0;
+        pic_parms->chroma_qp_index_offset = parser->info.active_PPS.chroma_qp_index_offset;
+        pic_parms->second_chroma_qp_index_offset = parser->info.active_PPS.second_chroma_qp_index_offset;
+
+        pic_parms->pic_fields.value = 0;
+        pic_parms->pic_fields.bits.entropy_coding_mode_flag = parser->info.active_PPS.entropy_coding_mode_flag;
+        pic_parms->pic_fields.bits.weighted_pred_flag = parser->info.active_PPS.weighted_pred_flag;
+        pic_parms->pic_fields.bits.weighted_bipred_idc = parser->info.active_PPS.weighted_bipred_idc;
+        pic_parms->pic_fields.bits.transform_8x8_mode_flag = parser->info.active_PPS.transform_8x8_mode_flag;
+
+        /* new LibVA fields in v0.31*/
+        pic_parms->pic_fields.bits.pic_order_present_flag = parser->info.active_PPS.pic_order_present_flag;
+        pic_parms->pic_fields.bits.deblocking_filter_control_present_flag = parser->info.active_PPS.deblocking_filter_control_present_flag;
+        pic_parms->pic_fields.bits.redundant_pic_cnt_present_flag = parser->info.active_PPS.redundant_pic_cnt_present_flag;
+        pic_parms->pic_fields.bits.reference_pic_flag = parser->info.SliceHeader.nal_ref_idc != 0;
+
+        /* all slices in the pciture have the same field_pic_flag */
+        pic_parms->pic_fields.bits.field_pic_flag = parser->info.SliceHeader.field_pic_flag;
+        pic_parms->pic_fields.bits.constrained_intra_pred_flag = parser->info.active_PPS.constrained_intra_pred_flag;
+
+        pic_parms->frame_num = parser->info.SliceHeader.frame_num;
+    }
+
+
+    /* set reference frames, and num_ref_frames */
+    vbp_set_reference_frames_h264secure(parser, pic_parms);
+    if (parser->info.nal_unit_type == h264_NAL_UNIT_TYPE_IDR)
+    {
+        int frame_idx;
+        for (frame_idx = 0; frame_idx < 16; frame_idx++)
+        {
+            pic_parms->ReferenceFrames[frame_idx].picture_id = VA_INVALID_SURFACE;
+            pic_parms->ReferenceFrames[frame_idx].frame_idx = 0;
+            pic_parms->ReferenceFrames[frame_idx].flags = VA_PICTURE_H264_INVALID;
+            pic_parms->ReferenceFrames[frame_idx].TopFieldOrderCnt = 0;
+            pic_parms->ReferenceFrames[frame_idx].BottomFieldOrderCnt = 0;
+        }
+        /* num of reference frame is 0 if current picture is IDR */
+        pic_parms->num_ref_frames = 0;
+    }
+    else
+    {
+        /* actual num_ref_frames is set in vbp_set_reference_frames_h264 */
+    }
+
+    return VBP_OK;
+}
+
+static uint32_t vbp_add_slice_data_h264secure(vbp_context *pcontext, int index)
+{
+    viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
+    uint32 bit, byte;
+    uint8 is_emul;
+
+    vbp_data_h264 *query_data = (vbp_data_h264 *)pcontext->query_data;
+    VASliceParameterBufferH264 *slc_parms = NULL;
+    vbp_slice_data_h264 *slc_data = NULL;
+    struct h264_viddec_parser* h264_parser = NULL;
+    h264_Slice_Header_t* slice_header = NULL;
+    vbp_picture_data_h264* pic_data = NULL;
+
+    h264_parser = (struct h264_viddec_parser *)cxt->codec_data;
+    int pic_data_index = query_data->num_pictures - 1;
+    if (pic_data_index < 0)
+    {
+        ETRACE("invalid picture data index.");
+        return VBP_DATA;
+    }
+
+    pic_data = &(query_data->pic_data[pic_data_index]);
+
+    slc_data = &(pic_data->slc_data[pic_data->num_slices]);
+    slc_data->buffer_addr = cxt->parse_cubby.buf;
+    slc_parms = &(slc_data->slc_parms);
+
+    /* byte: how many bytes have been parsed */
+    /* bit: bits parsed within the current parsing position */
+    viddec_pm_get_au_pos(cxt, &bit, &byte, &is_emul);
+
+    slc_data->nal_unit_type = h264_parser->info.nal_unit_type;
+
+    slc_parms->slice_data_size = slc_data->slice_size =
+                                     pcontext->parser_cxt->list.data[index].edpos -
+                                     pcontext->parser_cxt->list.data[index].stpos;
+
+    /* the offset to the NAL start code for this slice */
+    slc_data->slice_offset = cxt->list.data[index].stpos;
+    slc_parms->slice_data_offset = 0;
+
+    /* whole slice is in this buffer */
+    slc_parms->slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
+
+    slice_header = &(h264_parser->info.SliceHeader);
+    slc_parms->first_mb_in_slice = slice_header->first_mb_in_slice;
+    slc_parms->slice_type = slice_header->slice_type;
+
+    /* bit offset from NAL start code to the beginning of slice data */
+    slc_parms->slice_data_bit_offset = bit + byte * 8;
+
+    if (is_emul)
+    {
+        WTRACE("next byte is emulation prevention byte.");
+        /*slc_parms->slice_data_bit_offset += 8; */
+    }
+
+    if (cxt->getbits.emulation_byte_counter != 0)
+    {
+        slc_parms->slice_data_bit_offset -= cxt->getbits.emulation_byte_counter * 8;
+    }
+
+    slice_header = &(h264_parser->info.SliceHeader);
+    slc_parms->first_mb_in_slice = slice_header->first_mb_in_slice;
+
+    if (h264_parser->info.active_SPS.sps_disp.mb_adaptive_frame_field_flag &
+            (!(h264_parser->info.SliceHeader.field_pic_flag)))
+    {
+        slc_parms->first_mb_in_slice /= 2;
+    }
+
+    slc_parms->slice_type = slice_header->slice_type;
+
+    slc_parms->direct_spatial_mv_pred_flag = slice_header->direct_spatial_mv_pred_flag;
+
+    slc_parms->num_ref_idx_l0_active_minus1 = 0;
+    slc_parms->num_ref_idx_l1_active_minus1 = 0;
+
+    if (slice_header->slice_type == h264_PtypeP)
+    {
+        slc_parms->num_ref_idx_l0_active_minus1 = slice_header->num_ref_idx_l0_active - 1;
+    }
+    else if (slice_header->slice_type == h264_PtypeB)
+    {
+        slc_parms->num_ref_idx_l0_active_minus1 = slice_header->num_ref_idx_l0_active - 1;
+        slc_parms->num_ref_idx_l1_active_minus1 = slice_header->num_ref_idx_l1_active - 1;
+    }
+    else if (slice_header->slice_type != h264_PtypeI)
+    {
+        WTRACE("slice type %d is not supported.", slice_header->slice_type);
+    }
+
+    slc_parms->cabac_init_idc = slice_header->cabac_init_idc;
+    slc_parms->slice_qp_delta = slice_header->slice_qp_delta;
+    slc_parms->disable_deblocking_filter_idc = slice_header->disable_deblocking_filter_idc;
+    slc_parms->slice_alpha_c0_offset_div2 = slice_header->slice_alpha_c0_offset_div2;
+    slc_parms->slice_beta_offset_div2 = slice_header->slice_beta_offset_div2;
+
+    vbp_set_pre_weight_table_h264secure(h264_parser, slc_parms);
+    vbp_set_slice_ref_list_h264secure(h264_parser, slc_parms);
+
+    pic_data->num_slices++;
+
+    if (pic_data->num_slices > MAX_NUM_SLICES)
+    {
+        ETRACE("number of slices per picture exceeds the limit (%d).", MAX_NUM_SLICES);
+        return VBP_DATA;
+    }
+
+    return VBP_OK;
+}
+
+
+static uint32_t vbp_update_slice_data_h264secure(vbp_context *pcontext, uint32 key)
 {
     VTRACE("vbp_add_slice_data_h264secure +++");
     viddec_pm_cxt_t *cxt = pcontext->parser_cxt;
@@ -1765,10 +2035,20 @@
     {
     case h264_NAL_UNIT_TYPE_SLICE:
         VTRACE("slice header is parsed.");
+        error = vbp_add_pic_data_h264secure(pcontext, i);
+        if (VBP_OK == error)
+        {
+            error = vbp_add_slice_data_h264secure(pcontext, i);
+        }
         break;
 
     case  h264_NAL_UNIT_TYPE_IDR:
         VTRACE("IDR header is parsed.");
+        error = vbp_add_pic_data_h264secure(pcontext, i);
+        if (VBP_OK == error)
+        {
+            error = vbp_add_slice_data_h264secure(pcontext, i);
+        }
         break;
     case h264_NAL_UNIT_TYPE_SEI:
         VTRACE("SEI header is parsed.");
@@ -1892,6 +2172,12 @@
         sliceheader_p->weight      = (int16_t *)((uint8_t *)sliceheader_p->reorder_cmd + reordercmdnum * sizeof(uint32));
 
         sliceheader_p->pic_marking = (uint32_t *)((uint8_t *)sliceheader_p->weight + weight_pos);
+
+        if (sliceheader_p->parsedSliceHeader.size <= 0) {
+            ETRACE("Invalid slice header size %d", sliceheader_p->parsedSliceHeader.size);
+            return VBP_DATA;
+        }
+
         offset += sliceheader_p->parsedSliceHeader.size;
         error = pcontext->parser_ops->update_data(pcontext->parser_cxt,
                                                 sliceheader_p, sizeof(vbp_h264_sliceheader));
@@ -1901,14 +2187,14 @@
             return error;
         }
 
-        error = vbp_add_pic_data_h264secure(pcontext);
+        error = vbp_update_pic_data_h264secure(pcontext);
         if (error != VBP_OK)
         {
             ETRACE("vbp_add_pic_data_h264secure error = 0x%x",error);
             return error;
         }
 
-        error = vbp_add_slice_data_h264secure(pcontext,key);
+        error = vbp_update_slice_data_h264secure(pcontext,key);
         if (error != VBP_OK)
         {
             ETRACE("vbp_add_slice_data_h264secure error = 0x%x",error);
@@ -1935,4 +2221,3 @@
 
     return error;
 }
-
diff --git a/mixvbp/vbp_plugin/h264/secvideo/baytrail/viddec_h264secure_parse.c b/mixvbp/vbp_plugin/h264/secvideo/baytrail/viddec_h264secure_parse.c
index 9490ddd..ef03351 100755
--- a/mixvbp/vbp_plugin/h264/secvideo/baytrail/viddec_h264secure_parse.c
+++ b/mixvbp/vbp_plugin/h264/secvideo/baytrail/viddec_h264secure_parse.c
@@ -56,7 +56,7 @@
     switch ( pInfo->nal_unit_type )
     {
     case h264_NAL_UNIT_TYPE_IDR:
-        if (pInfo->got_start)	{
+        if (pInfo->got_start) {
             pInfo->img.recovery_point_found |= 1;
         }
 
@@ -83,7 +83,7 @@
         h264_memset(&next_SliceHeader, 0x0, sizeof(h264_Slice_Header_t));
         next_SliceHeader.nal_ref_idc = nal_ref_idc;
 
-        if ( (1==pInfo->primary_pic_type_plus_one)&&(pInfo->got_start))
+        if ((1==pInfo->primary_pic_type_plus_one)&&(pInfo->got_start))
         {
             pInfo->img.recovery_point_found |=4;
         }
@@ -199,7 +199,6 @@
             h264_update_img_info(pInfo);
         }
 
-
         //////////////////////////////////////////////////////////////
         // Step 4: DPB reference list init and reordering
         //////////////////////////////////////////////////////////////
diff --git a/mixvbp/vbp_plugin/h264/secvideo/merrifield/viddec_h264secure_parse.c b/mixvbp/vbp_plugin/h264/secvideo/merrifield/viddec_h264secure_parse.c
index 2e5ac06..06efe5f 100755
--- a/mixvbp/vbp_plugin/h264/secvideo/merrifield/viddec_h264secure_parse.c
+++ b/mixvbp/vbp_plugin/h264/secvideo/merrifield/viddec_h264secure_parse.c
@@ -133,7 +133,127 @@
         pInfo->sei_rp_received = 0;
 
     case h264_NAL_UNIT_TYPE_SLICE:
-        pInfo->has_slice = 1;
+        {
+            pInfo->has_slice = 1;
+            h264_Slice_Header_t next_SliceHeader;
+            /// Reset next slice header
+            h264_memset(&next_SliceHeader, 0x0, sizeof(h264_Slice_Header_t));
+            next_SliceHeader.nal_ref_idc = nal_ref_idc;
+
+            if ((1 == pInfo->primary_pic_type_plus_one) && (pInfo->got_start))
+            {
+                pInfo->img.recovery_point_found |= 4;
+            }
+            pInfo->primary_pic_type_plus_one = 0;
+
+            ////////////////////////////////////////////////////////////////////////////
+            // Step 2: Parsing slice header
+            ////////////////////////////////////////////////////////////////////////////
+            /// PWT
+            pInfo->h264_pwt_start_byte_offset = 0;
+            pInfo->h264_pwt_start_bit_offset = 0;
+            pInfo->h264_pwt_end_byte_offset = 0;
+            pInfo->h264_pwt_end_bit_offset = 0;
+            pInfo->h264_pwt_enabled = 0;
+            /// IDR flag
+            next_SliceHeader.idr_flag = (pInfo->nal_unit_type == h264_NAL_UNIT_TYPE_IDR);
+
+            /// Pass slice header
+            status = h264_Parse_Slice_Layer_Without_Partitioning_RBSP(parent, pInfo, &next_SliceHeader);
+            pInfo->sei_information.recovery_point = 0;
+
+            if (next_SliceHeader.sh_error & 3)
+            {
+                ETRACE("Slice Header parsing error.\n");
+                break;
+            }
+            pInfo->img.current_slice_num++;
+
+            ////////////////////////////////////////////////////////////////////////////
+            // Step 3: Processing if new picture coming
+            //  1) if it's the second field
+            //  2) if it's a new frame
+            ////////////////////////////////////////////////////////////////////////////
+            //AssignQuantParam(pInfo);
+            if (h264_is_new_picture_start(pInfo, next_SliceHeader, pInfo->SliceHeader))
+            {
+                //
+                ///----------------- New Picture.boundary detected--------------------
+                //
+                pInfo->img.g_new_pic++;
+                //
+                // Complete previous picture
+                h264_dpb_store_previous_picture_in_dpb(pInfo, 0, 0); //curr old
+                //h264_hdr_post_poc(0, 0, use_old);
+
+                //
+                // Update slice structures:
+                h264_update_old_slice(pInfo, next_SliceHeader);     //cur->old; next->cur;
+
+                //
+                // 1) if resolution change: reset dpb
+                // 2) else: init frame store
+                h264_update_img_info(pInfo);                                //img, dpb
+
+                //
+                ///----------------- New frame.boundary detected--------------------
+                //
+                pInfo->img.second_field = h264_is_second_field(pInfo);
+                if (pInfo->img.second_field == 0)
+                {
+                    pInfo->img.g_new_frame = 1;
+                    h264_dpb_update_queue_dangling_field(pInfo);
+                    //
+                    /// DPB management
+                    /// 1) check the gaps
+                    /// 2) assign fs for non-exist frames
+                    /// 3) fill the gaps
+                    /// 4) store frame into DPB if ...
+                    //
+                    //if(pInfo->SliceHeader.redundant_pic_cnt)
+                    {
+                        h264_dpb_gaps_in_frame_num_mem_management(pInfo);
+                    }
+                }
+                //
+                /// Decoding POC
+                h264_hdr_decoding_poc (pInfo, 0, 0);
+                //
+                /// Init Frame Store for next frame
+                h264_dpb_init_frame_store (pInfo);
+                pInfo->img.current_slice_num = 1;
+                if (pInfo->SliceHeader.first_mb_in_slice != 0)
+                {
+                    ////Come here means we have slice lost at the beginning, since no FMO support
+                    pInfo->SliceHeader.sh_error |= (pInfo->SliceHeader.structure << 17);
+                }
+                /// Emit out the New Frame
+                if (pInfo->img.g_new_frame)
+                {
+                    h264_parse_emit_start_new_frame(parent, pInfo);
+                }
+                h264_parse_emit_current_pic(parent, pInfo);
+            }
+            else ///////////////////////////////////////////////////// If Not a picture start
+            {
+                //
+                /// Update slice structures: cur->old; next->cur;
+                h264_update_old_slice(pInfo, next_SliceHeader);
+                //
+                /// 1) if resolution change: reset dpb
+                /// 2) else: update img info
+                h264_update_img_info(pInfo);
+            }
+            //////////////////////////////////////////////////////////////
+            // Step 4: DPB reference list init and reordering
+            //////////////////////////////////////////////////////////////
+            //////////////////////////////////////////////// Update frame Type--- IDR/I/P/B for frame or field
+            h264_update_frame_type(pInfo);
+
+            h264_dpb_update_ref_lists(pInfo);
+            /// Emit out the current "good" slice
+            h264_parse_emit_current_slice(parent, pInfo);
+        }
         break;
 
     ///// * Main profile doesn't support Data Partition, skipped.... *////
@@ -196,7 +316,7 @@
                 //h264_memset(&(pInfo->active_SPS), 0x0, sizeof(seq_param_set));
               //  h264_Parse_Copy_Sps_From_DDR(pInfo, &(pInfo->active_SPS), old_sps_id);
                 VTRACE("old_sps_id==pInfo->active_SPS.seq_parameter_set_id");
-               // pInfo->active_SPS.seq_parameter_set_id = 0xff;
+                pInfo->active_SPS.seq_parameter_set_id = 0xff;
             }
         }
 
diff --git a/mixvbp/vbp_plugin/h264/viddec_h264_parse.c b/mixvbp/vbp_plugin/h264/viddec_h264_parse.c
index 28a319a..b3639c4 100755
--- a/mixvbp/vbp_plugin/h264/viddec_h264_parse.c
+++ b/mixvbp/vbp_plugin/h264/viddec_h264_parse.c
@@ -102,7 +102,6 @@
         /// IDR flag
         next_SliceHeader.idr_flag = (pInfo->nal_unit_type == h264_NAL_UNIT_TYPE_IDR);
 
-
         /// Pass slice header
         status = h264_Parse_Slice_Layer_Without_Partitioning_RBSP(parent, pInfo, &next_SliceHeader);
 
diff --git a/test/Android.mk b/test/Android.mk
index 6140f34..a231a46 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -6,9 +6,9 @@
 
 LOCAL_C_INCLUDES :=             \
     $(TARGET_OUT_HEADERS)/libva_videodecoder \
-    $(TOP)/vendor/intel/hardware/PRIVATE/libmix/videodecoder \
     $(TARGET_OUT_HEADERS)/libva \
     $(TARGET_OUT_HEADERS)/libmixvbp \
+    $(TARGET_OUT_HEADERS)/libmix_videodecoder
 
 LOCAL_SHARED_LIBRARIES :=       \
         libva_videodecoder liblog libva
diff --git a/test/mix_decoder.cpp b/test/mix_decoder.cpp
index ef4e310..ab82747 100755
--- a/test/mix_decoder.cpp
+++ b/test/mix_decoder.cpp
@@ -5,17 +5,18 @@
 #include <vbp_loader.h>
 #include <va/va.h>
 #include <stdlib.h>
-
+#include <VideoFrameInfo.h>
 #define INPUTSIZE   (4*1024*1024)
 static int gImgWidth;
 static int gImgHeight;
 static int gCodec;
 static int gOutputSize;
+static int gFrame;
 
 void CheckArgs(int argc, char* argv[])
 {
     char c;
-    while ((c =getopt(argc, argv,"c:w:h:?") ) != EOF) {
+    while ((c =getopt(argc, argv,"c:w:h:f:?") ) != EOF) {
         switch (c) {
                 case 'w':
                     gImgWidth = atoi(optarg);
@@ -26,6 +27,9 @@
                 case 'c':
                     gCodec = atoi(optarg);
                     break;
+                case 'f':
+                    gFrame = atoi(optarg);
+                    break;
                 case '?':
                 default:
                     ALOGI("./mix_encode -c Codec -w SrcWidth -h SrcHeight");
@@ -52,6 +56,8 @@
     uint32_t out_size;
     char *codecname = NULL;
 
+    uint8_t nalutype;
+
     char codecnamelist[2][32] = {"video/avc", "video/avc-secure"};
 
     CheckArgs(argc, argv);
@@ -71,6 +77,13 @@
         return -1;
     }
 
+    if (gFrame < 0) {
+        ALOGE("Err: wrong frame number = %d", gFrame);
+        return -1;
+    }
+
+    framenum = gFrame;
+
     gOutputSize = gImgWidth * gImgHeight * 3/2;
 
     VideoDecodeBuffer buffer;
@@ -90,6 +103,7 @@
 
     configBuffer.width = gImgWidth;
     configBuffer.height = gImgHeight;
+    configBuffer.flag |= IS_SUBSAMPLE_ENCRYPTION;
 
     testDecoder->start(&configBuffer);
 
@@ -105,8 +119,12 @@
         return -1;
     }
 
+    frame_info_t frame_info;
+
     for (frameidx = 0; frameidx < framenum; frameidx++) {
-        sprintf(inputfilename, "/data/decrypted_frame/decrypted_frame_%d.h264", frameidx);
+
+        memset(inBuf, 0, INPUTSIZE);
+        sprintf(inputfilename, "/data/bitstream/frame_%04d.bin", frameidx);
         if((fp_in = fopen(inputfilename,"rb")) == NULL) {
             ALOGE("Fail to open inputfilename %s", inputfilename);
             return -1;
@@ -121,10 +139,52 @@
         }
         fread(inBuf, 1, in_size, fp_in);
         fclose(fp_in);
-
         memset(&buffer, 0, sizeof(VideoDecodeBuffer));
-        buffer.data = inBuf;
-        buffer.size = in_size;
+
+        nalutype = inBuf[4] & 0x1F;
+        if (nalutype == 0x07 || nalutype == 0x08) {
+            ALOGV("Clear SPS/PPS is sent");
+            frame_info.data = inBuf;
+            frame_info.size = in_size;
+            frame_info.num_nalus = 1;
+            frame_info.nalus[0].data = inBuf;
+            frame_info.nalus[0].length = in_size;
+            frame_info.nalus[0].type = inBuf[4];
+            frame_info.nalus[0].offset = 0;
+            buffer.data = (uint8_t *)&frame_info;
+            buffer.size = sizeof(frame_info_t);
+            buffer.flag |= IS_SECURE_DATA;
+
+   //         buffer.data = inBuf;
+   //         buffer.size = in_size;
+        } else {
+#if 0
+            ALOGV("Encrypted slice data is sent");
+            frame_info.data = (uint8_t *) &inBuf[5];
+            frame_info.size = in_size - 5;
+            frame_info.subsamplenum = 1;
+            frame_info.subsampletable[0].subsample_type = inBuf[4];
+            frame_info.subsampletable[0].subsample_size = in_size - 5;
+#endif
+            ALOGV("Encrypted slice data is sent");
+            frame_info.data = inBuf;
+            frame_info.size = in_size;
+            frame_info.num_nalus = 2;
+            frame_info.nalus[0].offset = 0;
+            frame_info.nalus[0].type = 0x06;
+            frame_info.nalus[0].length = 5;
+            frame_info.nalus[0].data = NULL;
+
+            frame_info.nalus[1].offset = 5;
+            frame_info.nalus[1].type = inBuf[4];
+            frame_info.nalus[1].length = in_size - 5;
+            frame_info.nalus[1].data = NULL;
+
+            buffer.data = (uint8_t *)&frame_info;
+            buffer.size = sizeof(frame_info_t);
+            buffer.flag |= IS_SECURE_DATA;
+        }
+
         buffer.rotationDegrees = 0;
         buffer.timeStamp = frameidx;
 
@@ -138,7 +198,7 @@
 
             renderbuf->renderDone = true;
             ALOGV("Output frame %d, out_size = %d", outidx, out_size);
-            sprintf(outputfilename, "/data/decodedframe/frame_%d.bin", outidx++);
+            sprintf(outputfilename, "/data/outputsurface/frame_%04d.bin", outidx++);
             if((fp_out = fopen(outputfilename,"wb")) == NULL) {
                 ALOGE("Fail to open outputfile: %s", outputfilename);
                 return -1;
diff --git a/videodecoder/Android.mk b/videodecoder/Android.mk
index 6041456..eef5828 100644
--- a/videodecoder/Android.mk
+++ b/videodecoder/Android.mk
@@ -71,6 +71,10 @@
     VideoDecoderInterface.h \
     VideoDecoderDefs.h
 
+ifneq ($(filter $(TARGET_BOARD_PLATFORM),$(PLATFORM_SUPPORT_SLICE_HEADER_PARSER)),)
+    LOCAL_COPY_HEADERS += securevideo/merrifield/VideoFrameInfo.h
+endif
+
 LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE := libva_videodecoder
 
diff --git a/videodecoder/VideoDecoderBase.cpp b/videodecoder/VideoDecoderBase.cpp
index 31bbac3..a1e3866 100644
--- a/videodecoder/VideoDecoderBase.cpp
+++ b/videodecoder/VideoDecoderBase.cpp
@@ -804,7 +804,7 @@
         status = checkHardwareCapability(profile);
         CHECK_STATUS("checkHardwareCapability");
 
-#ifdef USE_AVC_SHORT_FORMAT
+#if (defined USE_AVC_SHORT_FORMAT || defined USE_SLICE_HEADER_PARSING)
         status = getCodecSpecificConfigs(profile, &mVAConfig);
         CHECK_STATUS("getCodecSpecificAttributes");
 #else
diff --git a/videodecoder/VideoDecoderDefs.h b/videodecoder/VideoDecoderDefs.h
index 67139f0..9ab59dc 100644
--- a/videodecoder/VideoDecoderDefs.h
+++ b/videodecoder/VideoDecoderDefs.h
@@ -116,6 +116,10 @@
 
     // indicate adaptive playback mode
     WANT_ADAPTIVE_PLAYBACK = 0x100000,
+
+    // indicate the modular drm type
+    IS_SUBSAMPLE_ENCRYPTION = 0x200000,
+
 } VIDEO_BUFFER_FLAG;
 
 typedef enum
diff --git a/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp
index 671e8bd..08b9ef0 100755
--- a/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp
+++ b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.cpp
@@ -21,12 +21,14 @@
 * approved by Intel in writing.
 *
 */
+
 #include <va/va.h>
 #include "VideoDecoderBase.h"
 #include "VideoDecoderAVC.h"
 #include "VideoDecoderTrace.h"
 #include "vbp_loader.h"
 #include "VideoDecoderAVCSecure.h"
+#include "VideoFrameInfo.h"
 
 #define MAX_SLICEHEADER_BUFFER_SIZE 4096
 #define STARTCODE_PREFIX_LEN        3
@@ -61,12 +63,20 @@
     mFrameData     = NULL;
     mIsEncryptData = 0;
     mClearData     = NULL;
+    mCachedHeader  = NULL;
     setParserType(VBP_H264SECURE);
     mFrameIdx = 0;
+    mModularMode = 0;
+    mSliceNum = 0;
 }
 
 Decode_Status VideoDecoderAVCSecure::start(VideoConfigBuffer *buffer) {
     VTRACE("VideoDecoderAVCSecure::start");
+
+    if (buffer->flag & IS_SUBSAMPLE_ENCRYPTION) {
+        mModularMode = 1;
+    }
+
     Decode_Status status = VideoDecoderAVC::start(buffer);
     if (status != DECODE_SUCCESS) {
         return status;
@@ -78,6 +88,12 @@
         return DECODE_MEMORY_FAIL;
     }
 
+    mCachedHeader= new uint8_t [MAX_SLICEHEADER_BUFFER_SIZE];
+    if (mCachedHeader == NULL) {
+        ETRACE("Failed to allocate memory for mCachedHeader");
+        return DECODE_MEMORY_FAIL;
+    }
+
     return status;
 }
 
@@ -89,16 +105,130 @@
         delete [] mClearData;
         mClearData = NULL;
     }
-}
 
-Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
-    VTRACE("VideoDecoderAVCSecure::decode");
+    if (mCachedHeader) {
+        delete [] mCachedHeader;
+        mCachedHeader = NULL;
+    }
+}
+Decode_Status VideoDecoderAVCSecure::processModularInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data)
+{
+    VTRACE("processModularInputBuffer +++");
     Decode_Status status;
-    vbp_data_h264 *data = NULL;
-    if (buffer == NULL) {
-        return DECODE_INVALID_DATA;
+    int32_t clear_data_size = 0;
+    uint8_t *clear_data = NULL;
+
+    int32_t nalu_num = 0;
+    uint8_t nalu_type = 0;
+    int32_t nalu_offset = 0;
+    uint32_t nalu_size = 0;
+    uint8_t naluType = 0;
+    uint8_t *nalu_data = NULL;
+    uint32_t sliceidx = 0;
+
+    frame_info_t *pFrameInfo = NULL;
+    mSliceNum = 0;
+    memset(&mSliceInfo, 0, sizeof(mSliceInfo));
+    mIsEncryptData = 0;
+
+    if (buffer->flag & IS_SECURE_DATA) {
+        VTRACE("Decoding protected video ...");
+        pFrameInfo = (frame_info_t *) buffer->data;
+        if (pFrameInfo == NULL) {
+            ETRACE("Invalid parameter: pFrameInfo is NULL!");
+            return DECODE_MEMORY_FAIL;
+        }
+
+        mFrameData = pFrameInfo->data;
+        mFrameSize = pFrameInfo->size;
+        VTRACE("mFrameData = %p, mFrameSize = %d", mFrameData, mFrameSize);
+
+        nalu_num  = pFrameInfo->num_nalus;
+        VTRACE("nalu_num = %d", nalu_num);
+
+        if (nalu_num <= 0 || nalu_num >= MAX_NUM_NALUS) {
+            ETRACE("Invalid parameter: nalu_num = %d", nalu_num);
+            return DECODE_MEMORY_FAIL;
+        }
+
+        for (int32_t i = 0; i < nalu_num; i++) {
+
+            nalu_size = pFrameInfo->nalus[i].length;
+            nalu_type = pFrameInfo->nalus[i].type;
+            nalu_offset = pFrameInfo->nalus[i].offset;
+            nalu_data = pFrameInfo->nalus[i].data;
+            naluType  = nalu_type & NALU_TYPE_MASK;
+
+            VTRACE("nalu_type = 0x%x, nalu_size = %d, nalu_offset = 0x%x", nalu_type, nalu_size, nalu_offset);
+
+            if (naluType >= h264_NAL_UNIT_TYPE_SLICE && naluType <= h264_NAL_UNIT_TYPE_IDR) {
+
+                mIsEncryptData = 1;
+                VTRACE("slice idx = %d", sliceidx);
+                mSliceInfo[sliceidx].sliceHeaderByte = nalu_type;
+                mSliceInfo[sliceidx].sliceStartOffset = (nalu_offset >> 4) << 4;
+                mSliceInfo[sliceidx].sliceByteOffset = nalu_offset - mSliceInfo[sliceidx].sliceStartOffset;
+                mSliceInfo[sliceidx].sliceLength = nalu_size;
+                mSliceInfo[sliceidx].sliceSize = (mSliceInfo[sliceidx].sliceByteOffset + nalu_size + 0xF) & ~0xF;
+                VTRACE("sliceHeaderByte = 0x%x", mSliceInfo[sliceidx].sliceHeaderByte);
+                VTRACE("sliceStartOffset = %d", mSliceInfo[sliceidx].sliceStartOffset);
+                VTRACE("sliceByteOffset = %d", mSliceInfo[sliceidx].sliceByteOffset);
+                VTRACE("sliceSize = %d", mSliceInfo[sliceidx].sliceSize);
+
+#if 0
+                uint32_t testsize;
+                uint8_t *testdata;
+                testsize = mSliceInfo[sliceidx].sliceSize > 64 ? 64 : mSliceInfo[sliceidx].sliceSize ;
+                testdata = (uint8_t *)(mFrameData);
+                for (int i = 0; i < testsize; i++) {
+                    VTRACE("testdata[%d] = 0x%x", i, testdata[i]);
+                }
+#endif
+                sliceidx++;
+
+            } else if (naluType == h264_NAL_UNIT_TYPE_SPS || naluType == h264_NAL_UNIT_TYPE_PPS) {
+                if (nalu_data == NULL) {
+                    ETRACE("Invalid parameter: nalu_data = NULL for naluType 0x%x", naluType);
+                    return DECODE_MEMORY_FAIL;
+                }
+                memcpy(mClearData + clear_data_size,
+                    nalu_data,
+                    nalu_size);
+                clear_data_size += nalu_size;
+            } else {
+                ITRACE("Nalu type = 0x%x is skipped", naluType);
+                continue;
+            }
+        }
+        clear_data = mClearData;
+        mSliceNum = sliceidx;
+
+    } else {
+        VTRACE("Decoding clear video ...");
+        mIsEncryptData = 0;
+        mFrameSize = buffer->size;
+        mFrameData = buffer->data;
+        clear_data = buffer->data;
+        clear_data_size = buffer->size;
     }
 
+    if (clear_data_size > 0) {
+        status =  VideoDecoderBase::parseBuffer(
+                clear_data,
+                clear_data_size,
+                false,
+                (void**)data);
+        CHECK_STATUS("VideoDecoderBase::parseBuffer");
+    } else {
+        status =  VideoDecoderBase::queryBuffer((void**)data);
+        CHECK_STATUS("VideoDecoderBase::queryBuffer");
+    }
+    return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::processClassicInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data)
+{
+    Decode_Status status;
     int32_t clear_data_size = 0;
     uint8_t *clear_data = NULL;
     uint8_t naluType = 0;
@@ -109,8 +239,7 @@
     uint8_t *data_src;
     uint8_t *nalu_data;
     uint32_t nalu_size;
-//    uint32_t testsize;
-//    uint8_t *testdata;
+
     if (buffer->flag & IS_SECURE_DATA) {
         VTRACE("Decoding protected video ...");
         mIsEncryptData = 1;
@@ -118,13 +247,6 @@
         mFrameData = buffer->data;
         mFrameSize = buffer->size;
         VTRACE("mFrameData = %p, mFrameSize = %d", mFrameData, mFrameSize);
-#if 0
-        testsize = *(uint32_t *)(buffer->data + buffer->size);
-        testdata = (uint8_t *)(buffer->data + buffer->size + sizeof(uint32_t));
-        for (int i = 0; i < testsize; i++) {
-            VTRACE("testdata[%d] = 0x%x", i, testdata[i]);
-        }
-#endif
         num_nalus  = *(uint32_t *)(buffer->data + buffer->size + sizeof(uint32_t));
         VTRACE("num_nalus = %d", num_nalus);
         offset = 4;
@@ -160,7 +282,6 @@
             }
         }
         clear_data = mClearData;
-
     } else {
         VTRACE("Decoding clear video ...");
         mIsEncryptData = 0;
@@ -169,17 +290,47 @@
         clear_data = buffer->data;
         clear_data_size = buffer->size;
     }
+
     if (clear_data_size > 0) {
         status =  VideoDecoderBase::parseBuffer(
                 clear_data,
                 clear_data_size,
                 false,
-                (void**)&data);
+                (void**)data);
         CHECK_STATUS("VideoDecoderBase::parseBuffer");
     } else {
-        status =  VideoDecoderBase::queryBuffer((void**)&data);
+        status =  VideoDecoderBase::queryBuffer((void**)data);
         CHECK_STATUS("VideoDecoderBase::queryBuffer");
     }
+    return DECODE_SUCCESS;
+}
+
+Decode_Status VideoDecoderAVCSecure::decode(VideoDecodeBuffer *buffer) {
+    VTRACE("VideoDecoderAVCSecure::decode");
+    Decode_Status status;
+    vbp_data_h264 *data = NULL;
+    if (buffer == NULL) {
+        return DECODE_INVALID_DATA;
+    }
+
+#if 0
+    uint32_t testsize;
+    uint8_t *testdata;
+    testsize = buffer->size > 16 ? 16:buffer->size ;
+    testdata = (uint8_t *)(buffer->data);
+    for (int i = 0; i < 16; i++) {
+        VTRACE("testdata[%d] = 0x%x", i, testdata[i]);
+    }
+#endif
+
+    if (mModularMode) {
+        status = processModularInputBuffer(buffer,&data);
+        CHECK_STATUS("processModularInputBuffer");
+    }
+    else {
+        status = processClassicInputBuffer(buffer,&data);
+        CHECK_STATUS("processClassicInputBuffer");
+    }
 
     if (!mVAStarted) {
          if (data->has_sps && data->has_pps) {
@@ -190,6 +341,7 @@
             return DECODE_SUCCESS;
         }
     }
+
     status = decodeFrame(buffer, data);
 
     return status;
@@ -220,6 +372,15 @@
         CHECK_STATUS("handleNewSequence");
     }
 
+    if (mModularMode && (!mIsEncryptData)) {
+        if (data->pic_data[0].num_slices == 0) {
+            ITRACE("No slice available for decoding.");
+            status = mSizeChanged ? DECODE_FORMAT_CHANGE : DECODE_SUCCESS;
+            mSizeChanged = false;
+            return status;
+        }
+    }
+
     uint64_t lastPTS = mCurrentPTS;
     mCurrentPTS = buffer->timeStamp;
 
@@ -227,9 +388,13 @@
     status = acquireSurfaceBuffer();
     CHECK_STATUS("acquireSurfaceBuffer");
 
-    if (mFrameSize > 0) {
-        status = parseSliceHeader(buffer, data);
+    if (mModularMode) {
+        parseModularSliceHeader(buffer,data);
     }
+    else {
+        parseClassicSliceHeader(buffer,data);
+    }
+
     if (status != DECODE_SUCCESS) {
         endDecodingFrame(true);
         return status;
@@ -271,6 +436,11 @@
     mAcquiredBuffer->renderBuffer.timeStamp = mCurrentPTS;
     mAcquiredBuffer->pictureOrder = getPOC(picture);
 
+    if (mSizeChanged) {
+        mAcquiredBuffer->renderBuffer.flag |= IS_RESOLUTION_CHANGE;
+        mSizeChanged = false;
+    }
+
     status  = continueDecodingFrame(data);
     return status;
 }
@@ -309,7 +479,7 @@
     return DECODE_SUCCESS;
 }
 
-Decode_Status VideoDecoderAVCSecure::parseSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
+Decode_Status VideoDecoderAVCSecure::parseClassicSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
     Decode_Status status;
     VAStatus vaStatus;
 
@@ -317,6 +487,9 @@
     VABufferID pictureparameterparsingbufferID;
     VABufferID mSlicebufferID;
 
+    if (mFrameSize <= 0) {
+        return DECODE_SUCCESS;
+    }
     vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
     CHECK_VA_STATUS("vaBeginPicture");
 
@@ -415,6 +588,130 @@
     return DECODE_SUCCESS;
 }
 
+Decode_Status VideoDecoderAVCSecure::parseModularSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data) {
+    Decode_Status status;
+    VAStatus vaStatus;
+
+    VABufferID sliceheaderbufferID;
+    VABufferID pictureparameterparsingbufferID;
+    VABufferID mSlicebufferID;
+    int32_t sliceIdx;
+
+    vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
+    CHECK_VA_STATUS("vaBeginPicture");
+
+    if (mFrameSize <= 0 || mSliceNum <=0) {
+        return DECODE_SUCCESS;
+    }
+    void *sliceheaderbuf;
+    memset(mCachedHeader, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+    int32_t offset = 0;
+    int32_t size = 0;
+
+    for (sliceIdx = 0; sliceIdx < mSliceNum; sliceIdx++) {
+        vaStatus = vaCreateBuffer(
+            mVADisplay,
+            mVAContext,
+            VAParseSliceHeaderGroupBufferType,
+            MAX_SLICEHEADER_BUFFER_SIZE,
+            1,
+            NULL,
+            &sliceheaderbufferID);
+        CHECK_VA_STATUS("vaCreateSliceHeaderGroupBuffer");
+
+        vaStatus = vaMapBuffer(
+            mVADisplay,
+            sliceheaderbufferID,
+            &sliceheaderbuf);
+        CHECK_VA_STATUS("vaMapBuffer");
+
+        memset(sliceheaderbuf, 0, MAX_SLICEHEADER_BUFFER_SIZE);
+
+        vaStatus = vaUnmapBuffer(
+            mVADisplay,
+            sliceheaderbufferID);
+        CHECK_VA_STATUS("vaUnmapBuffer");
+
+        vaStatus = vaCreateBuffer(
+            mVADisplay,
+            mVAContext,
+            VASliceDataBufferType,
+            mSliceInfo[sliceIdx].sliceSize, //size
+            1,        //num_elements
+            mFrameData + mSliceInfo[sliceIdx].sliceStartOffset,
+            &mSlicebufferID);
+        CHECK_VA_STATUS("vaCreateSliceDataBuffer");
+
+        data->pic_parse_buffer->frame_buf_id = mSlicebufferID;
+        data->pic_parse_buffer->slice_headers_buf_id = sliceheaderbufferID;
+        data->pic_parse_buffer->frame_size = mSliceInfo[sliceIdx].sliceLength;
+        data->pic_parse_buffer->slice_headers_size = MAX_SLICEHEADER_BUFFER_SIZE;
+        data->pic_parse_buffer->nalu_header.value = mSliceInfo[sliceIdx].sliceHeaderByte;
+        data->pic_parse_buffer->slice_offset = mSliceInfo[sliceIdx].sliceByteOffset;
+
+#if 0
+        VTRACE("data->pic_parse_buffer->slice_offset = 0x%x", data->pic_parse_buffer->slice_offset);
+        VTRACE("pic_parse_buffer->nalu_header.value = %x", data->pic_parse_buffer->nalu_header.value = mSliceInfo[sliceIdx].sliceHeaderByte);
+        VTRACE("flags.bits.frame_mbs_only_flag = %d", data->pic_parse_buffer->flags.bits.frame_mbs_only_flag);
+        VTRACE("flags.bits.pic_order_present_flag = %d", data->pic_parse_buffer->flags.bits.pic_order_present_flag);
+        VTRACE("flags.bits.delta_pic_order_always_zero_flag = %d", data->pic_parse_buffer->flags.bits.delta_pic_order_always_zero_flag);
+        VTRACE("flags.bits.redundant_pic_cnt_present_flag = %d", data->pic_parse_buffer->flags.bits.redundant_pic_cnt_present_flag);
+        VTRACE("flags.bits.weighted_pred_flag = %d", data->pic_parse_buffer->flags.bits.weighted_pred_flag);
+        VTRACE("flags.bits.entropy_coding_mode_flag = %d", data->pic_parse_buffer->flags.bits.entropy_coding_mode_flag);
+        VTRACE("flags.bits.deblocking_filter_control_present_flag = %d", data->pic_parse_buffer->flags.bits.deblocking_filter_control_present_flag);
+        VTRACE("flags.bits.weighted_bipred_idc = %d", data->pic_parse_buffer->flags.bits.weighted_bipred_idc);
+        VTRACE("pic_parse_buffer->expected_pic_parameter_set_id = %d", data->pic_parse_buffer->expected_pic_parameter_set_id);
+        VTRACE("pic_parse_buffer->num_slice_groups_minus1 = %d", data->pic_parse_buffer->num_slice_groups_minus1);
+        VTRACE("pic_parse_buffer->chroma_format_idc = %d", data->pic_parse_buffer->chroma_format_idc);
+        VTRACE("pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4 = %d", data->pic_parse_buffer->log2_max_pic_order_cnt_lsb_minus4);
+        VTRACE("pic_parse_buffer->pic_order_cnt_type = %d", data->pic_parse_buffer->pic_order_cnt_type);
+        VTRACE("pic_parse_buffer->residual_colour_transform_flag = %d", data->pic_parse_buffer->residual_colour_transform_flag);
+        VTRACE("pic_parse_buffer->num_ref_idc_l0_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l0_active_minus1);
+        VTRACE("pic_parse_buffer->num_ref_idc_l1_active_minus1 = %d", data->pic_parse_buffer->num_ref_idc_l1_active_minus1);
+#endif
+        vaStatus = vaCreateBuffer(
+            mVADisplay,
+            mVAContext,
+            VAParsePictureParameterBufferType,
+            sizeof(VAParsePictureParameterBuffer),
+            1,
+            data->pic_parse_buffer,
+            &pictureparameterparsingbufferID);
+        CHECK_VA_STATUS("vaCreatePictureParameterParsingBuffer");
+
+        vaStatus = vaRenderPicture(
+            mVADisplay,
+            mVAContext,
+            &pictureparameterparsingbufferID,
+            1);
+        CHECK_VA_STATUS("vaRenderPicture");
+
+        vaStatus = vaMapBuffer(
+            mVADisplay,
+            sliceheaderbufferID,
+            &sliceheaderbuf);
+        CHECK_VA_STATUS("vaMapBuffer");
+
+        size = *(uint32 *)((uint8 *)sliceheaderbuf + 4) + 4;
+        VTRACE("slice header size = 0x%x, offset = 0x%x", size, offset);
+        if (offset + size <= MAX_SLICEHEADER_BUFFER_SIZE - 4) {
+            memcpy(mCachedHeader+offset, sliceheaderbuf, size);
+            offset += size;
+        } else {
+            WTRACE("Cached slice header is not big enough!");
+        }
+        vaStatus = vaUnmapBuffer(
+            mVADisplay,
+            sliceheaderbufferID);
+        CHECK_VA_STATUS("vaUnmapBuffer");
+    }
+    memset(mCachedHeader + offset, 0xFF, 4);
+    status = updateSliceParameter(data,mCachedHeader);
+    CHECK_STATUS("processSliceHeader");
+    return DECODE_SUCCESS;
+}
+
+
 Decode_Status VideoDecoderAVCSecure::updateSliceParameter(vbp_data_h264 *data, void *sliceheaderbuf) {
     VTRACE("VideoDecoderAVCSecure::updateSliceParameter");
     Decode_Status status;
@@ -437,6 +734,8 @@
     vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
     VAPictureParameterBufferH264 *picParam = picData->pic_parms;
     VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);
+    uint32_t slice_data_size = 0;
+    uint8_t* slice_data_addr = NULL;
 
     if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
         // either condition indicates start of a new frame
@@ -480,7 +779,21 @@
     status = setReference(sliceParam);
     CHECK_STATUS("setReference");
 
-    sliceParam->slice_data_size = mFrameSize;
+    if (mModularMode) {
+        if (mIsEncryptData) {
+            sliceParam->slice_data_size = mSliceInfo[sliceIndex].sliceSize;
+            slice_data_size = mSliceInfo[sliceIndex].sliceSize;
+            slice_data_addr = mFrameData + mSliceInfo[sliceIndex].sliceStartOffset;
+        } else {
+            slice_data_size = sliceData->slice_size;
+            slice_data_addr = sliceData->buffer_addr + sliceData->slice_offset;
+        }
+    } else {
+        sliceParam->slice_data_size = mFrameSize;
+        slice_data_size = mFrameSize;
+        slice_data_addr = mFrameData;
+    }
+
     vaStatus = vaCreateBuffer(
         mVADisplay,
         mVAContext,
@@ -505,9 +818,9 @@
         mVADisplay,
         mVAContext,
         VASliceDataBufferType,
-        mFrameSize, //size
+        slice_data_size, //size
         1,        //num_elements
-        mFrameData,
+        slice_data_addr,
         &slicebufferID);
     CHECK_VA_STATUS("vaCreateSliceDataBuffer");
 
@@ -521,3 +834,34 @@
     return DECODE_SUCCESS;
 
 }
+
+Decode_Status VideoDecoderAVCSecure::getCodecSpecificConfigs(
+    VAProfile profile, VAConfigID *config)
+{
+    VAStatus vaStatus;
+    VAConfigAttrib attrib[2];
+
+    if (config == NULL) {
+        ETRACE("Invalid parameter!");
+        return DECODE_FAIL;
+    }
+
+    attrib[0].type = VAConfigAttribRTFormat;
+    attrib[0].value = VA_RT_FORMAT_YUV420;
+    attrib[1].type = VAConfigAttribDecSliceMode;
+    attrib[1].value = VA_DEC_SLICE_MODE_NORMAL;
+    if (mModularMode) {
+        attrib[1].value = VA_DEC_SLICE_MODE_SUBSAMPLE;
+    }
+
+    vaStatus = vaCreateConfig(
+            mVADisplay,
+            profile,
+            VAEntrypointVLD,
+            &attrib[0],
+            2,
+            config);
+    CHECK_VA_STATUS("vaCreateConfig");
+
+    return DECODE_SUCCESS;
+}
diff --git a/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h
index 6378243..458196e 100755
--- a/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h
+++ b/videodecoder/securevideo/merrifield/VideoDecoderAVCSecure.h
@@ -41,15 +41,37 @@
     virtual Decode_Status decodeFrame(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
     virtual Decode_Status continueDecodingFrame(vbp_data_h264 *data);
     virtual Decode_Status beginDecodingFrame(vbp_data_h264 *data);
-    Decode_Status parseSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+    virtual Decode_Status getCodecSpecificConfigs(VAProfile profile, VAConfigID*config);
+    Decode_Status parseClassicSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+    Decode_Status parseModularSliceHeader(VideoDecodeBuffer *buffer, vbp_data_h264 *data);
+
     Decode_Status updateSliceParameter(vbp_data_h264 *data, void *sliceheaderbuf);
     virtual Decode_Status decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex);
 private:
+    Decode_Status processClassicInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data);
+    Decode_Status processModularInputBuffer(VideoDecodeBuffer *buffer, vbp_data_h264 **data);
     int32_t     mIsEncryptData;
     int32_t     mFrameSize;
     uint8_t*    mFrameData;
     uint8_t*    mClearData;
+    uint8_t*    mCachedHeader;
     int32_t     mFrameIdx;
+    int32_t     mModularMode;
+
+    enum {
+        MAX_SLICE_HEADER_NUM  = 256,
+    };
+    int32_t     mSliceNum;
+    // Information of Slices in the Modular DRM Mode
+    struct SliceInfo {
+        uint8_t     sliceHeaderByte;             //  first byte of the slice header
+        uint32_t    sliceStartOffset;            // offset of Slice unit in the firewalled buffer
+        uint32_t    sliceByteOffset;             // extra offset from the blockAligned slice offset
+        uint32_t    sliceSize;                   // block aligned length of slice unit
+        uint32_t    sliceLength;                 // actual size of the slice
+    };
+
+    SliceInfo mSliceInfo[MAX_SLICE_HEADER_NUM];
 };
 
 #endif
diff --git a/videodecoder/securevideo/merrifield/VideoFrameInfo.h b/videodecoder/securevideo/merrifield/VideoFrameInfo.h
new file mode 100755
index 0000000..426dfca
--- /dev/null
+++ b/videodecoder/securevideo/merrifield/VideoFrameInfo.h
@@ -0,0 +1,20 @@
+#ifndef VIDEO_FRAME_INFO_H_
+#define VIDEO_FRAME_INFO_H_
+
+#define MAX_NUM_NALUS 16
+
+typedef struct {
+    uint8_t  type;      // nalu type + nal_ref_idc
+    uint32_t offset;    // offset to the pointer of the encrypted data
+    uint8_t* data;      // if the nalu is encrypted, this field is useless; if current NALU is SPS/PPS, data is the pointer to clear SPS/PPS data
+    uint32_t length;    // nalu length
+} nalu_info_t;
+
+typedef struct {
+    uint8_t* data;      // pointer to the encrypted data
+    uint32_t size;      // encrypted data size
+    uint32_t num_nalus; // number of NALU
+    nalu_info_t nalus[MAX_NUM_NALUS];
+} frame_info_t;
+
+#endif