Add RDO and recon of MOTION_VAR experiment
The decision framework in enc (without obmc mv refinement) and
reconstruction at both sides are implemented.
Performance gain: 2.229/2.000/1.915% lowres/midres/hdres
Encoding time: +13%
Change-Id: I5ab634c17d7050f15ecf5d4145626d0eaa425058
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index 17ebac5..e29867a 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -288,7 +288,7 @@
4, 4, 3, 2, 2, 1, 1, 0, 0, 0, 0, 0 }
};
-void setup_obmc_mask(int length, const uint8_t *mask[2]) {
+void av1_setup_obmc_mask(int length, const uint8_t *mask[2]) {
switch (length) {
case 1:
mask[0] = obmc_mask_1[0];
@@ -326,12 +326,15 @@
// top/left neighboring blocks' inter predictors with the regular inter
// prediction. We assume the original prediction (bmc) is stored in
// xd->plane[].dst.buf
-void av1_build_obmc_inter_prediction(
- AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row, int mi_col,
- int use_tmp_dst_buf, uint8_t *final_buf[MAX_MB_PLANE],
- int final_stride[MAX_MB_PLANE], uint8_t *above_pred_buf[MAX_MB_PLANE],
- int above_pred_stride[MAX_MB_PLANE], uint8_t *left_pred_buf[MAX_MB_PLANE],
- int left_pred_stride[MAX_MB_PLANE]) {
+void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ int use_tmp_dst_buf,
+ uint8_t *final_buf[MAX_MB_PLANE],
+ const int final_stride[MAX_MB_PLANE],
+ uint8_t *above_pred_buf[MAX_MB_PLANE],
+ const int above_pred_stride[MAX_MB_PLANE],
+ uint8_t *left_pred_buf[MAX_MB_PLANE],
+ const int left_pred_stride[MAX_MB_PLANE]) {
const TileInfo *const tile = &xd->tile;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
int plane, i, mi_step;
@@ -343,8 +346,8 @@
if (use_tmp_dst_buf) {
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *pd = &xd->plane[plane];
- int bw = (xd->n8_w << 3) >> pd->subsampling_x;
- int bh = (xd->n8_h << 3) >> pd->subsampling_y;
+ const int bw = (xd->n8_w << 3) >> pd->subsampling_x;
+ const int bh = (xd->n8_h << 3) >> pd->subsampling_y;
int row;
#if CONFIG_AOM_HIGHBITDEPTH
if (is_hbd) {
@@ -381,18 +384,19 @@
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *pd = &xd->plane[plane];
- int bw = (mi_step << 3) >> pd->subsampling_x;
- int bh = overlap >> pd->subsampling_y;
+ const int bw = (mi_step << 3) >> pd->subsampling_x;
+ const int bh = overlap >> pd->subsampling_y;
int row, col;
- int dst_stride = use_tmp_dst_buf ? final_stride[plane] : pd->dst.stride;
+ const int dst_stride =
+ use_tmp_dst_buf ? final_stride[plane] : pd->dst.stride;
uint8_t *dst = use_tmp_dst_buf
? &final_buf[plane][(i << 3) >> pd->subsampling_x]
: &pd->dst.buf[(i << 3) >> pd->subsampling_x];
- int tmp_stride = above_pred_stride[plane];
+ const int tmp_stride = above_pred_stride[plane];
uint8_t *tmp = &above_pred_buf[plane][(i << 3) >> pd->subsampling_x];
const uint8_t *mask[2];
- setup_obmc_mask(bh, mask);
+ av1_setup_obmc_mask(bh, mask);
#if CONFIG_AOM_HIGHBITDEPTH
if (is_hbd) {
@@ -442,20 +446,21 @@
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *pd = &xd->plane[plane];
- int bw = overlap >> pd->subsampling_x;
- int bh = (mi_step << 3) >> pd->subsampling_y;
+ const int bw = overlap >> pd->subsampling_x;
+ const int bh = (mi_step << 3) >> pd->subsampling_y;
int row, col;
- int dst_stride = use_tmp_dst_buf ? final_stride[plane] : pd->dst.stride;
+ const int dst_stride =
+ use_tmp_dst_buf ? final_stride[plane] : pd->dst.stride;
uint8_t *dst =
use_tmp_dst_buf
? &final_buf[plane][((i << 3) >> pd->subsampling_y) * dst_stride]
: &pd->dst.buf[((i << 3) >> pd->subsampling_y) * dst_stride];
- int tmp_stride = left_pred_stride[plane];
+ const int tmp_stride = left_pred_stride[plane];
uint8_t *tmp =
&left_pred_buf[plane][((i << 3) >> pd->subsampling_y) * tmp_stride];
const uint8_t *mask[2];
- setup_obmc_mask(bw, mask);
+ av1_setup_obmc_mask(bw, mask);
#if CONFIG_AOM_HIGHBITDEPTH
if (is_hbd) {
@@ -490,7 +495,7 @@
void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
- int tmp_stride[MAX_MB_PLANE]) {
+ const int tmp_stride[MAX_MB_PLANE]) {
const TileInfo *const tile = &xd->tile;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
int i, j, mi_step, ref;
@@ -566,7 +571,7 @@
void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
- int tmp_stride[MAX_MB_PLANE]) {
+ const int tmp_stride[MAX_MB_PLANE]) {
const TileInfo *const tile = &xd->tile;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
int i, j, mi_step, ref;
@@ -638,4 +643,48 @@
}
xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8);
}
+
+void av1_build_obmc_inter_predictors_sb(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col) {
+#if CONFIG_AOM_HIGHBITDEPTH
+ DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
+ DECLARE_ALIGNED(16, uint8_t, tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
+#else
+ DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
+ DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
+ const int dst_stride1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE,
+ MAX_SB_SIZE };
+ const int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE,
+ MAX_SB_SIZE };
+
+#if CONFIG_AOM_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ int len = sizeof(uint16_t);
+ dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
+ dst_buf1[1] = CONVERT_TO_BYTEPTR(tmp_buf1 + MAX_SB_SQUARE * len);
+ dst_buf1[2] = CONVERT_TO_BYTEPTR(tmp_buf1 + MAX_SB_SQUARE * 2 * len);
+ dst_buf2[0] = CONVERT_TO_BYTEPTR(tmp_buf2);
+ dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * len);
+ dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * 2 * len);
+ } else {
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ dst_buf1[0] = tmp_buf1;
+ dst_buf1[1] = tmp_buf1 + MAX_SB_SQUARE;
+ dst_buf1[2] = tmp_buf1 + MAX_SB_SQUARE * 2;
+ dst_buf2[0] = tmp_buf2;
+ dst_buf2[1] = tmp_buf2 + MAX_SB_SQUARE;
+ dst_buf2[2] = tmp_buf2 + MAX_SB_SQUARE * 2;
+#if CONFIG_AOM_HIGHBITDEPTH
+ }
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ av1_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
+ dst_stride1);
+ av1_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
+ dst_stride2);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, 0, NULL, NULL,
+ dst_buf1, dst_stride1, dst_buf2, dst_stride2);
+}
#endif // CONFIG_MOTION_VAR
diff --git a/av1/common/reconinter.h b/av1/common/reconinter.h
index a29b2b5..4bfaa16 100644
--- a/av1/common/reconinter.h
+++ b/av1/common/reconinter.h
@@ -197,21 +197,26 @@
const struct scale_factors *sf);
#if CONFIG_MOTION_VAR
-void setup_obmc_mask(int length, const uint8_t *mask[2]);
-void av1_build_obmc_inter_prediction(
- AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row, int mi_col,
- int use_tmp_dst_buf, uint8_t *final_buf[MAX_MB_PLANE],
- int final_stride[MAX_MB_PLANE], uint8_t *above_pred_buf[MAX_MB_PLANE],
- int above_pred_stride[MAX_MB_PLANE], uint8_t *left_pred_buf[MAX_MB_PLANE],
- int left_pred_stride[MAX_MB_PLANE]);
+void av1_setup_obmc_mask(int length, const uint8_t *mask[2]);
+void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ int use_tmp_dst_buf,
+ uint8_t *final_buf[MAX_MB_PLANE],
+ const int final_stride[MAX_MB_PLANE],
+ uint8_t *above_pred_buf[MAX_MB_PLANE],
+ const int above_pred_stride[MAX_MB_PLANE],
+ uint8_t *left_pred_buf[MAX_MB_PLANE],
+ const int left_pred_stride[MAX_MB_PLANE]);
void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
- int tmp_stride[MAX_MB_PLANE]);
+ const int tmp_stride[MAX_MB_PLANE]);
void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
- int tmp_stride[MAX_MB_PLANE]);
+ const int tmp_stride[MAX_MB_PLANE]);
+void av1_build_obmc_inter_predictors_sb(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col);
#endif // CONFIG_MOTION_VAR
#ifdef __cplusplus
} // extern "C"
diff --git a/av1/decoder/decodeframe.c b/av1/decoder/decodeframe.c
index f597c8a..ef45392 100644
--- a/av1/decoder/decodeframe.c
+++ b/av1/decoder/decodeframe.c
@@ -510,6 +510,10 @@
} else {
// Prediction
av1_build_inter_predictors_sb(xd, mi_row, mi_col, AOMMAX(bsize, BLOCK_8X8));
+#if CONFIG_MOTION_VAR
+ if (mbmi->motion_mode == OBMC_CAUSAL)
+ av1_build_obmc_inter_predictors_sb(cm, xd, mi_row, mi_col);
+#endif // CONFIG_MOTION_VAR
// Reconstruction
if (!mbmi->skip) {
int eobtotal = 0;
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index 1c4751b..4d2ff43 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -2957,6 +2957,11 @@
av1_build_inter_predictors_sbuv(xd, mi_row, mi_col,
AOMMAX(bsize, BLOCK_8X8));
+#if CONFIG_MOTION_VAR
+ if (mbmi->motion_mode == OBMC_CAUSAL)
+ av1_build_obmc_inter_predictors_sb(cm, xd, mi_row, mi_col);
+#endif // CONFIG_MOTION_VAR
+
av1_encode_sb(x, AOMMAX(bsize, BLOCK_8X8));
av1_tokenize_sb(cpi, td, t, !output_enabled, AOMMAX(bsize, BLOCK_8X8));
}
diff --git a/av1/encoder/encoder.h b/av1/encoder/encoder.h
index c2240a7..226e7aa 100644
--- a/av1/encoder/encoder.h
+++ b/av1/encoder/encoder.h
@@ -461,6 +461,9 @@
int mbmode_cost[INTRA_MODES];
unsigned int inter_mode_cost[INTER_MODE_CONTEXTS][INTER_MODES];
+#if CONFIG_MOTION_VAR
+ int motion_mode_cost[BLOCK_SIZES][MOTION_MODES];
+#endif // CONFIG_MOTION_VAR
int intra_uv_mode_cost[INTRA_MODES][INTRA_MODES];
int y_mode_costs[INTRA_MODES][INTRA_MODES][INTRA_MODES];
int switchable_interp_costs[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS];
diff --git a/av1/encoder/rd.c b/av1/encoder/rd.c
index 889c66b..6e38d74 100644
--- a/av1/encoder/rd.c
+++ b/av1/encoder/rd.c
@@ -348,6 +348,11 @@
av1_cost_tokens((int *)cpi->inter_mode_cost[i],
cm->fc->inter_mode_probs[i], av1_inter_mode_tree);
#endif
+#if CONFIG_MOTION_VAR
+ for (i = 0; i < BLOCK_SIZES; i++)
+ av1_cost_tokens((int *)cpi->motion_mode_cost[i],
+ cm->fc->motion_mode_prob[i], av1_motion_mode_tree);
+#endif // CONFIG_MOTION_VAR
}
}
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index 903bec0..d77fd58 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -2768,7 +2768,12 @@
AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
int *disable_skip, int_mv (*mode_mv)[MAX_REF_FRAMES], int mi_row,
- int mi_col, int_mv single_newmv[MAX_REF_FRAMES],
+ int mi_col,
+#if CONFIG_MOTION_VAR
+ uint8_t *above_pred_buf[3], int above_pred_stride[3],
+ uint8_t *left_pred_buf[3], int left_pred_stride[3],
+#endif // CONFIG_MOTION_VAR
+ int_mv single_newmv[MAX_REF_FRAMES],
InterpFilter (*single_filter)[MAX_REF_FRAMES],
int (*single_skippable)[MAX_REF_FRAMES], int64_t *psse,
const int64_t ref_best_rd, int64_t *mask_filter, int64_t filter_cache[]) {
@@ -2812,6 +2817,13 @@
int64_t skip_sse_sb = INT64_MAX;
int64_t distortion_y = 0, distortion_uv = 0;
int16_t mode_ctx = mbmi_ext->mode_context[refs[0]];
+#if CONFIG_MOTION_VAR
+ int allow_motion_variation = is_motion_variation_allowed(mbmi);
+ int rate2_nocoeff, best_rate2 = INT_MAX, best_skippable, best_xskip,
+ best_disable_skip = 0;
+ int64_t best_distortion = INT64_MAX;
+ MB_MODE_INFO best_mbmi;
+#endif // CONFIG_MOTION_VAR
#if CONFIG_REF_MV
mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context, mbmi->ref_frame,
@@ -3103,10 +3115,38 @@
}
if (cm->interp_filter == SWITCHABLE) *rate2 += rs;
+#if CONFIG_MOTION_VAR
+ rate2_nocoeff = *rate2;
+#endif // CONFIG_MOTION_VAR
memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
memcpy(x->bsse, bsse, sizeof(bsse));
+#if CONFIG_MOTION_VAR
+ best_rd = INT64_MAX;
+ for (mbmi->motion_mode = SIMPLE_TRANSLATION;
+ mbmi->motion_mode < (allow_motion_variation ? MOTION_MODES : 1);
+ mbmi->motion_mode++) {
+ int64_t tmp_rd, tmp_dist;
+ int tmp_rate;
+ int tmp_rate2 = rate2_nocoeff;
+
+ if (mbmi->motion_mode == OBMC_CAUSAL) {
+ av1_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, 0, NULL, NULL,
+ above_pred_buf, above_pred_stride,
+ left_pred_buf, left_pred_stride);
+ model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, &skip_txfm_sb,
+ &skip_sse_sb);
+ }
+
+ x->skip = 0;
+
+ *rate2 = tmp_rate2;
+ if (allow_motion_variation)
+ *rate2 += cpi->motion_mode_cost[bsize][mbmi->motion_mode];
+ *distortion = 0;
+#endif // CONFIG_MOTION_VAR
+
if (!skip_txfm_sb) {
int skippable_y, skippable_uv;
int64_t sseuv = INT64_MAX;
@@ -3120,8 +3160,16 @@
if (*rate_y == INT_MAX) {
*rate2 = INT_MAX;
*distortion = INT64_MAX;
+#if CONFIG_MOTION_VAR
+ if (mbmi->motion_mode != SIMPLE_TRANSLATION) {
+ continue;
+ } else {
+#endif // CONFIG_MOTION_VAR
restore_dst_buf(xd, orig_dst, orig_dst_stride);
return INT64_MAX;
+#if CONFIG_MOTION_VAR
+ }
+#endif // CONFIG_MOTION_VAR
}
*rate2 += *rate_y;
@@ -3134,24 +3182,86 @@
&sseuv, bsize, ref_best_rd - rdcosty)) {
*rate2 = INT_MAX;
*distortion = INT64_MAX;
+#if CONFIG_MOTION_VAR
+ continue;
+#else
restore_dst_buf(xd, orig_dst, orig_dst_stride);
return INT64_MAX;
+#endif // CONFIG_MOTION_VAR
}
*psse += sseuv;
*rate2 += *rate_uv;
*distortion += distortion_uv;
*skippable = skippable_y && skippable_uv;
+#if CONFIG_MOTION_VAR
+ if (*skippable) {
+ *rate2 -= *rate_uv + *rate_y;
+ *rate_y = 0;
+ *rate_uv = 0;
+ *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
+ mbmi->skip = 0;
+ // here mbmi->skip temporarily plays a role as what this_skip2 does
+ } else if (!xd->lossless[mbmi->segment_id] &&
+ (RDCOST(x->rdmult, x->rddiv,
+ *rate_y + *rate_uv +
+ av1_cost_bit(av1_get_skip_prob(cm, xd), 0),
+ *distortion) >=
+ RDCOST(x->rdmult, x->rddiv,
+ av1_cost_bit(av1_get_skip_prob(cm, xd), 1), *psse))) {
+ *rate2 -= *rate_uv + *rate_y;
+ *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
+ *distortion = *psse;
+ *rate_y = 0;
+ *rate_uv = 0;
+ mbmi->skip = 1;
+ } else {
+ *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
+ mbmi->skip = 0;
+ }
+ *disable_skip = 0;
+#endif // CONFIG_MOTION_VAR
} else {
x->skip = 1;
*disable_skip = 1;
+#if CONFIG_MOTION_VAR
+ mbmi->skip = 0;
+#endif // CONFIG_MOTION_VAR
+
// The cost of skip bit needs to be added.
*rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
*distortion = skip_sse_sb;
}
+#if CONFIG_MOTION_VAR
+ tmp_rd = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
+ if (mbmi->motion_mode == SIMPLE_TRANSLATION || (tmp_rd < best_rd)) {
+ best_mbmi = *mbmi;
+ best_rd = tmp_rd;
+ best_rate2 = *rate2;
+ best_distortion = *distortion;
+ best_skippable = *skippable;
+ best_xskip = x->skip;
+ best_disable_skip = *disable_skip;
+ }
+ }
+
+ if (best_rd == INT64_MAX) {
+ *rate2 = INT_MAX;
+ *distortion = INT64_MAX;
+ restore_dst_buf(xd, orig_dst, orig_dst_stride);
+ return INT64_MAX;
+ }
+ *mbmi = best_mbmi;
+ *rate2 = best_rate2;
+ *distortion = best_distortion;
+ *skippable = best_skippable;
+ x->skip = best_xskip;
+ *disable_skip = best_disable_skip;
+#endif // CONFIG_MOTION_VAR
+
if (!is_comp_pred) single_skippable[this_mode][refs[0]] = *skippable;
restore_dst_buf(xd, orig_dst, orig_dst_stride);
@@ -3806,9 +3916,12 @@
#endif
this_rd = handle_inter_mode(
cpi, x, bsize, &rate2, &distortion2, &skippable, &rate_y, &rate_uv,
- &disable_skip, frame_mv, mi_row, mi_col, single_newmv,
- single_inter_filter, single_skippable, &total_sse, best_rd,
- &mask_filter, filter_cache);
+ &disable_skip, frame_mv, mi_row, mi_col,
+#if CONFIG_MOTION_VAR
+ dst_buf1, dst_stride1, dst_buf2, dst_stride2,
+#endif // CONFIG_MOTION_VAR
+ single_newmv, single_inter_filter, single_skippable, &total_sse,
+ best_rd, &mask_filter, filter_cache);
#if CONFIG_REF_MV
if ((mbmi->mode == NEARMV &&
@@ -3890,6 +4003,9 @@
tmp_alt_rd = handle_inter_mode(
cpi, x, bsize, &tmp_rate, &tmp_dist, &tmp_skip, &tmp_rate_y,
&tmp_rate_uv, &dummy_disable_skip, frame_mv, mi_row, mi_col,
+#if CONFIG_MOTION_VAR
+ dst_buf1, dst_stride1, dst_buf2, dst_stride2,
+#endif // CONFIG_MOTION_VAR
dummy_single_newmv, dummy_single_inter_filter,
dummy_single_skippable, &tmp_sse, best_rd, &dummy_mask_filter,
dummy_filter_cache);
@@ -3912,6 +4028,9 @@
}
if (tmp_alt_rd < INT64_MAX) {
+#if CONFIG_MOTION_VAR
+ tmp_alt_rd = RDCOST(x->rdmult, x->rddiv, tmp_rate, tmp_dist);
+#else
if (RDCOST(x->rdmult, x->rddiv,
tmp_rate_y + tmp_rate_uv + rate_skip0, tmp_dist) <
RDCOST(x->rdmult, x->rddiv, rate_skip1, tmp_sse))
@@ -3921,6 +4040,7 @@
tmp_alt_rd = RDCOST(
x->rdmult, x->rddiv,
tmp_rate + rate_skip1 - tmp_rate_y - tmp_rate_uv, tmp_sse);
+#endif // CONFIG_MOTION_VAR
}
if (tmp_ref_rd > tmp_alt_rd) {
@@ -3969,7 +4089,11 @@
rate2 += ref_costs_single[ref_frame];
}
+#if CONFIG_MOTION_VAR
+ if (ref_frame == INTRA_FRAME) {
+#else
if (!disable_skip) {
+#endif // CONFIG_MOTION_VAR
if (skippable) {
// Back out the coefficient coding costs
rate2 -= (rate_y + rate_uv);
@@ -4002,6 +4126,11 @@
// Calculate the final RD estimate for this mode.
this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+#if CONFIG_MOTION_VAR
+ } else {
+ this_skip2 = mbmi->skip;
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+#endif // CONFIG_MOTION_VAR
}
// Apply an adjustment to the rd value based on the similarity of the