Code cleanup: mainly rd_pick_partition and methods called from there.
- Const correctness
- Refactoring
- Make variables local when possible, etc.
- Remove -Wcast-qual to allow explicitly casting away const.
Cherry-picked from aomedia/master: c27fcccc
And then a number of additional const-correctness changes to make sure
other experiments build OK.
Change-Id: I77c18d99d21218fbdc9b186d7ed3792dc401a0a0
diff --git a/av1/common/blockd.h b/av1/common/blockd.h
index b16f512..cc990ea 100644
--- a/av1/common/blockd.h
+++ b/av1/common/blockd.h
@@ -357,7 +357,7 @@
FRAME_CONTEXT *fc;
/* pointers to reference frames */
- RefBuffer *block_refs[2];
+ const RefBuffer *block_refs[2];
/* pointer to current frame */
const YV12_BUFFER_CONFIG *cur_buf;
diff --git a/av1/common/mvref_common.c b/av1/common/mvref_common.c
index 4fdeefe..2344bc1 100644
--- a/av1/common/mvref_common.c
+++ b/av1/common/mvref_common.c
@@ -763,8 +763,8 @@
*near_mv = mvlist[1];
}
-void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
- int ref, int mi_row, int mi_col,
+void av1_append_sub8x8_mvs_for_idx(const AV1_COMMON *cm, MACROBLOCKD *xd,
+ int block, int ref, int mi_row, int mi_col,
#if CONFIG_REF_MV
CANDIDATE_MV *ref_mv_stack,
uint8_t *ref_mv_count,
diff --git a/av1/common/mvref_common.h b/av1/common/mvref_common.h
index b5e7094..25ebbfd 100644
--- a/av1/common/mvref_common.h
+++ b/av1/common/mvref_common.h
@@ -465,8 +465,8 @@
void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
int_mv *near_mv);
-void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
- int ref, int mi_row, int mi_col,
+void av1_append_sub8x8_mvs_for_idx(const AV1_COMMON *cm, MACROBLOCKD *xd,
+ int block, int ref, int mi_row, int mi_col,
#if CONFIG_REF_MV
CANDIDATE_MV *ref_mv_stack,
uint8_t *ref_mv_count,
diff --git a/av1/common/pred_common.h b/av1/common/pred_common.h
index 8927f26..6b0a3d5 100644
--- a/av1/common/pred_common.h
+++ b/av1/common/pred_common.h
@@ -20,7 +20,7 @@
extern "C" {
#endif
-static INLINE int get_segment_id(const AV1_COMMON *cm,
+static INLINE int get_segment_id(const AV1_COMMON *const cm,
const uint8_t *segment_ids, BLOCK_SIZE bsize,
int mi_row, int mi_col) {
const int mi_offset = mi_row * cm->mi_cols + mi_col;
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index ae57a43..c0fc494 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -1170,7 +1170,7 @@
// top/left neighboring blocks' inter predictors with the regular inter
// prediction. We assume the original prediction (bmc) is stored in
// xd->plane[].dst.buf
-void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_obmc_inter_prediction(const AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *above[MAX_MB_PLANE],
int above_stride[MAX_MB_PLANE],
@@ -1281,7 +1281,7 @@
}
#endif // CONFIG_EXT_INTER
-void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_prediction_by_above_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
int tmp_width[MAX_MB_PLANE],
@@ -1319,8 +1319,8 @@
pd->subsampling_y);
}
for (ref = 0; ref < 1 + has_second_ref(above_mbmi); ++ref) {
- MV_REFERENCE_FRAME frame = above_mbmi->ref_frame[ref];
- RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
+ const MV_REFERENCE_FRAME frame = above_mbmi->ref_frame[ref];
+ const RefBuffer *const ref_buf = &cm->frame_refs[frame - LAST_FRAME];
xd->block_refs[ref] = ref_buf;
if ((!av1_is_valid_scale(&ref_buf->sf)))
@@ -1378,7 +1378,7 @@
xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
}
-void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_prediction_by_left_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
int tmp_width[MAX_MB_PLANE],
@@ -1416,8 +1416,8 @@
pd->subsampling_y);
}
for (ref = 0; ref < 1 + has_second_ref(left_mbmi); ++ref) {
- MV_REFERENCE_FRAME frame = left_mbmi->ref_frame[ref];
- RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
+ const MV_REFERENCE_FRAME frame = left_mbmi->ref_frame[ref];
+ const RefBuffer *const ref_buf = &cm->frame_refs[frame - LAST_FRAME];
xd->block_refs[ref] = ref_buf;
if ((!av1_is_valid_scale(&ref_buf->sf)))
diff --git a/av1/common/reconinter.h b/av1/common/reconinter.h
index 4f86354..bfa7e95 100644
--- a/av1/common/reconinter.h
+++ b/av1/common/reconinter.h
@@ -517,19 +517,19 @@
#if CONFIG_MOTION_VAR
const uint8_t *av1_get_obmc_mask(int length);
-void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_obmc_inter_prediction(const AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *above[MAX_MB_PLANE],
int above_stride[MAX_MB_PLANE],
uint8_t *left[MAX_MB_PLANE],
int left_stride[MAX_MB_PLANE]);
-void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_prediction_by_above_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
int tmp_width[MAX_MB_PLANE],
int tmp_height[MAX_MB_PLANE],
int tmp_stride[MAX_MB_PLANE]);
-void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_prediction_by_left_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
int tmp_width[MAX_MB_PLANE],
diff --git a/av1/encoder/aq_complexity.c b/av1/encoder/aq_complexity.c
index 3c9c92f..5c4a5e3 100644
--- a/av1/encoder/aq_complexity.c
+++ b/av1/encoder/aq_complexity.c
@@ -111,9 +111,9 @@
// Select a segment for the current block.
// The choice of segment for a block depends on the ratio of the projected
// bits for the block vs a target average and its spatial complexity.
-void av1_caq_select_segment(AV1_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
+void av1_caq_select_segment(const AV1_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
int mi_row, int mi_col, int projected_rate) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
const int mi_offset = mi_row * cm->mi_cols + mi_col;
const int xmis = AOMMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[bs]);
diff --git a/av1/encoder/aq_complexity.h b/av1/encoder/aq_complexity.h
index 1d966ac..af525b3 100644
--- a/av1/encoder/aq_complexity.h
+++ b/av1/encoder/aq_complexity.h
@@ -22,7 +22,7 @@
struct macroblock;
// Select a segment for the current Block.
-void av1_caq_select_segment(struct AV1_COMP *cpi, struct macroblock *,
+void av1_caq_select_segment(const struct AV1_COMP *cpi, struct macroblock *,
BLOCK_SIZE bs, int mi_row, int mi_col,
int projected_rate);
diff --git a/av1/encoder/aq_cyclicrefresh.c b/av1/encoder/aq_cyclicrefresh.c
index e0e4b88..bcf11a7 100644
--- a/av1/encoder/aq_cyclicrefresh.c
+++ b/av1/encoder/aq_cyclicrefresh.c
@@ -209,7 +209,7 @@
// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
-void av1_cyclic_refresh_update_segment(AV1_COMP *const cpi,
+void av1_cyclic_refresh_update_segment(const AV1_COMP *cpi,
MB_MODE_INFO *const mbmi, int mi_row,
int mi_col, BLOCK_SIZE bsize,
int64_t rate, int64_t dist, int skip) {
diff --git a/av1/encoder/aq_cyclicrefresh.h b/av1/encoder/aq_cyclicrefresh.h
index cdc9815..459ab80 100644
--- a/av1/encoder/aq_cyclicrefresh.h
+++ b/av1/encoder/aq_cyclicrefresh.h
@@ -49,7 +49,7 @@
// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
-void av1_cyclic_refresh_update_segment(struct AV1_COMP *const cpi,
+void av1_cyclic_refresh_update_segment(const struct AV1_COMP *cpi,
MB_MODE_INFO *const mbmi, int mi_row,
int mi_col, BLOCK_SIZE bsize,
int64_t rate, int64_t dist, int skip);
diff --git a/av1/encoder/aq_variance.c b/av1/encoder/aq_variance.c
index 1f5554e..01528ec 100644
--- a/av1/encoder/aq_variance.c
+++ b/av1/encoder/aq_variance.c
@@ -141,7 +141,7 @@
}
#endif // CONFIG_AOM_HIGHBITDEPTH
-static unsigned int block_variance(AV1_COMP *cpi, MACROBLOCK *x,
+static unsigned int block_variance(const AV1_COMP *const cpi, MACROBLOCK *x,
BLOCK_SIZE bs) {
MACROBLOCKD *xd = &x->e_mbd;
unsigned int var, sse;
@@ -189,14 +189,14 @@
}
}
-double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+double av1_log_block_var(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
unsigned int var = block_variance(cpi, x, bs);
aom_clear_system_state();
return log(var + 1.0);
}
#define DEFAULT_E_MIDPOINT 10.0
-int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+int av1_block_energy(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
double energy;
double energy_midpoint;
aom_clear_system_state();
diff --git a/av1/encoder/aq_variance.h b/av1/encoder/aq_variance.h
index 4900aa7..05725c5 100644
--- a/av1/encoder/aq_variance.h
+++ b/av1/encoder/aq_variance.h
@@ -21,8 +21,8 @@
unsigned int av1_vaq_segment_id(int energy);
void av1_vaq_frame_setup(AV1_COMP *cpi);
-int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
-double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+int av1_block_energy(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+double av1_log_block_var(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
#ifdef __cplusplus
} // extern "C"
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index 773e368..3b52e82 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -60,17 +60,18 @@
#define IF_HBD(...)
#endif // CONFIG_AOM_HIGHBITDEPTH
-static void encode_superblock(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
- RUN_TYPE dry_run, int mi_row, int mi_col,
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int *rate);
+static void encode_superblock(const AV1_COMP *const cpi, ThreadData *td,
+ TOKENEXTRA **t, RUN_TYPE dry_run, int mi_row,
+ int mi_col, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int *rate);
#if CONFIG_SUPERTX
static int check_intra_b(PICK_MODE_CONTEXT *ctx);
-static int check_intra_sb(AV1_COMP *cpi, const TileInfo *const tile, int mi_row,
- int mi_col, BLOCK_SIZE bsize, PC_TREE *pc_tree);
-static void predict_superblock(AV1_COMP *cpi, ThreadData *td,
+static int check_intra_sb(const AV1_COMP *cpi, const TileInfo *const tile,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ PC_TREE *pc_tree);
+static void predict_superblock(const AV1_COMP *const cpi, ThreadData *td,
#if CONFIG_EXT_INTER
int mi_row_ori, int mi_col_ori,
#endif // CONFIG_EXT_INTER
@@ -78,17 +79,17 @@
BLOCK_SIZE bsize_pred, int b_sub8x8, int block);
static int check_supertx_sb(BLOCK_SIZE bsize, TX_SIZE supertx_size,
PC_TREE *pc_tree);
-static void predict_sb_complex(AV1_COMP *cpi, ThreadData *td,
+static void predict_sb_complex(const AV1_COMP *const cpi, ThreadData *td,
const TileInfo *const tile, int mi_row,
int mi_col, int mi_row_ori, int mi_col_ori,
RUN_TYPE dry_run, BLOCK_SIZE bsize,
BLOCK_SIZE top_bsize, uint8_t *dst_buf[3],
int dst_stride[3], PC_TREE *pc_tree);
-static void update_state_sb_supertx(AV1_COMP *cpi, ThreadData *td,
+static void update_state_sb_supertx(const AV1_COMP *const cpi, ThreadData *td,
const TileInfo *const tile, int mi_row,
int mi_col, BLOCK_SIZE bsize,
RUN_TYPE dry_run, PC_TREE *pc_tree);
-static void rd_supertx_sb(AV1_COMP *cpi, ThreadData *td,
+static void rd_supertx_sb(const AV1_COMP *const cpi, ThreadData *td,
const TileInfo *const tile, int mi_row, int mi_col,
BLOCK_SIZE bsize, int *tmp_rate, int64_t *tmp_dist,
TX_TYPE *best_tx, PC_TREE *pc_tree);
@@ -176,7 +177,7 @@
};
#endif // CONFIG_AOM_HIGHBITDEPTH
-unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
+unsigned int av1_get_sby_perpixel_variance(const AV1_COMP *cpi,
const struct buf_2d *ref,
BLOCK_SIZE bs) {
unsigned int sse;
@@ -186,7 +187,7 @@
}
#if CONFIG_AOM_HIGHBITDEPTH
-unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
+unsigned int av1_high_get_sby_perpixel_variance(const AV1_COMP *cpi,
const struct buf_2d *ref,
BLOCK_SIZE bs, int bd) {
unsigned int var, sse;
@@ -212,7 +213,7 @@
}
#endif // CONFIG_AOM_HIGHBITDEPTH
-static unsigned int get_sby_perpixel_diff_variance(AV1_COMP *cpi,
+static unsigned int get_sby_perpixel_diff_variance(const AV1_COMP *const cpi,
const struct buf_2d *ref,
int mi_row, int mi_col,
BLOCK_SIZE bs) {
@@ -243,21 +244,21 @@
// Lighter version of set_offsets that only sets the mode info
// pointers.
-static void set_mode_info_offsets(AV1_COMP *const cpi, MACROBLOCK *const x,
- MACROBLOCKD *const xd, int mi_row,
- int mi_col) {
- AV1_COMMON *const cm = &cpi->common;
+static void set_mode_info_offsets(const AV1_COMP *const cpi,
+ MACROBLOCK *const x, MACROBLOCKD *const xd,
+ int mi_row, int mi_col) {
+ const AV1_COMMON *const cm = &cpi->common;
const int idx_str = xd->mi_stride * mi_row + mi_col;
xd->mi = cm->mi_grid_visible + idx_str;
xd->mi[0] = cm->mi + idx_str;
x->mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
}
-static void set_offsets_without_segment_id(AV1_COMP *cpi,
+static void set_offsets_without_segment_id(const AV1_COMP *const cpi,
const TileInfo *const tile,
MACROBLOCK *const x, int mi_row,
int mi_col, BLOCK_SIZE bsize) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
const int mi_height = num_8x8_blocks_high_lookup[bsize];
@@ -299,10 +300,10 @@
xd->tile = *tile;
}
-static void set_offsets(AV1_COMP *cpi, const TileInfo *const tile,
+static void set_offsets(const AV1_COMP *const cpi, const TileInfo *const tile,
MACROBLOCK *const x, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
const struct segmentation *const seg = &cm->seg;
@@ -332,11 +333,11 @@
}
#if CONFIG_SUPERTX
-static void set_offsets_supertx(AV1_COMP *cpi, ThreadData *td,
+static void set_offsets_supertx(const AV1_COMP *const cpi, ThreadData *td,
const TileInfo *const tile, int mi_row,
int mi_col, BLOCK_SIZE bsize) {
MACROBLOCK *const x = &td->mb;
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
const int mi_height = num_8x8_blocks_high_lookup[bsize];
@@ -349,7 +350,7 @@
cm->mi_cols);
}
-static void set_offsets_extend(AV1_COMP *cpi, ThreadData *td,
+static void set_offsets_extend(const AV1_COMP *const cpi, ThreadData *td,
const TileInfo *const tile, int mi_row_pred,
int mi_col_pred, int mi_row_ori, int mi_col_ori,
BLOCK_SIZE bsize_pred) {
@@ -357,7 +358,7 @@
// (mi_row_ori, mi_col_ori, bsize_ori): region for mv
// (mi_row_pred, mi_col_pred, bsize_pred): region to predict
MACROBLOCK *const x = &td->mb;
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const int mi_width = num_8x8_blocks_wide_lookup[bsize_pred];
const int mi_height = num_8x8_blocks_high_lookup[bsize_pred];
@@ -973,7 +974,7 @@
}
#if CONFIG_DUAL_FILTER
-static void reset_intmv_filter_type(AV1_COMMON *cm, MACROBLOCKD *xd,
+static void reset_intmv_filter_type(const AV1_COMMON *const cm, MACROBLOCKD *xd,
MB_MODE_INFO *mbmi) {
int dir;
for (dir = 0; dir < 2; ++dir) {
@@ -1011,11 +1012,11 @@
}
#endif // CONFIG_GLOBAL_MOTION
-static void update_state(AV1_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
- RUN_TYPE dry_run) {
+static void update_state(const AV1_COMP *const cpi, ThreadData *td,
+ PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, RUN_TYPE dry_run) {
int i, x_idx, y;
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
RD_COUNTS *const rdc = &td->rd_counts;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1132,18 +1133,22 @@
if (dry_run) return;
#if CONFIG_INTERNAL_STATS
- if (frame_is_intra_only(cm)) {
- static const int kf_mode_index[] = {
- THR_DC /*DC_PRED*/, THR_V_PRED /*V_PRED*/,
- THR_H_PRED /*H_PRED*/, THR_D45_PRED /*D45_PRED*/,
- THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/,
- THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/,
- THR_D63_PRED /*D63_PRED*/, THR_TM /*TM_PRED*/,
- };
- ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
- } else {
- // Note how often each mode chosen as best
- ++cpi->mode_chosen_counts[ctx->best_mode_index];
+ {
+ unsigned int *const mode_chosen_counts =
+ (unsigned int *)cpi->mode_chosen_counts; // Cast const away.
+ if (frame_is_intra_only(cm)) {
+ static const int kf_mode_index[] = {
+ THR_DC /*DC_PRED*/, THR_V_PRED /*V_PRED*/,
+ THR_H_PRED /*H_PRED*/, THR_D45_PRED /*D45_PRED*/,
+ THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/,
+ THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/,
+ THR_D63_PRED /*D63_PRED*/, THR_TM /*TM_PRED*/,
+ };
+ ++mode_chosen_counts[kf_mode_index[mbmi->mode]];
+ } else {
+ // Note how often each mode chosen as best
+ ++mode_chosen_counts[ctx->best_mode_index];
+ }
}
#endif
if (!frame_is_intra_only(cm)) {
@@ -1196,14 +1201,14 @@
}
#if CONFIG_SUPERTX
-static void update_state_supertx(AV1_COMP *cpi, ThreadData *td,
+static void update_state_supertx(const AV1_COMP *const cpi, ThreadData *td,
PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
BLOCK_SIZE bsize, RUN_TYPE dry_run) {
int y, x_idx;
#if CONFIG_VAR_TX || CONFIG_REF_MV
int i;
#endif
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
RD_COUNTS *const rdc = &td->rd_counts;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1341,11 +1346,11 @@
}
}
-static void update_state_sb_supertx(AV1_COMP *cpi, ThreadData *td,
+static void update_state_sb_supertx(const AV1_COMP *const cpi, ThreadData *td,
const TileInfo *const tile, int mi_row,
int mi_col, BLOCK_SIZE bsize,
RUN_TYPE dry_run, PC_TREE *pc_tree) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblock_plane *const p = x->plane;
@@ -1497,10 +1502,11 @@
ctx->mic.mbmi.tx_type = best_tx;
}
-static void update_supertx_param_sb(AV1_COMP *cpi, ThreadData *td, int mi_row,
- int mi_col, BLOCK_SIZE bsize, int best_tx,
- TX_SIZE supertx_size, PC_TREE *pc_tree) {
- AV1_COMMON *const cm = &cpi->common;
+static void update_supertx_param_sb(const AV1_COMP *const cpi, ThreadData *td,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ int best_tx, TX_SIZE supertx_size,
+ PC_TREE *pc_tree) {
+ const AV1_COMMON *const cm = &cpi->common;
int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
PARTITION_TYPE partition = pc_tree->partitioning;
BLOCK_SIZE subsize = get_subsize(bsize, partition);
@@ -1584,10 +1590,10 @@
x->e_mbd.plane[i].subsampling_y);
}
-static int set_segment_rdmult(AV1_COMP *const cpi, MACROBLOCK *const x,
+static int set_segment_rdmult(const AV1_COMP *const cpi, MACROBLOCK *const x,
int8_t segment_id) {
int segment_qindex;
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
av1_init_plane_quantizers(cpi, x, segment_id);
aom_clear_system_state();
segment_qindex = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
@@ -1595,7 +1601,7 @@
return av1_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
}
-static void rd_pick_sb_modes(AV1_COMP *cpi, TileDataEnc *tile_data,
+static void rd_pick_sb_modes(const AV1_COMP *const cpi, TileDataEnc *tile_data,
MACROBLOCK *const x, int mi_row, int mi_col,
RD_COST *rd_cost,
#if CONFIG_SUPERTX
@@ -1606,7 +1612,7 @@
#endif
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
int64_t best_rd) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
@@ -1787,7 +1793,7 @@
}
#endif
-static void update_stats(AV1_COMMON *cm, ThreadData *td
+static void update_stats(const AV1_COMMON *const cm, ThreadData *td
#if CONFIG_SUPERTX
,
int supertx_enabled
@@ -2099,9 +2105,9 @@
#endif
}
-static void encode_b(AV1_COMP *cpi, const TileInfo *const tile, ThreadData *td,
- TOKENEXTRA **tp, int mi_row, int mi_col, RUN_TYPE dry_run,
- BLOCK_SIZE bsize,
+static void encode_b(const AV1_COMP *const cpi, const TileInfo *const tile,
+ ThreadData *td, TOKENEXTRA **tp, int mi_row, int mi_col,
+ RUN_TYPE dry_run, BLOCK_SIZE bsize,
#if CONFIG_EXT_PARTITION_TYPES
PARTITION_TYPE partition,
#endif
@@ -2123,9 +2129,10 @@
}
}
-static void encode_sb(AV1_COMP *cpi, ThreadData *td, const TileInfo *const tile,
- TOKENEXTRA **tp, int mi_row, int mi_col, RUN_TYPE dry_run,
- BLOCK_SIZE bsize, PC_TREE *pc_tree, int *rate) {
+static void encode_sb(const AV1_COMP *const cpi, ThreadData *td,
+ const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
+ int mi_col, RUN_TYPE dry_run, BLOCK_SIZE bsize,
+ PC_TREE *pc_tree, int *rate) {
const AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -2933,26 +2940,25 @@
}
// TODO(jingning) refactor functions setting partition search range
-static void set_partition_range(AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row,
+static void set_partition_range(const AV1_COMMON *const cm,
+ const MACROBLOCKD *const xd, int mi_row,
int mi_col, BLOCK_SIZE bsize,
- BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
- int mi_width = num_8x8_blocks_wide_lookup[bsize];
- int mi_height = num_8x8_blocks_high_lookup[bsize];
+ BLOCK_SIZE *const min_bs,
+ BLOCK_SIZE *const max_bs) {
+ const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ const int mi_height = num_8x8_blocks_high_lookup[bsize];
int idx, idy;
- MODE_INFO *mi;
const int idx_str = cm->mi_stride * mi_row + mi_col;
- MODE_INFO **prev_mi = &cm->prev_mi_grid_visible[idx_str];
- BLOCK_SIZE bs, min_size, max_size;
-
- min_size = BLOCK_LARGEST;
- max_size = BLOCK_4X4;
+ MODE_INFO **const prev_mi = &cm->prev_mi_grid_visible[idx_str];
+ BLOCK_SIZE min_size = BLOCK_64X64; // default values
+ BLOCK_SIZE max_size = BLOCK_4X4;
if (prev_mi) {
for (idy = 0; idy < mi_height; ++idy) {
for (idx = 0; idx < mi_width; ++idx) {
- mi = prev_mi[idy * cm->mi_stride + idx];
- bs = mi ? mi->mbmi.sb_type : bsize;
+ const MODE_INFO *const mi = prev_mi[idy * cm->mi_stride + idx];
+ const BLOCK_SIZE bs = mi ? mi->mbmi.sb_type : bsize;
min_size = AOMMIN(min_size, bs);
max_size = AOMMAX(max_size, bs);
}
@@ -2961,8 +2967,8 @@
if (xd->left_available) {
for (idy = 0; idy < mi_height; ++idy) {
- mi = xd->mi[idy * cm->mi_stride - 1];
- bs = mi ? mi->mbmi.sb_type : bsize;
+ const MODE_INFO *const mi = xd->mi[idy * cm->mi_stride - 1];
+ const BLOCK_SIZE bs = mi ? mi->mbmi.sb_type : bsize;
min_size = AOMMIN(min_size, bs);
max_size = AOMMAX(max_size, bs);
}
@@ -2970,8 +2976,8 @@
if (xd->up_available) {
for (idx = 0; idx < mi_width; ++idx) {
- mi = xd->mi[idx - cm->mi_stride];
- bs = mi ? mi->mbmi.sb_type : bsize;
+ const MODE_INFO *const mi = xd->mi[idx - cm->mi_stride];
+ const BLOCK_SIZE bs = mi ? mi->mbmi.sb_type : bsize;
min_size = AOMMIN(min_size, bs);
max_size = AOMMAX(max_size, bs);
}
@@ -3094,10 +3100,10 @@
#if CONFIG_EXT_PARTITION_TYPES
static void rd_test_partition3(
- AV1_COMP *cpi, ThreadData *td, TileDataEnc *tile_data, TOKENEXTRA **tp,
- PC_TREE *pc_tree, RD_COST *best_rdc, PICK_MODE_CONTEXT ctxs[3],
- PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col, BLOCK_SIZE bsize,
- PARTITION_TYPE partition,
+ const AV1_COMP *const cpi, ThreadData *td, TileDataEnc *tile_data,
+ TOKENEXTRA **tp, PC_TREE *pc_tree, RD_COST *best_rdc,
+ PICK_MODE_CONTEXT ctxs[3], PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, PARTITION_TYPE partition,
#if CONFIG_SUPERTX
int64_t best_rd, int *best_rate_nocoef, RD_SEARCH_MACROBLOCK_CONTEXT *x_ctx,
#endif
@@ -3107,7 +3113,7 @@
MACROBLOCKD *const xd = &x->e_mbd;
RD_COST this_rdc, sum_rdc;
#if CONFIG_SUPERTX
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
int this_rate_nocoef, sum_rate_nocoef;
int abort_flag;
@@ -3274,7 +3280,7 @@
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
-static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
+static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td,
TileDataEnc *tile_data, TOKENEXTRA **tp,
int mi_row, int mi_col, BLOCK_SIZE bsize,
RD_COST *rd_cost,
@@ -3282,17 +3288,16 @@
int *rate_nocoef,
#endif
int64_t best_rd, PC_TREE *pc_tree) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
RD_SEARCH_MACROBLOCK_CONTEXT x_ctx;
- TOKENEXTRA *tp_orig = *tp;
+ const TOKENEXTRA *const tp_orig = *tp;
PICK_MODE_CONTEXT *ctx = &pc_tree->none;
- int i;
const int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
- int *partition_cost = cpi->partition_cost[pl];
+ const int *partition_cost = cpi->partition_cost[pl];
int tmp_partition_cost[PARTITION_TYPES];
BLOCK_SIZE subsize;
RD_COST this_rdc, sum_rdc, best_rdc;
@@ -3303,8 +3308,9 @@
bsize <= MAX_SUPERTX_BLOCK_SIZE &&
!xd->lossless[0];
#endif // CONFIG_SUPERTX
- int do_split = bsize >= BLOCK_8X8;
- int do_rect = 1;
+ const int bsize_at_least_8x8 = (bsize >= BLOCK_8X8);
+ int do_square_split = bsize_at_least_8x8;
+ int do_rectangular_split = 1;
#if CONFIG_EXT_PARTITION_TYPES
BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
#endif
@@ -3325,9 +3331,9 @@
int partition_none_allowed = !force_horz_split && !force_vert_split;
int partition_horz_allowed =
- !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
+ !force_vert_split && yss <= xss && bsize_at_least_8x8;
int partition_vert_allowed =
- !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;
+ !force_horz_split && xss <= yss && bsize_at_least_8x8;
(void)*tp_orig;
if (force_horz_split || force_vert_split) {
@@ -3377,7 +3383,7 @@
x->mb_energy = av1_block_energy(cpi, x, bsize);
if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
- int cb_partition_search_ctrl =
+ const int cb_partition_search_ctrl =
((pc_tree->index == 0 || pc_tree->index == 3) +
get_chessboard_index(cm->current_video_frame)) &
0x1;
@@ -3389,12 +3395,13 @@
// Determine partition types in search according to the speed features.
// The threshold set here has to be of square block size.
if (cpi->sf.auto_min_max_partition_size) {
- partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
- partition_horz_allowed &=
- ((bsize <= max_size && bsize > min_size) || force_horz_split);
- partition_vert_allowed &=
- ((bsize <= max_size && bsize > min_size) || force_vert_split);
- do_split &= bsize > min_size;
+ const int no_partition_allowed = (bsize <= max_size && bsize >= min_size);
+ // Note: Further partitioning is NOT allowed when bsize == min_size already.
+ const int partition_allowed = (bsize <= max_size && bsize > min_size);
+ partition_none_allowed &= no_partition_allowed;
+ partition_horz_allowed &= partition_allowed || force_horz_split;
+ partition_vert_allowed &= partition_allowed || force_vert_split;
+ do_square_split &= bsize > min_size;
}
if (cpi->sf.use_square_partition_only) {
partition_horz_allowed &= force_horz_split;
@@ -3420,7 +3427,7 @@
#if CONFIG_FP_MB_STATS
// Decide whether we shall split directly and skip searching NONE by using
// the first pass block statistics
- if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_split &&
+ if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_square_split &&
partition_none_allowed && src_diff_var > 4 &&
cm->base_qindex < qindex_split_threshold_lookup[bsize]) {
int mb_row = mi_row >> 1;
@@ -3479,7 +3486,7 @@
#endif
bsize, ctx, best_rdc.rdcost);
if (this_rdc.rate != INT_MAX) {
- if (bsize >= BLOCK_8X8) {
+ if (bsize_at_least_8x8) {
this_rdc.rate += partition_cost[PARTITION_NONE];
this_rdc.rdcost =
RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
@@ -3489,22 +3496,21 @@
}
if (this_rdc.rdcost < best_rdc.rdcost) {
- int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr;
- int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
+ // Adjust dist breakout threshold according to the partition size.
+ const int64_t dist_breakout_thr =
+ cpi->sf.partition_search_breakout_dist_thr >>
+ ((2 * (MAX_SB_SIZE_LOG2 - 2)) -
+ (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]));
+ const int rate_breakout_thr =
+ cpi->sf.partition_search_breakout_rate_thr *
+ num_pels_log2_lookup[bsize];
best_rdc = this_rdc;
#if CONFIG_SUPERTX
best_rate_nocoef = this_rate_nocoef;
assert(best_rate_nocoef >= 0);
#endif
- if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
-
- // Adjust dist breakout threshold according to the partition size.
- dist_breakout_thr >>=
- (2 * (MAX_SB_SIZE_LOG2 - 2)) -
- (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
-
- rate_breakout_thr *= num_pels_log2_lookup[bsize];
+ if (bsize_at_least_8x8) pc_tree->partitioning = PARTITION_NONE;
// If all y, u, v transform blocks in this partition are skippable, and
// the dist & rate are within the thresholds, the partition search is
@@ -3514,8 +3520,8 @@
if (!x->e_mbd.lossless[xd->mi[0]->mbmi.segment_id] &&
(ctx->skippable && best_rdc.dist < dist_breakout_thr &&
best_rdc.rate < rate_breakout_thr)) {
- do_split = 0;
- do_rect = 0;
+ do_square_split = 0;
+ do_rectangular_split = 0;
}
#if CONFIG_FP_MB_STATS
@@ -3524,7 +3530,7 @@
// If that is the case, check the difference variance between the
// current frame and the last frame. If the variance is small enough,
// stop further splitting in RD optimization
- if (cpi->use_fp_mb_stats && do_split != 0 &&
+ if (cpi->use_fp_mb_stats && do_square_split &&
cm->base_qindex > qindex_skip_threshold_lookup[bsize]) {
int mb_row = mi_row >> 1;
int mb_col = mi_col >> 1;
@@ -3557,8 +3563,8 @@
cpi, &x->plane[0].src, mi_row, mi_col, bsize);
}
if (src_diff_var < 8) {
- do_split = 0;
- do_rect = 0;
+ do_square_split = 0;
+ do_rectangular_split = 0;
}
}
}
@@ -3575,10 +3581,10 @@
// PARTITION_SPLIT
// TODO(jingning): use the motion vectors given by the above search as
// the starting point of motion search in the following partition type check.
- if (do_split) {
+ if (do_square_split) {
+ int reached_last_index = 0;
subsize = get_subsize(bsize, PARTITION_SPLIT);
if (bsize == BLOCK_8X8) {
- i = 4;
#if CONFIG_DUAL_FILTER
if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
pc_tree->leaf_split[0]->pred_interp_filter =
@@ -3649,29 +3655,31 @@
pc_tree->partitioning = best_partition;
}
#endif // CONFIG_SUPERTX
+ reached_last_index = 1;
} else {
+ int idx;
#if CONFIG_SUPERTX
- for (i = 0; i < 4 && sum_rdc.rdcost < INT64_MAX; ++i) {
+ for (idx = 0; idx < 4 && sum_rdc.rdcost < INT64_MAX; ++idx) {
#else
- for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
+ for (idx = 0; idx < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++idx) {
#endif // CONFIG_SUPERTX
- const int x_idx = (i & 1) * mi_step;
- const int y_idx = (i >> 1) * mi_step;
+ const int x_idx = (idx & 1) * mi_step;
+ const int y_idx = (idx >> 1) * mi_step;
if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
continue;
if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
- pc_tree->split[i]->index = i;
+ pc_tree->split[idx]->index = idx;
#if CONFIG_SUPERTX
rd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx,
mi_col + x_idx, subsize, &this_rdc, &this_rate_nocoef,
- INT64_MAX - sum_rdc.rdcost, pc_tree->split[i]);
+ INT64_MAX - sum_rdc.rdcost, pc_tree->split[idx]);
#else
- rd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx,
- mi_col + x_idx, subsize, &this_rdc,
- best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
+ rd_pick_partition(
+ cpi, td, tile_data, tp, mi_row + y_idx, mi_col + x_idx, subsize,
+ &this_rdc, best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[idx]);
#endif // CONFIG_SUPERTX
if (this_rdc.rate == INT_MAX) {
@@ -3689,8 +3697,9 @@
#endif // CONFIG_SUPERTX
}
}
+ reached_last_index = (idx == 4);
#if CONFIG_SUPERTX
- if (supertx_allowed && sum_rdc.rdcost < INT64_MAX && i == 4) {
+ if (supertx_allowed && sum_rdc.rdcost < INT64_MAX && reached_last_index) {
TX_SIZE supertx_size = max_txsize_lookup[bsize];
const PARTITION_TYPE best_partition = pc_tree->partitioning;
@@ -3732,7 +3741,7 @@
#endif // CONFIG_SUPERTX
}
- if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
+ if (reached_last_index && sum_rdc.rdcost < best_rdc.rdcost) {
sum_rdc.rate += partition_cost[PARTITION_SPLIT];
sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
#if CONFIG_SUPERTX
@@ -3747,10 +3756,10 @@
#endif // CONFIG_SUPERTX
pc_tree->partitioning = PARTITION_SPLIT;
}
- } else {
+ } else if (cpi->sf.less_rectangular_check) {
// skip rectangular partition test when larger block size
// gives better rd cost
- if (cpi->sf.less_rectangular_check) do_rect &= !partition_none_allowed;
+ do_rectangular_split &= !partition_none_allowed;
}
restore_context(x, &x_ctx, mi_row, mi_col, bsize);
@@ -3758,7 +3767,7 @@
// PARTITION_HORZ
if (partition_horz_allowed &&
- (do_rect || av1_active_h_edge(cpi, mi_row, mi_step))) {
+ (do_rectangular_split || av1_active_h_edge(cpi, mi_row, mi_step))) {
subsize = get_subsize(bsize, PARTITION_HORZ);
if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
#if CONFIG_DUAL_FILTER
@@ -3787,7 +3796,7 @@
#else
if (sum_rdc.rdcost < best_rdc.rdcost &&
#endif // CONFIG_SUPERTX
- mi_row + mi_step < cm->mi_rows && bsize > BLOCK_8X8) {
+ !force_horz_split && bsize > BLOCK_8X8) {
PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
update_state(cpi, td, ctx, mi_row, mi_col, subsize, 1);
encode_superblock(cpi, td, tp, DRY_RUN_NORMAL, mi_row, mi_col, subsize,
@@ -3896,7 +3905,7 @@
// PARTITION_VERT
if (partition_vert_allowed &&
- (do_rect || av1_active_v_edge(cpi, mi_col, mi_step))) {
+ (do_rectangular_split || av1_active_v_edge(cpi, mi_col, mi_step))) {
subsize = get_subsize(bsize, PARTITION_VERT);
if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
@@ -3925,7 +3934,7 @@
#else
if (sum_rdc.rdcost < best_rdc.rdcost &&
#endif // CONFIG_SUPERTX
- mi_col + mi_step < cm->mi_cols && bsize > BLOCK_8X8) {
+ !force_vert_split && bsize > BLOCK_8X8) {
update_state(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, subsize, 1);
encode_superblock(cpi, td, tp, DRY_RUN_NORMAL, mi_row, mi_col, subsize,
&pc_tree->vertical[0], NULL);
@@ -4032,7 +4041,7 @@
#if CONFIG_EXT_PARTITION_TYPES
// PARTITION_HORZ_A
- if (partition_horz_allowed && do_rect && bsize > BLOCK_8X8 &&
+ if (partition_horz_allowed && do_rectangular_split && bsize > BLOCK_8X8 &&
partition_none_allowed) {
subsize = get_subsize(bsize, PARTITION_HORZ_A);
rd_test_partition3(cpi, td, tile_data, tp, pc_tree, &best_rdc,
@@ -4046,7 +4055,7 @@
restore_context(x, &x_ctx, mi_row, mi_col, bsize);
}
// PARTITION_HORZ_B
- if (partition_horz_allowed && do_rect && bsize > BLOCK_8X8 &&
+ if (partition_horz_allowed && do_rectangular_split && bsize > BLOCK_8X8 &&
partition_none_allowed) {
subsize = get_subsize(bsize, PARTITION_HORZ_B);
rd_test_partition3(cpi, td, tile_data, tp, pc_tree, &best_rdc,
@@ -4060,7 +4069,7 @@
restore_context(x, &x_ctx, mi_row, mi_col, bsize);
}
// PARTITION_VERT_A
- if (partition_vert_allowed && do_rect && bsize > BLOCK_8X8 &&
+ if (partition_vert_allowed && do_rectangular_split && bsize > BLOCK_8X8 &&
partition_none_allowed) {
subsize = get_subsize(bsize, PARTITION_VERT_A);
rd_test_partition3(cpi, td, tile_data, tp, pc_tree, &best_rdc,
@@ -4074,7 +4083,7 @@
restore_context(x, &x_ctx, mi_row, mi_col, bsize);
}
// PARTITION_VERT_B
- if (partition_vert_allowed && do_rect && bsize > BLOCK_8X8 &&
+ if (partition_vert_allowed && do_rectangular_split && bsize > BLOCK_8X8 &&
partition_none_allowed) {
subsize = get_subsize(bsize, PARTITION_VERT_B);
rd_test_partition3(cpi, td, tile_data, tp, pc_tree, &best_rdc,
@@ -4920,9 +4929,10 @@
}
}
-static void tx_partition_count_update(AV1_COMMON *cm, MACROBLOCKD *xd,
- BLOCK_SIZE plane_bsize, int mi_row,
- int mi_col, FRAME_COUNTS *td_counts) {
+static void tx_partition_count_update(const AV1_COMMON *const cm,
+ MACROBLOCKD *xd, BLOCK_SIZE plane_bsize,
+ int mi_row, int mi_col,
+ FRAME_COUNTS *td_counts) {
const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
const int mi_height = num_4x4_blocks_high_lookup[plane_bsize];
TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
@@ -4981,9 +4991,9 @@
}
}
-static void tx_partition_set_contexts(AV1_COMMON *cm, MACROBLOCKD *xd,
- BLOCK_SIZE plane_bsize, int mi_row,
- int mi_col) {
+static void tx_partition_set_contexts(const AV1_COMMON *const cm,
+ MACROBLOCKD *xd, BLOCK_SIZE plane_bsize,
+ int mi_row, int mi_col) {
const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
const int mi_height = num_4x4_blocks_high_lookup[plane_bsize];
TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
@@ -5001,11 +5011,11 @@
}
#endif
-static void encode_superblock(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
- RUN_TYPE dry_run, int mi_row, int mi_col,
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int *rate) {
- AV1_COMMON *const cm = &cpi->common;
+static void encode_superblock(const AV1_COMP *const cpi, ThreadData *td,
+ TOKENEXTRA **t, RUN_TYPE dry_run, int mi_row,
+ int mi_col, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int *rate) {
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO **mi_8x8 = xd->mi;
@@ -5271,8 +5281,9 @@
return 0;
}
-static int check_intra_sb(AV1_COMP *cpi, const TileInfo *const tile, int mi_row,
- int mi_col, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
+static int check_intra_sb(const AV1_COMP *const cpi, const TileInfo *const tile,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ PC_TREE *pc_tree) {
const AV1_COMMON *const cm = &cpi->common;
const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
@@ -5381,7 +5392,7 @@
}
}
-static void predict_superblock(AV1_COMP *cpi, ThreadData *td,
+static void predict_superblock(const AV1_COMP *const cpi, ThreadData *td,
#if CONFIG_EXT_INTER
int mi_row_ori, int mi_col_ori,
#endif // CONFIG_EXT_INTER
@@ -5390,7 +5401,7 @@
// Used in supertx
// (mi_row_ori, mi_col_ori): location for mv
// (mi_row_pred, mi_col_pred, bsize_pred): region to predict
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi_8x8 = xd->mi[0];
@@ -5422,7 +5433,7 @@
bsize_pred, block);
}
-static void predict_b_extend(AV1_COMP *cpi, ThreadData *td,
+static void predict_b_extend(const AV1_COMP *const cpi, ThreadData *td,
const TileInfo *const tile, int block,
int mi_row_ori, int mi_col_ori, int mi_row_pred,
int mi_col_pred, int mi_row_top, int mi_col_top,
@@ -5438,7 +5449,7 @@
// bextend: 1: region to predict is an extension of ori; 0: not
MACROBLOCK *const x = &td->mb;
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
int r = (mi_row_pred - mi_row_top) * MI_SIZE;
int c = (mi_col_pred - mi_col_top) * MI_SIZE;
@@ -5475,7 +5486,7 @@
if (!dry_run && !bextend) update_stats(&cpi->common, td, 1);
}
-static void extend_dir(AV1_COMP *cpi, ThreadData *td,
+static void extend_dir(const AV1_COMP *const cpi, ThreadData *td,
const TileInfo *const tile, int block, BLOCK_SIZE bsize,
BLOCK_SIZE top_bsize, int mi_row, int mi_col,
int mi_row_top, int mi_col_top, RUN_TYPE dry_run,
@@ -5547,7 +5558,7 @@
}
}
-static void extend_all(AV1_COMP *cpi, ThreadData *td,
+static void extend_all(const AV1_COMP *const cpi, ThreadData *td,
const TileInfo *const tile, int block, BLOCK_SIZE bsize,
BLOCK_SIZE top_bsize, int mi_row, int mi_col,
int mi_row_top, int mi_col_top, RUN_TYPE dry_run,
@@ -5579,13 +5590,13 @@
// then applied to the 2 masked prediction mentioned above in vertical direction
// If the block is split into more than one level, at every stage, masked
// prediction is stored in dst_buf[] passed from higher level.
-static void predict_sb_complex(AV1_COMP *cpi, ThreadData *td,
+static void predict_sb_complex(const AV1_COMP *const cpi, ThreadData *td,
const TileInfo *const tile, int mi_row,
int mi_col, int mi_row_top, int mi_col_top,
RUN_TYPE dry_run, BLOCK_SIZE bsize,
BLOCK_SIZE top_bsize, uint8_t *dst_buf[3],
int dst_stride[3], PC_TREE *pc_tree) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -5637,7 +5648,11 @@
}
#endif // CONFIG_AOM_HIGHBITDEPTH
- if (!dry_run && bsize < top_bsize) cm->counts.partition[ctx][partition]++;
+ if (!dry_run && bsize < top_bsize) {
+ // Explicitly cast away const.
+ FRAME_COUNTS *const frame_counts = (FRAME_COUNTS *)&cm->counts;
+ frame_counts->partition[ctx][partition]++;
+ }
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
@@ -6022,11 +6037,11 @@
#endif // CONFIG_EXT_PARTITION_TYPES
}
-static void rd_supertx_sb(AV1_COMP *cpi, ThreadData *td,
+static void rd_supertx_sb(const AV1_COMP *const cpi, ThreadData *td,
const TileInfo *const tile, int mi_row, int mi_col,
BLOCK_SIZE bsize, int *tmp_rate, int64_t *tmp_dist,
TX_TYPE *best_tx, PC_TREE *pc_tree) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
int plane, pnskip, skippable, skippable_uv, rate_uv, this_rate,
diff --git a/av1/encoder/encoder.h b/av1/encoder/encoder.h
index a99a5cb..8d67c67 100644
--- a/av1/encoder/encoder.h
+++ b/av1/encoder/encoder.h
@@ -710,7 +710,7 @@
return cpi->alt_fb_idx;
}
-static INLINE int get_ref_frame_buf_idx(const AV1_COMP *const cpi,
+static INLINE int get_ref_frame_buf_idx(const AV1_COMP *cpi,
MV_REFERENCE_FRAME ref_frame) {
const AV1_COMMON *const cm = &cpi->common;
const int map_idx = get_ref_frame_map_idx(cpi, ref_frame);
@@ -718,15 +718,15 @@
}
static INLINE YV12_BUFFER_CONFIG *get_ref_frame_buffer(
- AV1_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
+ const AV1_COMMON *const cm = &cpi->common;
const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
return buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf
: NULL;
}
static INLINE const YV12_BUFFER_CONFIG *get_upsampled_ref(
- AV1_COMP *cpi, const MV_REFERENCE_FRAME ref_frame) {
+ const AV1_COMP *cpi, const MV_REFERENCE_FRAME ref_frame) {
// Use up-sampled reference frames.
const int buf_idx =
cpi->upsampled_ref_idx[get_ref_frame_map_idx(cpi, ref_frame)];
@@ -797,7 +797,7 @@
}
#endif // CONFIG_EXT_REFS
-static INLINE void set_ref_ptrs(AV1_COMMON *cm, MACROBLOCKD *xd,
+static INLINE void set_ref_ptrs(const AV1_COMMON *cm, MACROBLOCKD *xd,
MV_REFERENCE_FRAME ref0,
MV_REFERENCE_FRAME ref1) {
xd->block_refs[0] =
diff --git a/av1/encoder/mcomp.c b/av1/encoder/mcomp.c
index bd90739..2937853 100644
--- a/av1/encoder/mcomp.c
+++ b/av1/encoder/mcomp.c
@@ -1805,9 +1805,9 @@
/* do_refine: If last step (1-away) of n-step search doesn't pick the center
point as the best match, we will do a final 1-away diamond
refining search */
-static int full_pixel_diamond(AV1_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
- int step_param, int sadpb, int further_steps,
- int do_refine, int *cost_list,
+static int full_pixel_diamond(const AV1_COMP *const cpi, MACROBLOCK *x,
+ MV *mvp_full, int step_param, int sadpb,
+ int further_steps, int do_refine, int *cost_list,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *ref_mv) {
MV temp_mv;
@@ -1870,7 +1870,7 @@
#define MIN_INTERVAL 1
// Runs an limited range exhaustive mesh search using a pattern set
// according to the encode speed profile.
-static int full_pixel_exhaustive(AV1_COMP *cpi, MACROBLOCK *x,
+static int full_pixel_exhaustive(const AV1_COMP *const cpi, MACROBLOCK *x,
const MV *centre_mv_full, int sadpb,
int *cost_list,
const aom_variance_fn_ptr_t *fn_ptr,
@@ -2243,7 +2243,7 @@
}
#define MIN_EX_SEARCH_LIMIT 128
-static int is_exhaustive_allowed(AV1_COMP *cpi, MACROBLOCK *x) {
+static int is_exhaustive_allowed(const AV1_COMP *const cpi, MACROBLOCK *x) {
const SPEED_FEATURES *const sf = &cpi->sf;
const int max_ex =
AOMMAX(MIN_EX_SEARCH_LIMIT,
@@ -2254,13 +2254,13 @@
(*x->ex_search_count_ptr <= max_ex) && !cpi->rc.is_src_frame_alt_ref;
}
-int av1_full_pixel_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+int av1_full_pixel_search(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
MV *mvp_full, int step_param, int error_per_bit,
int *cost_list, const MV *ref_mv, int var_max,
int rd) {
const SPEED_FEATURES *const sf = &cpi->sf;
const SEARCH_METHODS method = sf->mv.search_method;
- aom_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
+ const aom_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
int var = 0;
if (cost_list) {
@@ -2530,7 +2530,7 @@
}
int av1_find_best_masked_sub_pixel_tree_up(
- AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
+ const AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
@@ -3031,7 +3031,7 @@
}
int av1_find_best_obmc_sub_pixel_tree_up(
- AV1_COMP *cpi, MACROBLOCK *x, int mi_row, int mi_col, MV *bestmv,
+ const AV1_COMP *cpi, MACROBLOCK *x, int mi_row, int mi_col, MV *bestmv,
const MV *ref_mv, int allow_hp, int error_per_bit,
const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
diff --git a/av1/encoder/mcomp.h b/av1/encoder/mcomp.h
index 8c42825..e244a3f 100644
--- a/av1/encoder/mcomp.h
+++ b/av1/encoder/mcomp.h
@@ -114,10 +114,10 @@
struct AV1_COMP;
-int av1_full_pixel_search(struct AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
- MV *mvp_full, int step_param, int error_per_bit,
- int *cost_list, const MV *ref_mv, int var_max,
- int rd);
+int av1_full_pixel_search(const struct AV1_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize, MV *mvp_full, int step_param,
+ int error_per_bit, int *cost_list, const MV *ref_mv,
+ int var_max, int rd);
#if CONFIG_EXT_INTER
int av1_find_best_masked_sub_pixel_tree(
@@ -127,11 +127,11 @@
int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
int is_second);
int av1_find_best_masked_sub_pixel_tree_up(
- struct AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
- int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
- int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
- int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
- unsigned int *sse1, int is_second, int use_upsampled_ref);
+ const struct AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask,
+ int mask_stride, int mi_row, int mi_col, MV *bestmv, const MV *ref_mv,
+ int allow_hp, int error_per_bit, const aom_variance_fn_ptr_t *vfp,
+ int forced_stop, int iters_per_step, int *mvjcost, int *mvcost[2],
+ int *distortion, unsigned int *sse1, int is_second, int use_upsampled_ref);
int av1_masked_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
const uint8_t *mask, int mask_stride,
MV *mvp_full, int step_param, int sadpb,
@@ -147,8 +147,8 @@
const aom_variance_fn_ptr_t *fn_ptr,
const MV *ref_mv, MV *dst_mv, int is_second);
int av1_find_best_obmc_sub_pixel_tree_up(
- struct AV1_COMP *cpi, MACROBLOCK *x, int mi_row, int mi_col, MV *bestmv,
- const MV *ref_mv, int allow_hp, int error_per_bit,
+ const struct AV1_COMP *cpi, MACROBLOCK *x, int mi_row, int mi_col,
+ MV *bestmv, const MV *ref_mv, int allow_hp, int error_per_bit,
const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
int is_second, int use_upsampled_ref);
diff --git a/av1/encoder/rd.c b/av1/encoder/rd.c
index 2b9171f..5015837 100644
--- a/av1/encoder/rd.c
+++ b/av1/encoder/rd.c
@@ -619,7 +619,7 @@
get_entropy_contexts_plane(plane_bsize, tx_size, pd, t_above, t_left);
}
-void av1_mv_pred(AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+void av1_mv_pred(const AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) {
int i;
int zero_seen = 0;
diff --git a/av1/encoder/rd.h b/av1/encoder/rd.h
index 3ca4768..c9d21a8 100644
--- a/av1/encoder/rd.h
+++ b/av1/encoder/rd.h
@@ -398,8 +398,7 @@
void av1_model_rd_from_var_lapndz(int64_t var, unsigned int n,
unsigned int qstep, int *rate, int64_t *dist);
-int av1_get_switchable_rate(const struct AV1_COMP *cpi,
- const MACROBLOCKD *const xd);
+int av1_get_switchable_rate(const struct AV1_COMP *cpi, const MACROBLOCKD *xd);
int av1_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
int stride);
@@ -438,8 +437,9 @@
return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
}
-void av1_mv_pred(struct AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
- int ref_y_stride, int ref_frame, BLOCK_SIZE block_size);
+void av1_mv_pred(const struct AV1_COMP *cpi, MACROBLOCK *x,
+ uint8_t *ref_y_buffer, int ref_y_stride, int ref_frame,
+ BLOCK_SIZE block_size);
static INLINE void set_error_per_bit(MACROBLOCK *x, int rdmult) {
x->errorperbit = rdmult >> RD_EPB_SHIFT;
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index bd5ae7e..4707517 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -1294,10 +1294,10 @@
}
#endif // CONFIG_SUPERTX
-static int64_t txfm_yrd(AV1_COMP *cpi, MACROBLOCK *x, int *r, int64_t *d,
- int *s, int64_t *sse, int64_t ref_best_rd,
+static int64_t txfm_yrd(const AV1_COMP *const cpi, MACROBLOCK *x, int *r,
+ int64_t *d, int *s, int64_t *sse, int64_t ref_best_rd,
BLOCK_SIZE bs, TX_TYPE tx_type, int tx_size) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
int64_t rd = INT64_MAX;
@@ -1371,12 +1371,12 @@
return rd;
}
-static int64_t choose_tx_size_fix_type(AV1_COMP *cpi, BLOCK_SIZE bs,
+static int64_t choose_tx_size_fix_type(const AV1_COMP *const cpi, BLOCK_SIZE bs,
MACROBLOCK *x, int *rate,
int64_t *distortion, int *skip,
int64_t *psse, int64_t ref_best_rd,
TX_TYPE tx_type, int prune) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
int r, s;
@@ -1494,18 +1494,19 @@
}
#if CONFIG_EXT_INTER
-static int64_t estimate_yrd_for_sb(AV1_COMP *cpi, BLOCK_SIZE bs, MACROBLOCK *x,
- int *r, int64_t *d, int *s, int64_t *sse,
- int64_t ref_best_rd) {
+static int64_t estimate_yrd_for_sb(const AV1_COMP *const cpi, BLOCK_SIZE bs,
+ MACROBLOCK *x, int *r, int64_t *d, int *s,
+ int64_t *sse, int64_t ref_best_rd) {
return txfm_yrd(cpi, x, r, d, s, sse, ref_best_rd, bs, DCT_DCT,
max_txsize_lookup[bs]);
}
#endif // CONFIG_EXT_INTER
-static void choose_largest_tx_size(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
- int64_t *distortion, int *skip, int64_t *sse,
- int64_t ref_best_rd, BLOCK_SIZE bs) {
- AV1_COMMON *const cm = &cpi->common;
+static void choose_largest_tx_size(const AV1_COMP *const cpi, MACROBLOCK *x,
+ int *rate, int64_t *distortion, int *skip,
+ int64_t *sse, int64_t ref_best_rd,
+ BLOCK_SIZE bs) {
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
TX_TYPE tx_type, best_tx_type = DCT_DCT;
@@ -1629,8 +1630,8 @@
mbmi->tx_size, cpi->sf.use_fast_coef_costing);
}
-static void choose_smallest_tx_size(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
- int64_t *distortion, int *skip,
+static void choose_smallest_tx_size(const AV1_COMP *const cpi, MACROBLOCK *x,
+ int *rate, int64_t *distortion, int *skip,
int64_t *sse, int64_t ref_best_rd,
BLOCK_SIZE bs) {
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1643,7 +1644,8 @@
mbmi->tx_size, cpi->sf.use_fast_coef_costing);
}
-static void choose_tx_size_type_from_rd(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
+static void choose_tx_size_type_from_rd(const AV1_COMP *const cpi,
+ MACROBLOCK *x, int *rate,
int64_t *distortion, int *skip,
int64_t *psse, int64_t ref_best_rd,
BLOCK_SIZE bs) {
@@ -1693,7 +1695,7 @@
#endif
}
-static void super_block_yrd(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
+static void super_block_yrd(const AV1_COMP *const cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skip, int64_t *psse,
BLOCK_SIZE bs, int64_t ref_best_rd) {
MACROBLOCKD *xd = &x->e_mbd;
@@ -1733,7 +1735,7 @@
#if CONFIG_PALETTE
static int rd_pick_palette_intra_sby(
- AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int palette_ctx,
+ const AV1_COMP *const cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int palette_ctx,
int dc_mode_cost, PALETTE_MODE_INFO *palette_mode_info,
uint8_t *best_palette_color_map, TX_SIZE *best_tx, TX_TYPE *best_tx_type,
PREDICTION_MODE *mode_selected, int64_t *best_rd) {
@@ -1878,13 +1880,11 @@
}
#endif // CONFIG_PALETTE
-static int64_t rd_pick_intra4x4block(AV1_COMP *cpi, MACROBLOCK *x, int row,
- int col, PREDICTION_MODE *best_mode,
- const int *bmode_costs, ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l, int *bestrate,
- int *bestratey, int64_t *bestdistortion,
- BLOCK_SIZE bsize, int *y_skip,
- int64_t rd_thresh) {
+static int64_t rd_pick_intra4x4block(
+ const AV1_COMP *const cpi, MACROBLOCK *x, int row, int col,
+ PREDICTION_MODE *best_mode, const int *bmode_costs, ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l, int *bestrate, int *bestratey, int64_t *bestdistortion,
+ BLOCK_SIZE bsize, int *y_skip, int64_t rd_thresh) {
PREDICTION_MODE mode;
MACROBLOCKD *const xd = &x->e_mbd;
int64_t best_rd = rd_thresh;
@@ -2151,10 +2151,10 @@
return best_rd;
}
-static int64_t rd_pick_intra_sub_8x8_y_mode(AV1_COMP *cpi, MACROBLOCK *mb,
- int *rate, int *rate_y,
- int64_t *distortion, int *y_skip,
- int64_t best_rd) {
+static int64_t rd_pick_intra_sub_8x8_y_mode(const AV1_COMP *const cpi,
+ MACROBLOCK *mb, int *rate,
+ int *rate_y, int64_t *distortion,
+ int *y_skip, int64_t best_rd) {
int i, j;
const MACROBLOCKD *const xd = &mb->e_mbd;
MODE_INFO *const mic = xd->mi[0];
@@ -2247,11 +2247,11 @@
#if CONFIG_EXT_INTRA
// Return 1 if an ext intra mode is selected; return 0 otherwise.
-static int rd_pick_ext_intra_sby(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
- int *rate_tokenonly, int64_t *distortion,
- int *skippable, BLOCK_SIZE bsize,
- int mode_cost, int64_t *best_rd,
- uint16_t skip_mask) {
+static int rd_pick_ext_intra_sby(const AV1_COMP *const cpi, MACROBLOCK *x,
+ int *rate, int *rate_tokenonly,
+ int64_t *distortion, int *skippable,
+ BLOCK_SIZE bsize, int mode_cost,
+ int64_t *best_rd, uint16_t skip_mask) {
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *const mic = xd->mi[0];
MB_MODE_INFO *mbmi = &mic->mbmi;
@@ -2310,7 +2310,7 @@
}
static void pick_intra_angle_routine_sby(
- AV1_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly,
+ const AV1_COMP *const cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly,
int64_t *distortion, int *skippable, int *best_angle_delta,
TX_SIZE *best_tx_size, TX_TYPE *best_tx_type, INTRA_FILTER *best_filter,
BLOCK_SIZE bsize, int rate_overhead, int64_t *best_rd) {
@@ -2337,10 +2337,11 @@
}
}
-static int64_t rd_pick_intra_angle_sby(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
- int *rate_tokenonly, int64_t *distortion,
- int *skippable, BLOCK_SIZE bsize,
- int rate_overhead, int64_t best_rd) {
+static int64_t rd_pick_intra_angle_sby(const AV1_COMP *const cpi, MACROBLOCK *x,
+ int *rate, int *rate_tokenonly,
+ int64_t *distortion, int *skippable,
+ BLOCK_SIZE bsize, int rate_overhead,
+ int64_t best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *const mic = xd->mi[0];
MB_MODE_INFO *mbmi = &mic->mbmi;
@@ -2598,10 +2599,10 @@
#endif // CONFIG_EXT_INTRA
// This function is used only for intra_only frames
-static int64_t rd_pick_intra_sby_mode(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
- int *rate_tokenonly, int64_t *distortion,
- int *skippable, BLOCK_SIZE bsize,
- int64_t best_rd) {
+static int64_t rd_pick_intra_sby_mode(const AV1_COMP *const cpi, MACROBLOCK *x,
+ int *rate, int *rate_tokenonly,
+ int64_t *distortion, int *skippable,
+ BLOCK_SIZE bsize, int64_t best_rd) {
uint8_t mode_idx;
PREDICTION_MODE mode_selected = DC_PRED;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -2625,7 +2626,7 @@
int beat_best_rd = 0;
#endif // CONFIG_EXT_INTRA
TX_TYPE best_tx_type = DCT_DCT;
- int *bmode_costs;
+ const int *bmode_costs;
#if CONFIG_PALETTE
PALETTE_MODE_INFO palette_mode_info;
PALETTE_MODE_INFO *const pmi = &mic->mbmi.palette_mode_info;
@@ -2812,7 +2813,7 @@
// Return value 0: early termination triggered, no valid rd cost available;
// 1: rd cost values are valid.
-static int super_block_uvrd(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
+static int super_block_uvrd(const AV1_COMP *const cpi, MACROBLOCK *x, int *rate,
int64_t *distortion, int *skippable, int64_t *sse,
BLOCK_SIZE bsize, int64_t ref_best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
@@ -3593,7 +3594,7 @@
#if CONFIG_PALETTE
static void rd_pick_palette_intra_sbuv(
- AV1_COMP *cpi, MACROBLOCK *x, int dc_mode_cost,
+ const AV1_COMP *const cpi, MACROBLOCK *x, int dc_mode_cost,
PALETTE_MODE_INFO *palette_mode_info, uint8_t *best_palette_color_map,
PREDICTION_MODE *mode_selected, int64_t *best_rd, int *rate,
int *rate_tokenonly, int64_t *distortion, int *skippable) {
@@ -3760,10 +3761,10 @@
#if CONFIG_EXT_INTRA
// Return 1 if an ext intra mode is selected; return 0 otherwise.
-static int rd_pick_ext_intra_sbuv(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
- int *rate_tokenonly, int64_t *distortion,
- int *skippable, BLOCK_SIZE bsize,
- int64_t *best_rd) {
+static int rd_pick_ext_intra_sbuv(const AV1_COMP *const cpi, MACROBLOCK *x,
+ int *rate, int *rate_tokenonly,
+ int64_t *distortion, int *skippable,
+ BLOCK_SIZE bsize, int64_t *best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
int ext_intra_selected_flag = 0;
@@ -3813,12 +3814,10 @@
}
}
-static void pick_intra_angle_routine_sbuv(AV1_COMP *cpi, MACROBLOCK *x,
- int *rate, int *rate_tokenonly,
- int64_t *distortion, int *skippable,
- int *best_angle_delta,
- BLOCK_SIZE bsize, int rate_overhead,
- int64_t *best_rd) {
+static void pick_intra_angle_routine_sbuv(
+ const AV1_COMP *const cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly,
+ int64_t *distortion, int *skippable, int *best_angle_delta,
+ BLOCK_SIZE bsize, int rate_overhead, int64_t *best_rd) {
MB_MODE_INFO *mbmi = &x->e_mbd.mi[0]->mbmi;
int this_rate_tokenonly, this_rate, s;
int64_t this_distortion, this_sse, this_rd;
@@ -3839,10 +3838,11 @@
}
}
-static int rd_pick_intra_angle_sbuv(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
- int *rate_tokenonly, int64_t *distortion,
- int *skippable, BLOCK_SIZE bsize,
- int rate_overhead, int64_t best_rd) {
+static int rd_pick_intra_angle_sbuv(const AV1_COMP *const cpi, MACROBLOCK *x,
+ int *rate, int *rate_tokenonly,
+ int64_t *distortion, int *skippable,
+ BLOCK_SIZE bsize, int rate_overhead,
+ int64_t best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
int this_rate_tokenonly, this_rate, s;
@@ -3909,10 +3909,10 @@
}
#endif // CONFIG_EXT_INTRA
-static int64_t rd_pick_intra_sbuv_mode(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
- int *rate_tokenonly, int64_t *distortion,
- int *skippable, BLOCK_SIZE bsize,
- TX_SIZE max_tx_size) {
+static int64_t rd_pick_intra_sbuv_mode(const AV1_COMP *const cpi, MACROBLOCK *x,
+ int *rate, int *rate_tokenonly,
+ int64_t *distortion, int *skippable,
+ BLOCK_SIZE bsize, TX_SIZE max_tx_size) {
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
PREDICTION_MODE mode;
@@ -4039,7 +4039,7 @@
return best_rd;
}
-static void choose_intra_uv_mode(AV1_COMP *cpi, MACROBLOCK *const x,
+static void choose_intra_uv_mode(const AV1_COMP *const cpi, MACROBLOCK *const x,
PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
TX_SIZE max_tx_size, int *rate_uv,
int *rate_uv_tokenonly, int64_t *dist_uv,
@@ -4052,7 +4052,7 @@
*mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode;
}
-static int cost_mv_ref(const AV1_COMP *cpi, PREDICTION_MODE mode,
+static int cost_mv_ref(const AV1_COMP *const cpi, PREDICTION_MODE mode,
#if CONFIG_REF_MV && CONFIG_EXT_INTER
int is_compound,
#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
@@ -4148,8 +4148,9 @@
2);
#endif // CONFIG_GLOBAL_MOTION
-static int set_and_cost_bmi_mvs(AV1_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
- int i, PREDICTION_MODE mode, int_mv this_mv[2],
+static int set_and_cost_bmi_mvs(const AV1_COMP *const cpi, MACROBLOCK *x,
+ MACROBLOCKD *xd, int i, PREDICTION_MODE mode,
+ int_mv this_mv[2],
int_mv frame_mv[MB_MODE_COUNT]
[TOTAL_REFS_PER_FRAME],
int_mv seg_mvs[TOTAL_REFS_PER_FRAME],
@@ -4322,7 +4323,7 @@
#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
}
-static int64_t encode_inter_mb_segment(AV1_COMP *cpi, MACROBLOCK *x,
+static int64_t encode_inter_mb_segment(const AV1_COMP *const cpi, MACROBLOCK *x,
int64_t best_yrd, int i, int *labelyrate,
int64_t *distortion, int64_t *sse,
ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
@@ -4499,7 +4500,7 @@
// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way encode zero motion.
// TODO(aconverse): Find out if this is still productive then clean up or remove
static int check_best_zero_mv(
- const AV1_COMP *cpi, const int16_t mode_context[TOTAL_REFS_PER_FRAME],
+ const AV1_COMP *const cpi, const int16_t mode_context[TOTAL_REFS_PER_FRAME],
#if CONFIG_REF_MV && CONFIG_EXT_INTER
const int16_t compound_mode_context[TOTAL_REFS_PER_FRAME],
#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
@@ -4601,8 +4602,9 @@
return 1;
}
-static void joint_motion_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
- int_mv *frame_mv, int mi_row, int mi_col,
+static void joint_motion_search(const AV1_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize, int_mv *frame_mv, int mi_row,
+ int mi_col,
#if CONFIG_EXT_INTER
int_mv *ref_mv_sub8x8[2],
#endif
@@ -4834,7 +4836,7 @@
}
static int64_t rd_pick_best_sub8x8_mode(
- AV1_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
+ const AV1_COMP *const cpi, MACROBLOCK *x, int_mv *best_ref_mv,
int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
int mvthresh,
@@ -4856,7 +4858,7 @@
int k, br = 0, idx, idy;
int64_t bd = 0, block_sse = 0;
PREDICTION_MODE this_mode;
- AV1_COMMON *cm = &cpi->common;
+ const AV1_COMMON *cm = &cpi->common;
struct macroblock_plane *const p = &x->plane[0];
struct macroblockd_plane *const pd = &xd->plane[0];
const int label_count = 4;
@@ -5772,7 +5774,7 @@
ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
}
-static void setup_buffer_inter(AV1_COMP *cpi, MACROBLOCK *x,
+static void setup_buffer_inter(const AV1_COMP *const cpi, MACROBLOCK *x,
MV_REFERENCE_FRAME ref_frame,
BLOCK_SIZE block_size, int mi_row, int mi_col,
int_mv frame_nearest_mv[TOTAL_REFS_PER_FRAME],
@@ -5817,8 +5819,8 @@
block_size);
}
-static void single_motion_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
- int mi_row, int mi_col,
+static void single_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize, int mi_row, int mi_col,
#if CONFIG_EXT_INTER
int ref_idx, int mv_idx,
#endif // CONFIG_EXT_INTER
@@ -6063,7 +6065,7 @@
}
#if CONFIG_EXT_INTER
-static void do_masked_motion_search(AV1_COMP *cpi, MACROBLOCK *x,
+static void do_masked_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,
const uint8_t *mask, int mask_stride,
BLOCK_SIZE bsize, int mi_row, int mi_col,
int_mv *tmp_mv, int *rate_mv, int ref_idx,
@@ -6194,12 +6196,12 @@
}
}
-static void do_masked_motion_search_indexed(AV1_COMP *cpi, MACROBLOCK *x,
- int wedge_index, int wedge_sign,
- BLOCK_SIZE bsize, int mi_row,
- int mi_col, int_mv *tmp_mv,
- int *rate_mv, int mv_idx[2],
- int which) {
+static void do_masked_motion_search_indexed(const AV1_COMP *const cpi,
+ MACROBLOCK *x, int wedge_index,
+ int wedge_sign, BLOCK_SIZE bsize,
+ int mi_row, int mi_col,
+ int_mv *tmp_mv, int *rate_mv,
+ int mv_idx[2], int which) {
// NOTE: which values: 0 - 0 only, 1 - 1 only, 2 - both
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -6228,7 +6230,7 @@
// However, once established that vector may be usable through the nearest and
// near mv modes to reduce distortion in subsequent blocks and also improve
// visual quality.
-static int discount_newmv_test(const AV1_COMP *cpi, int this_mode,
+static int discount_newmv_test(const AV1_COMP *const cpi, int this_mode,
int_mv this_mv,
int_mv (*mode_mv)[TOTAL_REFS_PER_FRAME],
int ref_frame) {
@@ -6594,7 +6596,7 @@
#endif // CONFIG_EXT_INTER
static int64_t handle_inter_mode(
- AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
+ const AV1_COMP *const cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
int *disable_skip, int_mv (*mode_mv)[TOTAL_REFS_PER_FRAME], int mi_row,
int mi_col,
@@ -6613,7 +6615,7 @@
InterpFilter (*single_filter)[TOTAL_REFS_PER_FRAME],
int (*single_skippable)[TOTAL_REFS_PER_FRAME], int64_t *psse,
const int64_t ref_best_rd) {
- AV1_COMMON *cm = &cpi->common;
+ const AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
@@ -7752,10 +7754,10 @@
return 0; // The rate-distortion cost will be re-calculated by caller.
}
-void av1_rd_pick_intra_mode_sb(AV1_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int64_t best_rd) {
- AV1_COMMON *const cm = &cpi->common;
+void av1_rd_pick_intra_mode_sb(const AV1_COMP *cpi, MACROBLOCK *x,
+ RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblockd_plane *const pd = xd->plane;
int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
@@ -7841,7 +7843,7 @@
}
// Do we have an internal image edge (e.g. formatting bars).
-int av1_internal_image_edge(AV1_COMP *cpi) {
+int av1_internal_image_edge(const AV1_COMP *cpi) {
return (cpi->oxcf.pass == 2) &&
((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
(cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
@@ -7850,14 +7852,14 @@
// Checks to see if a super block is on a horizontal image edge.
// In most cases this is the "real" edge unless there are formatting
// bars embedded in the stream.
-int av1_active_h_edge(AV1_COMP *cpi, int mi_row, int mi_step) {
+int av1_active_h_edge(const AV1_COMP *cpi, int mi_row, int mi_step) {
int top_edge = 0;
int bottom_edge = cpi->common.mi_rows;
int is_active_h_edge = 0;
// For two pass account for any formatting bars detected.
if (cpi->oxcf.pass == 2) {
- TWO_PASS *twopass = &cpi->twopass;
+ const TWO_PASS *const twopass = &cpi->twopass;
// The inactive region is specified in MBs not mi units.
// The image edge is in the following MB row.
@@ -7877,14 +7879,14 @@
// Checks to see if a super block is on a vertical image edge.
// In most cases this is the "real" edge unless there are formatting
// bars embedded in the stream.
-int av1_active_v_edge(AV1_COMP *cpi, int mi_col, int mi_step) {
+int av1_active_v_edge(const AV1_COMP *cpi, int mi_col, int mi_step) {
int left_edge = 0;
int right_edge = cpi->common.mi_cols;
int is_active_v_edge = 0;
// For two pass account for any formatting bars detected.
if (cpi->oxcf.pass == 2) {
- TWO_PASS *twopass = &cpi->twopass;
+ const TWO_PASS *const twopass = &cpi->twopass;
// The inactive region is specified in MBs not mi units.
// The image edge is in the following MB row.
@@ -7904,13 +7906,13 @@
// Checks to see if a super block is at the edge of the active image.
// In most cases this is the "real" edge unless there are formatting
// bars embedded in the stream.
-int av1_active_edge_sb(AV1_COMP *cpi, int mi_row, int mi_col) {
+int av1_active_edge_sb(const AV1_COMP *cpi, int mi_row, int mi_col) {
return av1_active_h_edge(cpi, mi_row, cpi->common.mib_size) ||
av1_active_v_edge(cpi, mi_col, cpi->common.mib_size);
}
#if CONFIG_PALETTE
-static void restore_uv_color_map(AV1_COMP *cpi, MACROBLOCK *x) {
+static void restore_uv_color_map(const AV1_COMP *const cpi, MACROBLOCK *x) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
@@ -7961,10 +7963,10 @@
#if CONFIG_EXT_INTRA
static void pick_ext_intra_interframe(
- AV1_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
- int *rate_uv_intra, int *rate_uv_tokenonly, int64_t *dist_uv, int *skip_uv,
- PREDICTION_MODE *mode_uv, EXT_INTRA_MODE_INFO *ext_intra_mode_info_uv,
- int8_t *uv_angle_delta,
+ const AV1_COMP *const cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
+ BLOCK_SIZE bsize, int *rate_uv_intra, int *rate_uv_tokenonly,
+ int64_t *dist_uv, int *skip_uv, PREDICTION_MODE *mode_uv,
+ EXT_INTRA_MODE_INFO *ext_intra_mode_info_uv, int8_t *uv_angle_delta,
#if CONFIG_PALETTE
PALETTE_MODE_INFO *pmi_uv, int palette_ctx,
#endif // CONFIG_PALETTE
@@ -7975,7 +7977,7 @@
int *returnrate_nocoef,
#endif // CONFIG_SUPERTX
int64_t *best_pred_rd, MB_MODE_INFO *best_mbmode, RD_COST *rd_cost) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
#if CONFIG_PALETTE
@@ -8138,7 +8140,7 @@
int left_stride);
#endif // CONFIG_MOTION_VAR
-void av1_rd_pick_inter_mode_sb(AV1_COMP *cpi, TileDataEnc *tile_data,
+void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data,
MACROBLOCK *x, int mi_row, int mi_col,
RD_COST *rd_cost,
#if CONFIG_SUPERTX
@@ -8146,9 +8148,9 @@
#endif // CONFIG_SUPERTX
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far) {
- AV1_COMMON *const cm = &cpi->common;
- RD_OPT *const rd_opt = &cpi->rd;
- SPEED_FEATURES *const sf = &cpi->sf;
+ const AV1_COMMON *const cm = &cpi->common;
+ const RD_OPT *const rd_opt = &cpi->rd;
+ const SPEED_FEATURES *const sf = &cpi->sf;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
#if CONFIG_PALETTE
@@ -9723,12 +9725,12 @@
#endif // CONFIG_PALETTE
}
-void av1_rd_pick_inter_mode_sb_seg_skip(AV1_COMP *cpi, TileDataEnc *tile_data,
- MACROBLOCK *x, RD_COST *rd_cost,
- BLOCK_SIZE bsize,
+void av1_rd_pick_inter_mode_sb_seg_skip(const AV1_COMP *cpi,
+ TileDataEnc *tile_data, MACROBLOCK *x,
+ RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
unsigned char segment_id = mbmi->segment_id;
@@ -9859,17 +9861,18 @@
store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, 0);
}
-void av1_rd_pick_inter_mode_sub8x8(struct AV1_COMP *cpi, TileDataEnc *tile_data,
- struct macroblock *x, int mi_row, int mi_col,
+void av1_rd_pick_inter_mode_sub8x8(const struct AV1_COMP *cpi,
+ TileDataEnc *tile_data, struct macroblock *x,
+ int mi_row, int mi_col,
struct RD_COST *rd_cost,
#if CONFIG_SUPERTX
int *returnrate_nocoef,
#endif // CONFIG_SUPERTX
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far) {
- AV1_COMMON *const cm = &cpi->common;
- RD_OPT *const rd_opt = &cpi->rd;
- SPEED_FEATURES *const sf = &cpi->sf;
+ const AV1_COMMON *const cm = &cpi->common;
+ const RD_OPT *const rd_opt = &cpi->rd;
+ const SPEED_FEATURES *const sf = &cpi->sf;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const struct segmentation *const seg = &cm->seg;
diff --git a/av1/encoder/rdopt.h b/av1/encoder/rdopt.h
index 584c439..16afaf3 100644
--- a/av1/encoder/rdopt.h
+++ b/av1/encoder/rdopt.h
@@ -29,20 +29,20 @@
int av1_cost_coeffs(MACROBLOCK *x, int plane, int block, int coeff_ctx,
TX_SIZE tx_size, const int16_t *scan, const int16_t *nb,
int use_fast_coef_costing);
-void av1_rd_pick_intra_mode_sb(struct AV1_COMP *cpi, struct macroblock *x,
+void av1_rd_pick_intra_mode_sb(const struct AV1_COMP *cpi, struct macroblock *x,
struct RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx, int64_t best_rd);
-unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
+unsigned int av1_get_sby_perpixel_variance(const AV1_COMP *cpi,
const struct buf_2d *ref,
BLOCK_SIZE bs);
#if CONFIG_AOM_HIGHBITDEPTH
-unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
+unsigned int av1_high_get_sby_perpixel_variance(const AV1_COMP *cpi,
const struct buf_2d *ref,
BLOCK_SIZE bs, int bd);
#endif
-void av1_rd_pick_inter_mode_sb(struct AV1_COMP *cpi,
+void av1_rd_pick_inter_mode_sb(const struct AV1_COMP *cpi,
struct TileDataEnc *tile_data,
struct macroblock *x, int mi_row, int mi_col,
struct RD_COST *rd_cost,
@@ -53,16 +53,16 @@
int64_t best_rd_so_far);
void av1_rd_pick_inter_mode_sb_seg_skip(
- struct AV1_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
- struct RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far);
+ const struct AV1_COMP *cpi, struct TileDataEnc *tile_data,
+ struct macroblock *x, struct RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far);
-int av1_internal_image_edge(struct AV1_COMP *cpi);
-int av1_active_h_edge(struct AV1_COMP *cpi, int mi_row, int mi_step);
-int av1_active_v_edge(struct AV1_COMP *cpi, int mi_col, int mi_step);
-int av1_active_edge_sb(struct AV1_COMP *cpi, int mi_row, int mi_col);
+int av1_internal_image_edge(const struct AV1_COMP *cpi);
+int av1_active_h_edge(const struct AV1_COMP *cpi, int mi_row, int mi_step);
+int av1_active_v_edge(const struct AV1_COMP *cpi, int mi_col, int mi_step);
+int av1_active_edge_sb(const struct AV1_COMP *cpi, int mi_row, int mi_col);
-void av1_rd_pick_inter_mode_sub8x8(struct AV1_COMP *cpi,
+void av1_rd_pick_inter_mode_sub8x8(const struct AV1_COMP *cpi,
struct TileDataEnc *tile_data,
struct macroblock *x, int mi_row, int mi_col,
struct RD_COST *rd_cost,
diff --git a/av1/encoder/tokenize.c b/av1/encoder/tokenize.c
index 173c934..89ea05b 100644
--- a/av1/encoder/tokenize.c
+++ b/av1/encoder/tokenize.c
@@ -344,7 +344,7 @@
#endif // !CONFIG_ANS
struct tokenize_b_args {
- AV1_COMP *cpi;
+ const AV1_COMP *cpi;
ThreadData *td;
TOKENEXTRA **tp;
int this_rate;
@@ -409,7 +409,7 @@
}
#if CONFIG_PALETTE
-void av1_tokenize_palette_sb(AV1_COMP *cpi, struct ThreadData *const td,
+void av1_tokenize_palette_sb(const AV1_COMP *cpi, struct ThreadData *const td,
int plane, TOKENEXTRA **t, RUN_TYPE dry_run,
BLOCK_SIZE bsize, int *rate) {
MACROBLOCK *const x = &td->mb;
@@ -454,7 +454,7 @@
static void tokenize_b(int plane, int block, int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
struct tokenize_b_args *const args = arg;
- AV1_COMP *cpi = args->cpi;
+ const AV1_COMP *cpi = args->cpi;
ThreadData *const td = args->td;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -481,7 +481,7 @@
unsigned int(*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
td->rd_counts.coef_counts[txsize_sqr_map[tx_size]][type][ref];
#if CONFIG_ENTROPY
- aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+ const aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
cpi->subframe_stats.coef_probs_buf[cpi->common.coef_probs_update_idx]
[txsize_sqr_map[tx_size]][type][ref];
#else
@@ -641,10 +641,10 @@
}
}
-void av1_tokenize_sb_vartx(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+void av1_tokenize_sb_vartx(const AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
RUN_TYPE dry_run, int mi_row, int mi_col,
BLOCK_SIZE bsize, int *rate) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
@@ -696,9 +696,9 @@
}
#endif // CONFIG_VAR_TX
-void av1_tokenize_sb(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+void av1_tokenize_sb(const AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
RUN_TYPE dry_run, BLOCK_SIZE bsize, int *rate) {
- AV1_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
@@ -732,9 +732,10 @@
}
#if CONFIG_SUPERTX
-void av1_tokenize_sb_supertx(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
- RUN_TYPE dry_run, BLOCK_SIZE bsize, int *rate) {
- AV1_COMMON *const cm = &cpi->common;
+void av1_tokenize_sb_supertx(const AV1_COMP *cpi, ThreadData *td,
+ TOKENEXTRA **t, RUN_TYPE dry_run, BLOCK_SIZE bsize,
+ int *rate) {
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &td->mb.e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
TOKENEXTRA *t_backup = *t;
diff --git a/av1/encoder/tokenize.h b/av1/encoder/tokenize.h
index 677d041..e869a19 100644
--- a/av1/encoder/tokenize.h
+++ b/av1/encoder/tokenize.h
@@ -66,20 +66,21 @@
// with the coefficient token cost only if dry_run = DRY_RUN_COSTCOEFS,
// otherwise rate is not incremented.
#if CONFIG_VAR_TX
-void av1_tokenize_sb_vartx(struct AV1_COMP *cpi, struct ThreadData *td,
+void av1_tokenize_sb_vartx(const struct AV1_COMP *cpi, struct ThreadData *td,
TOKENEXTRA **t, RUN_TYPE dry_run, int mi_row,
int mi_col, BLOCK_SIZE bsize, int *rate);
#endif
#if CONFIG_PALETTE
-void av1_tokenize_palette_sb(struct AV1_COMP *cpi, struct ThreadData *const td,
- int plane, TOKENEXTRA **t, RUN_TYPE dry_run,
- BLOCK_SIZE bsize, int *rate);
+void av1_tokenize_palette_sb(const struct AV1_COMP *cpi,
+ struct ThreadData *const td, int plane,
+ TOKENEXTRA **t, RUN_TYPE dry_run, BLOCK_SIZE bsize,
+ int *rate);
#endif // CONFIG_PALETTE
-void av1_tokenize_sb(struct AV1_COMP *cpi, struct ThreadData *td,
+void av1_tokenize_sb(const struct AV1_COMP *cpi, struct ThreadData *td,
TOKENEXTRA **t, RUN_TYPE dry_run, BLOCK_SIZE bsize,
int *rate);
#if CONFIG_SUPERTX
-void av1_tokenize_sb_supertx(struct AV1_COMP *cpi, struct ThreadData *td,
+void av1_tokenize_sb_supertx(const struct AV1_COMP *cpi, struct ThreadData *td,
TOKENEXTRA **t, RUN_TYPE dry_run, BLOCK_SIZE bsize,
int *rate);
#endif
diff --git a/configure b/configure
index 3c59c6e..d911d9a 100755
--- a/configure
+++ b/configure
@@ -606,7 +606,6 @@
check_add_cflags -Wfloat-conversion
check_add_cflags -Wpointer-arith
check_add_cflags -Wtype-limits
- check_add_cflags -Wcast-qual
check_add_cflags -Wvla
check_add_cflags -Wimplicit-function-declaration
check_add_cflags -Wuninitialized