/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "vpx_config.h"
#include "./vpx_scale_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vp8_rtcd.h"
#include "vp8/common/onyxc_int.h"
#include "vp8/common/blockd.h"
#include "onyx_int.h"
#include "vp8/common/systemdependent.h"
#include "vp8/encoder/quantize.h"
#include "vp8/common/alloccommon.h"
#include "mcomp.h"
#include "firstpass.h"
#include "vpx_dsp/psnr.h"
#include "vpx_scale/vpx_scale.h"
#include "vp8/common/extend.h"
#include "ratectrl.h"
#include "vp8/common/quant_common.h"
#include "segmentation.h"
#if CONFIG_POSTPROC
#include "vp8/common/postproc.h"
#endif
#include "vpx_mem/vpx_mem.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/swapyv12buffer.h"
#include "vp8/common/threading.h"
#include "vpx_ports/system_state.h"
#include "vpx_ports/vpx_timer.h"
#if ARCH_ARM
#include "vpx_ports/arm.h"
#endif
#if CONFIG_MULTI_RES_ENCODING
#include "mr_dissim.h"
#endif
#include "encodeframe.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <limits.h>
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
extern int vp8_update_coef_context(VP8_COMP *cpi);
extern void vp8_update_coef_probs(VP8_COMP *cpi);
#endif
extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source,
YV12_BUFFER_CONFIG *post, int filt_lvl,
int low_var_thresh, int flag);
extern void print_parms(VP8_CONFIG *ocf, char *filenam);
extern unsigned int vp8_get_processor_freq();
extern void print_tree_update_probs();
extern int vp8cx_create_encoder_threads(VP8_COMP *cpi);
extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
int vp8_estimate_entropy_savings(VP8_COMP *cpi);
int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
static void set_default_lf_deltas(VP8_COMP *cpi);
extern const int vp8_gf_interval_table[101];
#if CONFIG_INTERNAL_STATS
#include "math.h"
#include "vpx_dsp/ssim.h"
#endif
#ifdef OUTPUT_YUV_SRC
FILE *yuv_file;
#endif
#ifdef OUTPUT_YUV_DENOISED
FILE *yuv_denoised_file;
#endif
#if 0
FILE *framepsnr;
FILE *kf_list;
FILE *keyfile;
#endif
#if 0
extern int skip_true_count;
extern int skip_false_count;
#endif
#ifdef VP8_ENTROPY_STATS
extern int intra_mode_stats[10][10][10];
#endif
#ifdef SPEEDSTATS
unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 };
unsigned int tot_pm = 0;
unsigned int cnt_pm = 0;
unsigned int tot_ef = 0;
unsigned int cnt_ef = 0;
#endif
#ifdef MODE_STATS
extern unsigned __int64 Sectionbits[50];
extern int y_modes[5];
extern int uv_modes[4];
extern int b_modes[10];
extern int inter_y_modes[10];
extern int inter_uv_modes[4];
extern unsigned int inter_b_modes[15];
#endif
extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
extern const int qrounding_factors[129];
extern const int qzbin_factors[129];
extern void vp8cx_init_quantizer(VP8_COMP *cpi);
extern const int vp8cx_base_skip_false_prob[128];
/* Tables relating active max Q to active min Q */
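/* Each table is indexed by the active worst (maximum) quality index and
* gives the lowest active best (minimum) quality index allowed for that
* frame type, with separate tables for key frames, golden/alt-ref frames
* and inter frames at different motion levels.
*/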
static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
};
static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
};
static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
};
static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
};
static const unsigned char inter_minq[QINDEX_RANGE] = {
0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
};
#ifdef PACKET_TESTING
extern FILE *vpxlogc;
#endif
static void save_layer_context(VP8_COMP *cpi) {
LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
/* Save layer dependent coding state */
lc->target_bandwidth = cpi->target_bandwidth;
lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
lc->buffer_level = cpi->buffer_level;
lc->bits_off_target = cpi->bits_off_target;
lc->total_actual_bits = cpi->total_actual_bits;
lc->worst_quality = cpi->worst_quality;
lc->active_worst_quality = cpi->active_worst_quality;
lc->best_quality = cpi->best_quality;
lc->active_best_quality = cpi->active_best_quality;
lc->ni_av_qi = cpi->ni_av_qi;
lc->ni_tot_qi = cpi->ni_tot_qi;
lc->ni_frames = cpi->ni_frames;
lc->avg_frame_qindex = cpi->avg_frame_qindex;
lc->rate_correction_factor = cpi->rate_correction_factor;
lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
lc->zbin_over_quant = cpi->mb.zbin_over_quant;
lc->inter_frame_target = cpi->inter_frame_target;
lc->total_byte_count = cpi->total_byte_count;
lc->filter_level = cpi->common.filter_level;
lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
sizeof(cpi->mb.count_mb_ref_frame_usage));
}
static void restore_layer_context(VP8_COMP *cpi, const int layer) {
LAYER_CONTEXT *lc = &cpi->layer_context[layer];
/* Restore layer dependent coding state */
cpi->current_layer = layer;
cpi->target_bandwidth = lc->target_bandwidth;
cpi->oxcf.target_bandwidth = lc->target_bandwidth;
cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
cpi->buffer_level = lc->buffer_level;
cpi->bits_off_target = lc->bits_off_target;
cpi->total_actual_bits = lc->total_actual_bits;
cpi->active_worst_quality = lc->active_worst_quality;
cpi->active_best_quality = lc->active_best_quality;
cpi->ni_av_qi = lc->ni_av_qi;
cpi->ni_tot_qi = lc->ni_tot_qi;
cpi->ni_frames = lc->ni_frames;
cpi->avg_frame_qindex = lc->avg_frame_qindex;
cpi->rate_correction_factor = lc->rate_correction_factor;
cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
cpi->mb.zbin_over_quant = lc->zbin_over_quant;
cpi->inter_frame_target = lc->inter_frame_target;
cpi->total_byte_count = lc->total_byte_count;
cpi->common.filter_level = lc->filter_level;
cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
sizeof(cpi->mb.count_mb_ref_frame_usage));
}
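/* Scale val by the ratio num / denom using 64-bit intermediates to avoid
* overflow. It is used below to convert buffer levels expressed in
* milliseconds into bits at a given target bandwidth, e.g.
* rescale(500, 800000, 1000) = 500 * 800000 / 1000 = 400000 bits.
*/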
static int rescale(int val, int num, int denom) {
int64_t llnum = num;
int64_t llden = denom;
int64_t llval = val;
return (int)(llval * llnum / llden);
}
static void init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf,
const int layer,
double prev_layer_framerate) {
LAYER_CONTEXT *lc = &cpi->layer_context[layer];
lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
lc->starting_buffer_level =
rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
if (oxcf->optimal_buffer_level == 0) {
lc->optimal_buffer_level = lc->target_bandwidth / 8;
} else {
lc->optimal_buffer_level =
rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
}
if (oxcf->maximum_buffer_size == 0) {
lc->maximum_buffer_size = lc->target_bandwidth / 8;
} else {
lc->maximum_buffer_size =
rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
}
/* Work out the average size of a frame within this layer */
if (layer > 0) {
lc->avg_frame_size_for_layer =
(int)((cpi->oxcf.target_bitrate[layer] -
cpi->oxcf.target_bitrate[layer - 1]) *
1000 / (lc->framerate - prev_layer_framerate));
}
lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
lc->active_best_quality = cpi->oxcf.best_allowed_q;
lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
lc->buffer_level = lc->starting_buffer_level;
lc->bits_off_target = lc->starting_buffer_level;
lc->total_actual_bits = 0;
lc->ni_av_qi = 0;
lc->ni_tot_qi = 0;
lc->ni_frames = 0;
lc->rate_correction_factor = 1.0;
lc->key_frame_rate_correction_factor = 1.0;
lc->gf_rate_correction_factor = 1.0;
lc->inter_frame_target = 0;
}
// Upon a run-time change in the number of temporal layers, reset the layer
// context parameters for any "new" layers. "Existing" layers inherit the
// parameters from the previous layer state (at the same layer #). In the
// future we may want to better map the previous layer state(s) onto the
// "new" ones.
static void reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
const int prev_num_layers) {
int i;
double prev_layer_framerate = 0;
const int curr_num_layers = cpi->oxcf.number_of_layers;
// If the previous state was 1 layer, get current layer context from cpi.
// We need this to set the layer context for the new layers below.
if (prev_num_layers == 1) {
cpi->current_layer = 0;
save_layer_context(cpi);
}
for (i = 0; i < curr_num_layers; ++i) {
LAYER_CONTEXT *lc = &cpi->layer_context[i];
if (i >= prev_num_layers) {
init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
}
// The initial buffer levels are set based on their starting levels.
// We could set the buffer levels based on the previous state (normalized
// properly by the layer bandwidths) but we would need to keep track of
// the previous set of layer bandwidths (i.e., target_bitrate[i])
// before the layer change. For now, reset to the starting levels.
lc->buffer_level =
cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
lc->bits_off_target = lc->buffer_level;
// TODO(marpan): Should we set the rate_correction_factor and
// active_worst/best_quality to values derived from the previous layer
// state (to smooth out quality dips/rate fluctuations at the transition)?
// We need to treat the 1-layer case separately: oxcf.target_bitrate[i]
// is not set for 1 layer, and restore_layer_context()/save_layer_context()
// are not called in the encoding loop, so we call restore_layer_context()
// here to pass the layer context state to |cpi|.
if (curr_num_layers == 1) {
lc->target_bandwidth = cpi->oxcf.target_bandwidth;
lc->buffer_level =
cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
lc->bits_off_target = lc->buffer_level;
restore_layer_context(cpi, 0);
}
prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
}
}
static void setup_features(VP8_COMP *cpi) {
// If segmentation is enabled, set the update flags
if (cpi->mb.e_mbd.segmentation_enabled) {
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
} else {
cpi->mb.e_mbd.update_mb_segmentation_map = 0;
cpi->mb.e_mbd.update_mb_segmentation_data = 0;
}
cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
sizeof(cpi->mb.e_mbd.ref_lf_deltas));
memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
sizeof(cpi->mb.e_mbd.mode_lf_deltas));
set_default_lf_deltas(cpi);
}
static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
void vp8_initialize_enc(void) {
static volatile int init_done = 0;
if (!init_done) {
vpx_dsp_rtcd();
vp8_init_intra_predictors();
init_done = 1;
}
}
static void dealloc_compressor_data(VP8_COMP *cpi) {
vpx_free(cpi->tplist);
cpi->tplist = NULL;
/* Delete last frame MV storage buffers */
vpx_free(cpi->lfmv);
cpi->lfmv = 0;
vpx_free(cpi->lf_ref_frame_sign_bias);
cpi->lf_ref_frame_sign_bias = 0;
vpx_free(cpi->lf_ref_frame);
cpi->lf_ref_frame = 0;
/* Delete segmentation map */
vpx_free(cpi->segmentation_map);
cpi->segmentation_map = 0;
vpx_free(cpi->active_map);
cpi->active_map = 0;
vp8_de_alloc_frame_buffers(&cpi->common);
vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
dealloc_raw_frame_buffers(cpi);
vpx_free(cpi->tok);
cpi->tok = 0;
/* Structure used to monitor GF usage */
vpx_free(cpi->gf_active_flags);
cpi->gf_active_flags = 0;
/* Activity mask based per mb zbin adjustments */
vpx_free(cpi->mb_activity_map);
cpi->mb_activity_map = 0;
vpx_free(cpi->mb.pip);
cpi->mb.pip = 0;
#if CONFIG_MULTITHREAD
/* De-allocate mutex */
if (cpi->pmutex != NULL) {
VP8_COMMON *const pc = &cpi->common;
int i;
for (i = 0; i < pc->mb_rows; ++i) {
pthread_mutex_destroy(&cpi->pmutex[i]);
}
vpx_free(cpi->pmutex);
cpi->pmutex = NULL;
}
vpx_free(cpi->mt_current_mb_col);
cpi->mt_current_mb_col = NULL;
#endif
}
static void enable_segmentation(VP8_COMP *cpi) {
/* Set the appropriate feature bit */
cpi->mb.e_mbd.segmentation_enabled = 1;
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
static void disable_segmentation(VP8_COMP *cpi) {
/* Clear the appropriate feature bit */
cpi->mb.e_mbd.segmentation_enabled = 0;
}
/* Valid values for a segment are 0 to 3.
* The segmentation map is arranged as [Rows][Columns].
*/
static void set_segmentation_map(VP8_COMP *cpi,
unsigned char *segmentation_map) {
/* Copy in the new segmentation map */
memcpy(cpi->segmentation_map, segmentation_map,
(cpi->common.mb_rows * cpi->common.mb_cols));
/* Signal that the map should be updated. */
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
/* The values given for each segment can be either deltas (from the default
* value chosen for the frame) or absolute values.
*
* Valid range for abs values is:
* (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
* Valid range for delta values is:
* (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
*
* abs_delta = SEGMENT_DELTADATA (deltas)
* abs_delta = SEGMENT_ABSDATA (use the absolute values given).
*
*/
static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
unsigned char abs_delta) {
cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
memcpy(cpi->segment_feature_data, feature_data,
sizeof(cpi->segment_feature_data));
}
/* A simple function to cyclically refresh the background at a lower Q */
static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
unsigned char *seg_map = cpi->segmentation_map;
signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
int i;
int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
cpi->cyclic_refresh_q = Q / 2;
if (cpi->oxcf.screen_content_mode) {
// Modify the refresh ramp-up based on Q. Above some Q level, increase the
// number of blocks to be refreshed; below that threshold, reduce it.
// Turn refresh off under certain conditions: away from a key frame, when
// we are at good quality (low Q), and when most of the blocks were
// skip-encoded in the previous frame.
int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
if (Q >= qp_thresh) {
cpi->cyclic_refresh_mode_max_mbs_perframe =
(cpi->common.mb_rows * cpi->common.mb_cols) / 10;
} else if (cpi->frames_since_key > 250 && Q < 20 &&
cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
} else {
cpi->cyclic_refresh_mode_max_mbs_perframe =
(cpi->common.mb_rows * cpi->common.mb_cols) / 20;
}
block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
}
// Set every macroblock to be eligible for update.
// For key frame this will reset seg map to 0.
memset(cpi->segmentation_map, 0, mbs_in_frame);
if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
/* Cycle through the macro_block rows */
/* MB loop to set local segmentation map */
i = cpi->cyclic_refresh_mode_index;
assert(i < mbs_in_frame);
do {
/* If the MB is a candidate for clean-up then mark it for a
* possible boost/refresh (segment 1). The segment id may get
* reset to 0 later if the MB gets coded as anything other than
* (last frame, 0,0), as only (last frame, 0,0) MBs are eligible
* for refresh: that is to say, MBs likely to be background blocks.
*/
if (cpi->cyclic_refresh_map[i] == 0) {
seg_map[i] = 1;
block_count--;
} else if (cpi->cyclic_refresh_map[i] < 0) {
cpi->cyclic_refresh_map[i]++;
}
i++;
if (i == mbs_in_frame) i = 0;
} while (block_count && i != cpi->cyclic_refresh_mode_index);
cpi->cyclic_refresh_mode_index = i;
#if CONFIG_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity > 0) {
if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
(cpi->frames_since_key >
2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
// Under aggressive denoising, use segmentation to turn off loop
// filter below some qp thresh. The filter is reduced for all
// blocks that have been encoded as ZEROMV LAST x frames in a row,
// where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
// This is to avoid "dot" artifacts that can occur from repeated
// loop filtering on noisy input source.
cpi->cyclic_refresh_q = Q;
// lf_adjustment = -MAX_LOOP_FILTER;
lf_adjustment = -40;
for (i = 0; i < mbs_in_frame; ++i) {
seg_map[i] = (cpi->consec_zero_last[i] >
cpi->denoiser.denoise_pars.consec_zerolast)
? 1
: 0;
}
}
}
#endif
}
/* Activate segmentation. */
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
enable_segmentation(cpi);
/* Set up the quant segment data */
feature_data[MB_LVL_ALT_Q][0] = 0;
feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
feature_data[MB_LVL_ALT_Q][2] = 0;
feature_data[MB_LVL_ALT_Q][3] = 0;
/* Set up the loop segment data */
feature_data[MB_LVL_ALT_LF][0] = 0;
feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
feature_data[MB_LVL_ALT_LF][2] = 0;
feature_data[MB_LVL_ALT_LF][3] = 0;
/* Initialise the feature data structure */
set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
}
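/* Default per-MB loop-filter adjustments: positive deltas raise the
* loop-filter level for the given reference frame or prediction mode,
* negative deltas lower it. Intra and new/split-MV macroblocks get
* stronger filtering; golden/alt-ref and zero-MV macroblocks get less.
*/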
static void set_default_lf_deltas(VP8_COMP *cpi) {
cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
/* Test of ref frame deltas */
cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
if (cpi->oxcf.Mode == MODE_REALTIME) {
cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
} else {
cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
}
cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
}
/* Convenience macros for mapping speed and mode into a continuous
* range
*/
#define GOOD(x) (x + 1)
#define RT(x) (x + 7)
static int speed_map(int speed, const int *map) {
int res;
do {
res = *map++;
} while (speed >= *map++);
return res;
}
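/* The maps below are lists of (value, bound) pairs read by speed_map():
* the first value applies until the continuous speed reaches its bound,
* the next value applies until the next bound, and so on, with INT_MAX
* terminating the list. GOOD(x) and RT(x) place good-quality and
* real-time speeds on the same axis. For example, with
* thresh_mult_map_znn, speeds below GOOD(2) map to 0, GOOD(2) maps to
* 1500, and GOOD(3) up to (but not including) RT(0) maps to 2000.
* The mode_check_freq maps further below use the same encoding.
*/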
static const int thresh_mult_map_znn[] = {
/* map common to zero, nearest, and near */
0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
};
static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3),
2000, RT(0), 1000, RT(1),
2000, RT(7), INT_MAX, INT_MAX };
static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2),
5000, GOOD(3), 7500, RT(0),
2500, RT(1), 5000, RT(6),
INT_MAX, INT_MAX };
static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3),
2000, RT(0), 0, RT(1),
1000, RT(2), 2000, RT(7),
INT_MAX, INT_MAX };
static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000,
RT(0), 2000, INT_MAX };
static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
2500, GOOD(5), 4000, RT(0),
2000, RT(2), 2500, RT(5),
4000, INT_MAX };
static const int thresh_mult_map_split1[] = {
2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
};
static const int thresh_mult_map_split2[] = {
5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
};
static const int mode_check_freq_map_zn2[] = {
/* {zero,nearest}{2,3} */
0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
};
static const int mode_check_freq_map_vhbpred[] = {
0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX
};
static const int mode_check_freq_map_near2[] = {
0, GOOD(5), 2, RT(0), 0, RT(3), 2,
RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
};
static const int mode_check_freq_map_new1[] = {
0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
};
static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0),
0, RT(3), 4, RT(10),
1 << 3, RT(11), 1 << 4, RT(12),
1 << 5, INT_MAX };
static const int mode_check_freq_map_split1[] = {
0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX
};
static const int mode_check_freq_map_split2[] = {
0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX
};
void vp8_set_speed_features(VP8_COMP *cpi) {
SPEED_FEATURES *sf = &cpi->sf;
int Mode = cpi->compressor_speed;
int Speed = cpi->Speed;
int Speed2;
int i;
VP8_COMMON *cm = &cpi->common;
int last_improved_quant = sf->improved_quant;
int ref_frames;
/* Initialise default mode frequency sampling variables */
for (i = 0; i < MAX_MODES; ++i) {
cpi->mode_check_freq[i] = 0;
}
cpi->mb.mbs_tested_so_far = 0;
cpi->mb.mbs_zero_last_dot_suppress = 0;
/* best quality defaults */
sf->RD = 1;
sf->search_method = NSTEP;
sf->improved_quant = 1;
sf->improved_dct = 1;
sf->auto_filter = 1;
sf->recode_loop = 1;
sf->quarter_pixel_search = 1;
sf->half_pixel_search = 1;
sf->iterative_sub_pixel = 1;
sf->optimize_coefficients = 1;
sf->use_fastquant_for_pick = 0;
sf->no_skip_block4x4_search = 1;
sf->first_step = 0;
sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
sf->improved_mv_pred = 1;
/* default thresholds to 0 */
for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;
/* Count enabled references */
ref_frames = 1;
if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;
/* Convert speed to continuous range, with clamping */
if (Mode == 0) {
Speed = 0;
} else if (Mode == 2) {
Speed = RT(Speed);
} else {
if (Speed > 5) Speed = 5;
Speed = GOOD(Speed);
}
sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */
sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
speed_map(Speed, thresh_mult_map_znn);
sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
speed_map(Speed, thresh_mult_map_vhpred);
sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
speed_map(Speed, thresh_mult_map_new2);
sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
speed_map(Speed, thresh_mult_map_split2);
// Special case for temporal layers.
// Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
// used as second reference. We don't modify thresholds for ALTREF case
// since ALTREF is usually used as long-term reference in temporal layers.
if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
(cpi->ref_frame_flags & VP8_LAST_FRAME) &&
(cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
if (cpi->closest_reference_frame == GOLDEN_FRAME) {
sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
} else {
sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
}
}
cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
cpi->mode_check_freq[THR_DC] = 0; /* always */
cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
speed_map(Speed, mode_check_freq_map_zn2);
cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
speed_map(Speed, mode_check_freq_map_near2);
cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
cpi->mode_check_freq[THR_B_PRED] =
speed_map(Speed, mode_check_freq_map_vhbpred);
// For real-time mode at speed 10 keep the mode_check_freq threshold
// for NEW1 similar to that of speed 9.
Speed2 = Speed;
if (cpi->Speed == 10 && Mode == 2) Speed2 = RT(9);
cpi->mode_check_freq[THR_NEW1] = speed_map(Speed2, mode_check_freq_map_new1);
cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
speed_map(Speed, mode_check_freq_map_new2);
cpi->mode_check_freq[THR_SPLIT1] =
speed_map(Speed, mode_check_freq_map_split1);
cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
speed_map(Speed, mode_check_freq_map_split2);
Speed = cpi->Speed;
switch (Mode) {
#if !CONFIG_REALTIME_ONLY
case 0: /* best quality mode */
sf->first_step = 0;
sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
break;
case 1:
case 3:
if (Speed > 0) {
/* Disable coefficient optimization above speed 0 */
sf->optimize_coefficients = 0;
sf->use_fastquant_for_pick = 1;
sf->no_skip_block4x4_search = 0;
sf->first_step = 1;
}
if (Speed > 2) {
sf->improved_quant = 0;
sf->improved_dct = 0;
/* Only do recode loop on key frames, golden frames and
* alt ref frames
*/
sf->recode_loop = 2;
}
if (Speed > 3) {
sf->auto_filter = 1;
sf->recode_loop = 0; /* recode loop off */
sf->RD = 0; /* Turn rd off */
}
if (Speed > 4) {
sf->auto_filter = 0; /* Faster selection of loop filter */
}
break;
#endif
case 2:
sf->optimize_coefficients = 0;
sf->recode_loop = 0;
sf->auto_filter = 1;
sf->iterative_sub_pixel = 1;
sf->search_method = NSTEP;
if (Speed > 0) {
sf->improved_quant = 0;
sf->improved_dct = 0;
sf->use_fastquant_for_pick = 1;
sf->no_skip_block4x4_search = 0;
sf->first_step = 1;
}
if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */
if (Speed > 3) {
sf->RD = 0;
sf->auto_filter = 1;
}
if (Speed > 4) {
sf->auto_filter = 0; /* Faster selection of loop filter */
sf->search_method = HEX;
sf->iterative_sub_pixel = 0;
}
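/* For very high speeds, derive an adaptive threshold from the per-MB
* error histogram (error_bins): count the MBs below the encode-breakout
* floor as skips, then find the error level below which roughly
* (Speed - 6) tenths of the remaining MBs fall, and use it to scale the
* NEW/NEAREST/NEAR thresholds for each enabled reference frame.
*/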
if (Speed > 6) {
unsigned int sum = 0;
unsigned int total_mbs = cm->MBs;
int thresh;
unsigned int total_skip;
int min = 2000;
if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;
min >>= 7;
for (i = 0; i < min; ++i) {
sum += cpi->mb.error_bins[i];
}
total_skip = sum;
sum = 0;
/* Continue summing from where the skip bins ended; thresh is derived
* from the final value of i below.
*/
for (; i < 1024; ++i) {
sum += cpi->mb.error_bins[i];
if (10 * sum >=
(unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
break;
}
}
i--;
thresh = (i << 7);
if (thresh < 2000) thresh = 2000;
if (ref_frames > 1) {
sf->thresh_mult[THR_NEW1] = thresh;
sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
sf->thresh_mult[THR_NEAR1] = thresh >> 1;
}
if (ref_frames > 2) {
sf->thresh_mult[THR_NEW2] = thresh << 1;
sf->thresh_mult[THR_NEAREST2] = thresh;
sf->thresh_mult[THR_NEAR2] = thresh;
}
if (ref_frames > 3) {
sf->thresh_mult[THR_NEW3] = thresh << 1;
sf->thresh_mult[THR_NEAREST3] = thresh;
sf->thresh_mult[THR_NEAR3] = thresh;
}
sf->improved_mv_pred = 0;
}
if (Speed > 8) sf->quarter_pixel_search = 0;
if (cm->version == 0) {
cm->filter_type = NORMAL_LOOPFILTER;
if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
} else {
cm->filter_type = SIMPLE_LOOPFILTER;
}
/* This has a big hit on quality. Last resort */
if (Speed >= 15) sf->half_pixel_search = 0;
memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
}; /* switch */
/* Slow quant, dct and trellis not worthwhile for first pass
* so make sure they are always turned off.
*/
if (cpi->pass == 1) {
sf->improved_quant = 0;
sf->optimize_coefficients = 0;
sf->improved_dct = 0;
}
if (cpi->sf.search_method == NSTEP) {
vp8_init3smotion_compensation(&cpi->mb,
cm->yv12_fb[cm->lst_fb_idx].y_stride);
} else if (cpi->sf.search_method == DIAMOND) {
vp8_init_dsmotion_compensation(&cpi->mb,
cm->yv12_fb[cm->lst_fb_idx].y_stride);
}
if (cpi->sf.improved_dct) {
cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
} else {
/* No fast FDCT defined for any platform at this time. */
cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
}
cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
if (cpi->sf.improved_quant) {
cpi->mb.quantize_b = vp8_regular_quantize_b;
} else {
cpi->mb.quantize_b = vp8_fast_quantize_b;
}
if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);
if (cpi->sf.iterative_sub_pixel == 1) {
cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
} else if (cpi->sf.quarter_pixel_search) {
cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
} else if (cpi->sf.half_pixel_search) {
cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
} else {
cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
}
if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
cpi->mb.optimize = 1;
} else {
cpi->mb.optimize = 0;
}
if (cpi->common.full_pixel) {
cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
}
#ifdef SPEEDSTATS
frames_at_speed[cpi->Speed]++;
#endif
}
#undef GOOD
#undef RT
static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
#if VP8_TEMPORAL_ALT_REF
int width = (cpi->oxcf.Width + 15) & ~15;
int height = (cpi->oxcf.Height + 15) & ~15;
#endif
cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
cpi->oxcf.lag_in_frames);
if (!cpi->lookahead) {
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate lag buffers");
}
#if VP8_TEMPORAL_ALT_REF
if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
VP8BORDERINPIXELS)) {
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate altref buffer");
}
#endif
}
static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
#if VP8_TEMPORAL_ALT_REF
vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
#endif
vp8_lookahead_destroy(cpi->lookahead);
}
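/* Allocate per-MB partition info with a one-entry border on the top and
* left (hence the +1 on both dimensions); cpi->mb.pi is pointed one row
* and one column in, mirroring the mode_info layout whose stride is
* mb_cols + 1.
*/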
static int vp8_alloc_partition_data(VP8_COMP *cpi) {
vpx_free(cpi->mb.pip);
cpi->mb.pip =
vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
sizeof(PARTITION_INFO));
if (!cpi->mb.pip) return 1;
cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
return 0;
}
void vp8_alloc_compressor_data(VP8_COMP *cpi) {
VP8_COMMON *cm = &cpi->common;
int width = cm->Width;
int height = cm->Height;
#if CONFIG_MULTITHREAD
int prev_mb_rows = cm->mb_rows;
#endif
if (vp8_alloc_frame_buffers(cm, width, height)) {
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate frame buffers");
}
if (vp8_alloc_partition_data(cpi)) {
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate partition data");
}
if ((width & 0xf) != 0) width += 16 - (width & 0xf);
if ((height & 0xf) != 0) height += 16 - (height & 0xf);
if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
VP8BORDERINPIXELS)) {
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate last frame buffer");
}
if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
VP8BORDERINPIXELS)) {
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate scaled source buffer");
}
vpx_free(cpi->tok);
{
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
#else
unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
#endif
CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
}
/* Data used in real-time video conferencing mode to decide whether the
* golden frame needs refreshing
*/
cpi->zeromv_count = 0;
/* Structures used to monitor GF usage */
vpx_free(cpi->gf_active_flags);
CHECK_MEM_ERROR(
cpi->gf_active_flags,
vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
vpx_free(cpi->mb_activity_map);
CHECK_MEM_ERROR(
cpi->mb_activity_map,
vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));
/* allocate memory for storing last frame's MVs for MV prediction. */
vpx_free(cpi->lfmv);
CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
sizeof(*cpi->lfmv)));
vpx_free(cpi->lf_ref_frame_sign_bias);
CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
sizeof(*cpi->lf_ref_frame_sign_bias)));
vpx_free(cpi->lf_ref_frame);
CHECK_MEM_ERROR(cpi->lf_ref_frame,
vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
sizeof(*cpi->lf_ref_frame)));
/* Create the encoder segmentation map and set all entries to 0 */
vpx_free(cpi->segmentation_map);
CHECK_MEM_ERROR(
cpi->segmentation_map,
vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
cpi->cyclic_refresh_mode_index = 0;
vpx_free(cpi->active_map);
CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cm->mb_rows * cm->mb_cols,
sizeof(*cpi->active_map)));
memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));
#if CONFIG_MULTITHREAD
if (width < 640) {
cpi->mt_sync_range = 1;
} else if (width <= 1280) {
cpi->mt_sync_range = 4;
} else if (width <= 2560) {
cpi->mt_sync_range = 8;
} else {
cpi->mt_sync_range = 16;
}
if (cpi->oxcf.multi_threaded > 1) {
int i;
/* De-allocate and re-allocate mutex */
if (cpi->pmutex != NULL) {
for (i = 0; i < prev_mb_rows; ++i) {
pthread_mutex_destroy(&cpi->pmutex[i]);
}
vpx_free(cpi->pmutex);
cpi->pmutex = NULL;
}
CHECK_MEM_ERROR(cpi->pmutex,
vpx_malloc(sizeof(*cpi->pmutex) * cm->mb_rows));
if (cpi->pmutex) {
for (i = 0; i < cm->mb_rows; ++i) {
pthread_mutex_init(&cpi->pmutex[i], NULL);
}
}
vpx_free(cpi->mt_current_mb_col);
CHECK_MEM_ERROR(cpi->mt_current_mb_col,
vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
}
#endif
vpx_free(cpi->tplist);
CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
#if CONFIG_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity > 0) {
vp8_denoiser_free(&cpi->denoiser);
if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate denoiser");
}
}
#endif
}
/* Quant MOD: mapping from the 0-63 quantizer range exposed by the API to
* the codec's internal 0-127 quantizer index.
*/
static const int q_trans[] = {
0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19,
20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
};
int vp8_reverse_trans(int x) {
int i;
for (i = 0; i < 64; ++i) {
if (q_trans[i] >= x) return i;
}
return 63;
}
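/* Recompute the frame-rate dependent rate-control parameters: the average
* and minimum per-frame bit budgets and the maximum golden/alt-ref frame
* intervals. Called from vp8_change_config() and again whenever the
* frame rate estimate is updated.
*/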
void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
if (framerate < .1) framerate = 30;
cpi->framerate = framerate;
cpi->output_framerate = framerate;
cpi->per_frame_bandwidth =
(int)(cpi->oxcf.target_bandwidth / cpi->output_framerate);
cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
cpi->oxcf.two_pass_vbrmin_section / 100);
/* Set Maximum gf/arf interval */
cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
/* Extended interval for genuinely static scenes */
cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
/* Special conditions when alt ref frame is enabled in lagged compress mode */
if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
}
if (cpi->twopass.static_scene_max_gf_interval >
cpi->oxcf.lag_in_frames - 1) {
cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
}
}
if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
}
}
static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
VP8_COMMON *cm = &cpi->common;
cpi->oxcf = *oxcf;
cpi->auto_gold = 1;
cpi->auto_adjust_gold_quantizer = 1;
cm->version = oxcf->Version;
vp8_setup_version(cm);
/* Frame rate is not available on the first frame, as it's derived from
* the observed timestamps. The actual value used here doesn't matter
* too much, as it will adapt quickly.
*/
if (oxcf->timebase.num > 0) {
cpi->framerate =
(double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
} else {
cpi->framerate = 30;
}
/* If the reciprocal of the timebase seems like a reasonable framerate,
* then use that as a guess, otherwise use 30.
*/
if (cpi->framerate > 180) cpi->framerate = 30;
cpi->ref_framerate = cpi->framerate;
cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
cm->refresh_golden_frame = 0;
cm->refresh_last_frame = 1;
cm->refresh_entropy_probs = 1;
/* vp8_change_config() handles all of the setup that is shared with
* run-time reconfiguration.
*/
vp8_change_config(cpi, oxcf);
/* Initialize active best and worst q and average q values. */
cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
cpi->active_best_quality = cpi->oxcf.best_allowed_q;
cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
/* Initialise the starting buffer levels */
cpi->buffer_level = cpi->oxcf.starting_buffer_level;
cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
cpi->total_actual_bits = 0;
cpi->total_target_vs_actual = 0;
/* Temporal scalability */
if (cpi->oxcf.number_of_layers > 1) {
unsigned int i;
double prev_layer_framerate = 0;
for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
prev_layer_framerate =
cpi->output_framerate / cpi->oxcf.rate_decimator[i];
}
}
#if VP8_TEMPORAL_ALT_REF
{
int i;
cpi->fixed_divide[0] = 0;
for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
}
#endif
}
static void update_layer_contexts(VP8_COMP *cpi) {
VP8_CONFIG *oxcf = &cpi->oxcf;
/* Update snapshots of the layer contexts to reflect new parameters */
if (oxcf->number_of_layers > 1) {
unsigned int i;
double prev_layer_framerate = 0;
assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
LAYER_CONTEXT *lc = &cpi->layer_context[i];
lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
lc->starting_buffer_level = rescale(
(int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);
if (oxcf->optimal_buffer_level == 0) {
lc->optimal_buffer_level = lc->target_bandwidth / 8;
} else {
lc->optimal_buffer_level = rescale(
(int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);
}
if (oxcf->maximum_buffer_size == 0) {
lc->maximum_buffer_size = lc->target_bandwidth / 8;
} else {
lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
lc->target_bandwidth, 1000);
}
/* Work out the average size of a frame within this layer */
if (i > 0) {
lc->avg_frame_size_for_layer =
(int)((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
1000 / (lc->framerate - prev_layer_framerate));
}
prev_layer_framerate = lc->framerate;
}
}
}
void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
VP8_COMMON *cm = &cpi->common;
int last_w, last_h;
unsigned int prev_number_of_layers;
if (!cpi) return;
if (!oxcf) return;
if (cm->version != oxcf->Version) {
cm->version = oxcf->Version;
vp8_setup_version(cm);
}
last_w = cpi->oxcf.Width;
last_h = cpi->oxcf.Height;
prev_number_of_layers = cpi->oxcf.number_of_layers;
cpi->oxcf = *oxcf;
switch (cpi->oxcf.Mode) {
case MODE_REALTIME:
cpi->pass = 0;
cpi->compressor_speed = 2;
if (cpi->oxcf.cpu_used < -16) {
cpi->oxcf.cpu_used = -16;
}
if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;
break;
case MODE_GOODQUALITY:
cpi->pass = 0;
cpi->compressor_speed = 1;
if (cpi->oxcf.cpu_used < -5) {
cpi->oxcf.cpu_used = -5;
}
if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
break;
case MODE_BESTQUALITY:
cpi->pass = 0;
cpi->compressor_speed = 0;
break;
case MODE_FIRSTPASS:
cpi->pass = 1;
cpi->compressor_speed = 1;
break;
case MODE_SECONDPASS:
cpi->pass = 2;
cpi->compressor_speed = 1;
if (cpi->oxcf.cpu_used < -5) {
cpi->oxcf.cpu_used = -5;
}
if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
break;
case MODE_SECONDPASS_BEST:
cpi->pass = 2;
cpi->compressor_speed = 0;
break;
}
if (cpi->pass == 0) cpi->auto_worst_q = 1;
cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
if (oxcf->fixed_q >= 0) {
if (oxcf->worst_allowed_q < 0) {
cpi->oxcf.fixed_q = q_trans[0];
} else {
cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
}
if (oxcf->alt_q < 0) {
cpi->oxcf.alt_q = q_trans[0];
} else {
cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
}
if (oxcf->key_q < 0) {
cpi->oxcf.key_q = q_trans[0];
} else {
cpi->oxcf.key_q = q_trans[oxcf->key_q];
}
if (oxcf->gold_q < 0) {
cpi->oxcf.gold_q = q_trans[0];
} else {
cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
}
}
cpi->baseline_gf_interval =
cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
// GF behavior for 1 pass CBR, used when error_resilience is off.
if (!cpi->oxcf.error_resilient_mode &&
cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
cpi->oxcf.Mode == MODE_REALTIME)
cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
cpi->oxcf.token_partitions = 3;
#endif
if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;
}
setup_features(cpi);
{
int i;
for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
}
}
/* At the moment the first-order quantizer values may not exceed MAXQ */
if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;
/* local file playback mode == really big buffer */
if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
cpi->oxcf.starting_buffer_level = 60000;
cpi->oxcf.optimal_buffer_level = 60000;
cpi->oxcf.maximum_buffer_size = 240000;
cpi->oxcf.starting_buffer_level_in_ms = 60000;
cpi->oxcf.optimal_buffer_level_in_ms = 60000;
cpi->oxcf.maximum_buffer_size_in_ms = 240000;
}
/* Convert target bandwidth from Kbit/s to Bit/s */
cpi->oxcf.target_bandwidth *= 1000;
cpi->oxcf.starting_buffer_level = rescale(
(int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);
/* Set or reset optimal and maximum buffer levels. */
if (cpi->oxcf.optimal_buffer_level == 0) {
cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
} else {
cpi->oxcf.optimal_buffer_level = rescale(
(int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);
}
if (cpi->oxcf.maximum_buffer_size == 0) {
cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
} else {
cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
cpi->oxcf.target_bandwidth, 1000);
}
// Under a configuration change, where maximum_buffer_size may change,
// keep buffer level clipped to the maximum allowed buffer size.
if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
cpi->buffer_level = cpi->bits_off_target;
}
/* Set up frame rate and related parameters rate control values. */
vp8_new_framerate(cpi, cpi->framerate);
/* Set absolute upper and lower quality limits */
cpi->worst_quality = cpi->oxcf.worst_allowed_q;
cpi->best_quality = cpi->oxcf.best_allowed_q;
/* active values should only be modified if out of new range */
if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
}
/* less likely */
else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
}
if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
cpi->active_best_quality = cpi->oxcf.best_allowed_q;
}
/* less likely */
else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
}
cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
cpi->cq_target_quality = cpi->oxcf.cq_level;
/* Only allow dropped frames in buffered mode */
cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
// Check if the number of temporal layers has changed, and if so reset the
// pattern counter and set/initialize the temporal layer context for the
// new layer configuration.
if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
// If the number of temporal layers is changed we must start at the
// base of the pattern cycle, so set the layer id to 0 and reset
// the temporal pattern counter.
if (cpi->temporal_layer_id > 0) {
cpi->temporal_layer_id = 0;
}
cpi->temporal_pattern_counter = 0;
reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
}
if (!cpi->initial_width) {
cpi->initial_width = cpi->oxcf.Width;
cpi->initial_height = cpi->oxcf.Height;
}
cm->Width = cpi->oxcf.Width;
cm->Height = cpi->oxcf.Height;
assert(cm->Width <= cpi->initial_width);
assert(cm->Height <= cpi->initial_height);
/* TODO(jkoleszar): if an internal spatial resampling is active,
* and we downsize the input image, maybe we should clear the
* internal scale immediately rather than waiting for it to
* correct.
*/
/* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;
cm->sharpness_level = cpi->oxcf.Sharpness;
if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) {
int hr, hs, vr, vs;
Scale2Ratio(cm->horiz_scale, &hr, &hs);
Scale2Ratio(cm->vert_scale, &vr, &vs);
/* always go to the next whole number */
cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
}
if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
cpi->force_next_frame_intra = 1;
}
if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
dealloc_raw_frame_buffers(cpi);
alloc_raw_frame_buffers(cpi);
vp8_alloc_compressor_data(cpi);
}
if (cpi->oxcf.fixed_q >= 0) {
cpi->last_q[0] = cpi->oxcf.fixed_q;
cpi->last_q[1] = cpi->oxcf.fixed_q;
}
cpi->Speed = cpi->oxcf.cpu_used;
/* Force allow_lag to 0 if lag_in_frames is 0. */
if (cpi->oxcf.lag_in_frames == 0) {
cpi->oxcf.allow_lag = 0;
}
/* Limit on lag buffers as these are not currently dynamically allocated */
else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
}
/* YX Temp */
cpi->alt_ref_source = NULL;
cpi->is_src_frame_alt_ref = 0;
#if CONFIG_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity) {
if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
int width = (cpi->oxcf.Width + 15) & ~15;
int height = (cpi->oxcf.Height + 15) & ~15;
if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate denoiser");
}
}
}
#endif
#if 0
/* Experimental RD Code */
cpi->frame_distortion = 0;
cpi->last_frame_distortion = 0;
#endif
}
#ifndef M_LOG2_E
#define M_LOG2_E 0.693147180559945309417
#endif
#define log2f(x) (log(x) / (float)M_LOG2_E)
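/* Note: the fallback value defined above (0.6931...) is ln(2), i.e. M_LN2,
* rather than log2(e), so log(x) / M_LOG2_E here evaluates ln(x) / ln(2),
* which is the intended base-2 logarithm.
*/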
static void cal_mvsadcosts(int *mvsadcost[2]) {
int i = 1;
mvsadcost[0][0] = 300;
mvsadcost[1][0] = 300;
do {
double z = 256 * (2 * (log2f(8 * i) + .6));
mvsadcost[0][i] = (int)z;
mvsadcost[1][i] = (int)z;
mvsadcost[0][-i] = (int)z;
mvsadcost[1][-i] = (int)z;
} while (++i <= mvfp_max);
}
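/* The mvsadcost tables filled above are indexed from -mvfp_max to
* +mvfp_max, so the pointers passed in are expected to point at the
* centre of their underlying buffers; the cost grows with the base-2 log
* of the MV component magnitude.
*/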
struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
int i;
VP8_COMP *cpi;
VP8_COMMON *cm;
cpi = vpx_memalign(32, sizeof(VP8_COMP));
/* Check that the CPI instance is valid */
if (!cpi) return 0;
cm = &cpi->common;
memset(cpi, 0, sizeof(VP8_COMP));
if (setjmp(cm->error.jmp)) {
cpi->common.error.setjmp = 0;
vp8_remove_compressor(&cpi);
return 0;
}
cpi->common.error.setjmp = 1;
CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site),
(MAX_MVSEARCH_STEPS * 8) + 1));
vp8_create_common(&cpi->common);
init_config(cpi, oxcf);
memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
sizeof(vp8cx_base_skip_false_prob));
cpi->common.current_video_frame = 0;
cpi->temporal_pattern_counter = 0;
cpi->temporal_layer_id = -1;
cpi->kf_overspend_bits = 0;
cpi->kf_bitrate_adjustment = 0;
cpi->frames_till_gf_update_due = 0;
cpi->gf_overspend_bits = 0;
cpi->non_gf_bitrate_adjustment = 0;
cpi->prob_last_coded = 128;
cpi->prob_gf_coded = 128;
cpi->prob_intra_coded = 63;
/* Prime the recent reference frame usage counters.
* Hereafter they will be maintained as a sort of moving average
*/
cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
/* Set reference frame sign bias for ALTREF frame to 1 (for now) */
cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
cpi->twopass.gf_decay_rate = 0;
cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
cpi->gold_is_last = 0;
cpi->alt_is_last = 0;
cpi->gold_is_alt = 0;
cpi->active_map_enabled = 0;
#if 0
/* Experimental code for lagged and one pass */
/* Initialise one_pass GF frames stats */
/* Update stats used for GF selection */
if (cpi->pass == 0)
{
cpi->one_pass_frame_index = 0;
for (i = 0; i < MAX_LAG_BUFFERS; ++i)
{
cpi->one_pass_frame_stats[i].frames_so_far = 0;
cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
}
}
#endif
cpi->mse_source_denoised = 0;
/* Decide whether to use the cyclic refresh method.
* Currently there is no external control for this.
* Enable it for error_resilient_mode, or for 1 pass CBR mode.
*/
cpi->cyclic_refresh_mode_enabled =
(cpi->oxcf.error_resilient_mode ||
(cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
cpi->oxcf.Mode <= 2));
cpi->cyclic_refresh_mode_max_mbs_perframe =
(cpi->common.mb_rows * cpi->common.mb_cols) / 7;
if (cpi->oxcf.number_of_layers == 1) {
cpi->cyclic_refresh_mode_max_mbs_perframe =
(cpi->common.mb_rows * cpi->common.mb_cols) / 20;
} else if (cpi->oxcf.number_of_layers == 2) {
cpi->cyclic_refresh_mode_max_mbs_perframe =
(cpi->common.mb_rows * cpi->common.mb_cols) / 10;
}
cpi->cyclic_refresh_mode_index = 0;
cpi->cyclic_refresh_q = 32;
// GF behavior for 1 pass CBR, used when error_resilience is off.
cpi->gf_update_onepass_cbr = 0;
cpi->gf_noboost_onepass_cbr = 0;
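/* For 1-pass CBR without error resilience, periodically refresh the golden
* frame with no bit-rate boost: the interval is roughly twice the number
* of frames cyclic refresh needs to cover every macroblock, clamped to
* the range [6, 40].
*/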
if (!cpi->oxcf.error_resilient_mode &&
cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && cpi->oxcf.Mode <= 2) {
cpi->gf_update_onepass_cbr = 1;
cpi->gf_noboost_onepass_cbr = 1;
cpi->gf_interval_onepass_cbr =
cpi->cyclic_refresh_mode_max_mbs_perframe > 0
? (2 * (cpi->common.mb_rows * cpi->common.mb_cols) /
cpi->cyclic_refresh_mode_max_mbs_perframe)
: 10;
cpi->gf_interval_onepass_cbr =
VPXMIN(40, VPXMAX(6, cpi->gf_interval_onepass_cbr));
cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
}
if (cpi->cyclic_refresh_mode_enabled) {
CHECK_MEM_ERROR(cpi->cyclic_refresh_map,
vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
} else {
cpi->cyclic_refresh_map = (signed char *)NULL;
}
CHECK_MEM_ERROR(cpi->consec_zero_last,
vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias,
vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
#ifdef VP8_ENTROPY_STATS
init_context_counters();
#endif
/* Initialize the feed-forward activity masking. */
cpi->activity_avg = 90 << 12;
/* Give a sensible default for the first frame. */
cpi->frames_since_key = 8;
cpi->key_frame_frequency = cpi->oxcf.key_freq;
cpi->this_key_frame_forced = 0;
cpi->next_key_frame_forced = 0;
cpi->source_alt_ref_pending = 0;
cpi->source_alt_ref_active = 0;
cpi->common.refresh_alt_ref_frame = 0;
cpi->force_maxqp = 0;
cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
#if CONFIG_INTERNAL_STATS
cpi->b_calculate_ssimg = 0;
cpi->count = 0;
cpi->bytes = 0;
if (cpi->b_calculate_psnr) {
cpi->total_sq_error = 0.0;
cpi->total_sq_error2 = 0.0;
cpi->total_y = 0.0;
cpi->total_u = 0.0;
cpi->total_v = 0.0;
cpi->total = 0.0;
cpi->totalp_y = 0.0;
cpi->totalp_u = 0.0;
cpi->totalp_v = 0.0;
cpi->totalp = 0.0;
cpi->tot_recode_hits = 0;
cpi->summed_quality = 0;
cpi->summed_weights = 0;
}
#endif
cpi->first_time_stamp_ever = 0x7FFFFFFF;
cpi->frames_till_gf_update_due = 0;
cpi->key_frame_count = 1;
cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
cpi->ni_tot_qi = 0;
cpi->ni_frames = 0;
cpi->total_byte_count = 0;
cpi->drop_frame = 0;
cpi->rate_correction_factor = 1.0;
cpi->key_frame_rate_correction_factor = 1.0;
cpi->gf_rate_correction_factor = 1.0;
cpi->twopass.est_max_qcorrection_factor = 1.0;
for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
}
#ifdef OUTPUT_YUV_SRC
yuv_file = fopen("bd.yuv", "ab");
#endif
#ifdef OUTPUT_YUV_DENOISED
yuv_denoised_file = fopen("denoised.yuv", "ab");
#endif
#if 0
framepsnr = fopen("framepsnr.stt", "a");
kf_list = fopen("kf_list.stt", "w");
#endif
cpi->output_pkt_list = oxcf->output_pkt_list;
#if !CONFIG_REALTIME_ONLY
if (cpi->pass == 1) {
vp8_init_first_pass(cpi);
} else if (cpi->pass == 2) {
size_t packet_sz = sizeof(FIRSTPASS_STATS);
int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
cpi->twopass.stats_in = cpi->twopass.stats_in_start;
cpi->twopass.stats_in_end =
(void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
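/* The final record in the first-pass stats buffer is presumed to hold the
 * aggregate totals appended by the first pass, which is why stats_in_end
 * points at packet (packets - 1) rather than one past the end.
 */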
vp8_init_second_pass(cpi);
}
#endif
if (cpi->compressor_speed == 2) {
cpi->avg_encode_time = 0;
cpi->avg_pick_mode_time = 0;
}
vp8_set_speed_features(cpi);
/* Set starting values of RD threshold multipliers (128 = *1) */
for (i = 0; i < MAX_MODES; ++i) {
cpi->mb.rd_thresh_mult[i] = 128;
}
#ifdef VP8_ENTROPY_STATS
init_mv_ref_counts();
#endif
#if CONFIG_MULTITHREAD
if (vp8cx_create_encoder_threads(cpi)) {
vp8_remove_compressor(&cpi);
return 0;
}
#endif
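/* Hook up the SAD / variance helpers (from vpx_dsp) used by motion search and
 * mode selection for each block size:
 *   sdf    - single SAD
 *   vf     - variance
 *   svf    - sub-pixel (interpolated) variance
 *   sdx3f  - SAD at 3 consecutive horizontal positions
 *   sdx8f  - SAD at 8 consecutive horizontal positions
 *   sdx4df - SAD against 4 different reference blocks in one call
 */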
cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3;
cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8;
cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3;
cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8;
cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3;
cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8;
cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3;
cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8;
cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3;
cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8;
cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
#if ARCH_X86 || ARCH_X86_64
cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
#endif
cpi->full_search_sad = vp8_full_search_sad;
cpi->diamond_search_sad = vp8_diamond_search_sad;
cpi->refining_search_sad = vp8_refining_search_sad;
/* make sure frame 1 is okay */
cpi->mb.error_bins[0] = cpi->common.MBs;
/* vp8cx_init_quantizer() is first called here. A check in
 * vp8cx_frame_init_quantizer() ensures it is only called again later when
 * actually needed, avoiding unnecessary re-initialization on every frame.
 */
vp8cx_init_quantizer(cpi);
vp8_loop_filter_init(cm);
cpi->common.error.setjmp = 0;
#if CONFIG_MULTI_RES_ENCODING
/* Calculate the number of MBs in a row of the lower-resolution level's image. */
if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);
#endif
/* setup RD costs to MACROBLOCK struct */
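/* The MV cost tables store the cost of component value 0 in the middle of
 * each array; offsetting the pointers by mv_max + 1 (mvfp_max + 1 for the
 * full-pel SAD costs filled in by cal_mvsadcosts() below) lets the search
 * code index them directly with signed MV component values.
 */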
cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];
cal_mvsadcosts(cpi->mb.mvsadcost);
cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
cpi->mb.token_costs = cpi->rd_costs.token_costs;
/* setup block ptrs & offsets */
vp8_setup_block_ptrs(&cpi->mb);
vp8_setup_block_dptrs(&cpi->mb.e_mbd);
return cpi;
}
void vp8_remove_compressor(VP8_COMP **ptr) {
VP8_COMP *cpi = *ptr;
if (!cpi) return;
if (cpi->common.current_video_frame > 0) {
#if !CONFIG_REALTIME_ONLY
if (cpi->pass == 2) {
vp8_end_second_pass(cpi);
}
#endif
#ifdef VP8_ENTROPY_STATS
print_context_counters();
print_tree_update_probs();
print_mode_context();
#endif
#if CONFIG_INTERNAL_STATS
if (cpi->pass != 1) {
FILE *f = fopen("opsnr.stt", "a");
double time_encoded =
(cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
10000000.000;
double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
if (cpi->b_calculate_psnr) {
if (cpi->oxcf.number_of_layers > 1) {
int i;
fprintf(f,
"Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
"GLPsnrP\tVPXSSIM\n");
for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
double dr =
(double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
cpi->common.Width * cpi->common.Height;
double total_psnr =
vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
double total_psnr2 =
vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
double total_ssim =
100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
fprintf(f,
"%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
"%7.3f\t%7.3f\n",
i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
total_psnr2, total_ssim);
}
} else {
double samples =
3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
double total_psnr =
vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
double total_psnr2 =
vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
double total_ssim =
100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
fprintf(f,
"Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
"GLPsnrP\tVPXSSIM\n");
fprintf(f,
"%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
"%7.3f\n",
dr, cpi->total / cpi->count, total_psnr,
cpi->totalp / cpi->count, total_psnr2, total_ssim);
}
}
fclose(f);
#if 0
f = fopen("qskip.stt", "a");
fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
fclose(f);
#endif
}
#endif
#ifdef SPEEDSTATS
if (cpi->compressor_speed == 2) {
int i;
FILE *f = fopen("cxspeed.stt", "a");
cnt_pm /= cpi->common.MBs;
for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
fprintf(f, "\n");
fclose(f);
}
#endif
#ifdef MODE_STATS
{
extern int count_mb_seg[4];
FILE *f = fopen("modes.stt", "a");
double dr = (double)cpi->framerate * (double)bytes * (double)8 /
(double)count / (double)1000;
fprintf(f, "intra_mode in Intra Frames:\n");
fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
y_modes[2], y_modes[3], y_modes[4]);
fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
uv_modes[2], uv_modes[3]);
fprintf(f, "B: ");
{
int i;
for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
fprintf(f, "\n");
}
fprintf(f, "Modes in Inter Frames:\n");
fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
inter_y_modes[9]);
fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
fprintf(f, "B: ");
{
int i;
for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
fprintf(f, "\n");
}
fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
count_mb_seg[2], count_mb_seg[3]);
fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
inter_b_modes[NEW4X4]);
fclose(f);
}
#endif
#ifdef VP8_ENTROPY_STATS
{
int i, j, k;
FILE *fmode = fopen("modecontext.c", "w");
fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
fprintf(fmode,
"[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
for (i = 0; i < 10; ++i) {
fprintf(fmode, " { /* Above Mode : %d */\n", i);
for (j = 0; j < 10; ++j) {
fprintf(fmode, " {");
for (k = 0; k < 10; ++k) {
if (!intra_mode_stats[i][j][k])
fprintf(fmode, " %5d, ", 1);
else
fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
}
fprintf(fmode, "}, /* left_mode %d */\n", j);
}
fprintf(fmode, " },\n");
}
fprintf(fmode, "};\n");
fclose(fmode);
}
#endif
#if defined(SECTIONBITS_OUTPUT)
if (0) {
int i;
FILE *f = fopen("tokenbits.stt", "a");
for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
fprintf(f, "\n");
fclose(f);
}
#endif
#if 0
{
printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
}
#endif
}
#if CONFIG_MULTITHREAD
vp8cx_remove_encoder_threads(cpi);
#endif
#if CONFIG_TEMPORAL_DENOISING
vp8_denoiser_free(&cpi->denoiser);
#endif
dealloc_compressor_data(cpi);
vpx_free(cpi->mb.ss);
vpx_free(cpi->tok);
vpx_free(cpi->cyclic_refresh_map);
vpx_free(cpi->consec_zero_last);
vpx_free(cpi->consec_zero_last_mvbias);
vp8_remove_common(&cpi->common);
vpx_free(cpi);
*ptr = 0;
#ifdef OUTPUT_YUV_SRC
fclose(yuv_file);
#endif
#ifdef OUTPUT_YUV_DENOISED
fclose(yuv_denoised_file);
#endif
#if 0
if (keyfile)
fclose(keyfile);
if (framepsnr)
fclose(framepsnr);
if (kf_list)
fclose(kf_list);
#endif
}
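/* Sum of squared error over a whole plane: full 16x16 blocks go through the
 * optimized vpx_mse16x16(), and any partial blocks along the right and bottom
 * edges are handled by the scalar fall-back loops below.
 */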
static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
unsigned char *recon, int recon_stride,
unsigned int cols, unsigned int rows) {
unsigned int row, col;
uint64_t total_sse = 0;
int diff;
for (row = 0; row + 16 <= rows; row += 16) {
for (col = 0; col + 16 <= cols; col += 16) {
unsigned int sse;
vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
total_sse += sse;
}
/* Handle odd-sized width */
if (col < cols) {
unsigned int border_row, border_col;
unsigned char *border_orig = orig;
unsigned char *border_recon = recon;
for (border_row = 0; border_row < 16; ++border_row) {
for (border_col = col; border_col < cols; ++border_col) {
diff = border_orig[border_col] - border_recon[border_col];
total_sse += diff * diff;
}
border_orig += orig_stride;
border_recon += recon_stride;
}
}
orig += orig_stride * 16;
recon += recon_stride * 16;
}
/* Handle odd-sized height */
for (; row < rows; ++row) {
for (col = 0; col < cols; ++col) {
diff = orig[col] - recon[col];
total_sse += diff * diff;
}
orig += orig_stride;
recon += recon_stride;
}
vpx_clear_system_state();
return total_sse;
}
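/* Emit a VPX_CODEC_PSNR_PKT for the frame just coded: index 0 aggregates all
 * three planes, indices 1..3 hold Y, U and V. Chroma dimensions are rounded
 * up to half the luma size before the plane error is computed.
 */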
static void generate_psnr_packet(VP8_COMP *cpi) {
YV12_BUFFER_CONFIG *orig = cpi->Source;
YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
struct vpx_codec_cx_pkt pkt;
uint64_t sse;
int i;
unsigned int width = cpi->common.Width;
unsigned int height = cpi->common.Height;
pkt.kind = VPX_CODEC_PSNR_PKT;
sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
recon->y_stride, width, height);
pkt.data.psnr.sse[0] = sse;
pkt.data.psnr.sse[1] = sse;
pkt.data.psnr.samples[0] = width * height;
pkt.data.psnr.samples[1] = width * height;
width = (width + 1) / 2;
height = (height + 1) / 2;
sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
recon->uv_stride, width, height);
pkt.data.psnr.sse[0] += sse;
pkt.data.psnr.sse[2] = sse;
pkt.data.psnr.samples[0] += width * height;
pkt.data.psnr.samples[2] = width * height;
sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
recon->uv_stride, width, height);
pkt.data.psnr.sse[0] += sse;
pkt.data.psnr.sse[3] = sse;
pkt.data.psnr.samples[0] += width * height;
pkt.data.psnr.samples[3] = width * height;
for (i = 0; i < 4; ++i) {
pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
(double)(pkt.data.psnr.sse[i]));
}
vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
}
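/* ref_frame_flags is a bitmask of VP8_LAST_FRAME, VP8_GOLD_FRAME and
 * VP8_ALTR_FRAME, so any value above 7 cannot be a valid combination.
 */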
int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
if (ref_frame_flags > 7) return -1;
cpi->ref_frame_flags = ref_frame_flags;
return 0;
}
int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
if (ref_frame_flags > 7) return -1;
cpi->common.refresh_golden_frame = 0;
cpi->common.refresh_alt_ref_frame = 0;
cpi->common.refresh_last_frame = 0;
if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
return 0;
}
int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
YV12_BUFFER_CONFIG *sd) {
VP8_COMMON *cm = &cpi->common;
int ref_fb_idx;
if (ref_frame_flag == VP8_LAST_FRAME) {
ref_fb_idx = cm->lst_fb_idx;
} else if (ref_frame_flag == VP8_GOLD_FRAME) {
ref_fb_idx = cm->gld_fb_idx;
} else if (ref_frame_flag == VP8_ALTR_FRAME) {
ref_fb_idx = cm->alt_fb_idx;
} else {
return -1;
}
vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
return 0;
}
int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
YV12_BUFFER_CONFIG *sd) {
VP8_COMMON *cm = &cpi->common;
int ref_fb_idx;
if (ref_frame_flag == VP8_LAST_FRAME) {
ref_fb_idx = cm->lst_fb_idx;
} else if (ref_frame_flag == VP8_GOLD_FRAME) {
ref_fb_idx = cm->gld_fb_idx;
} else if (ref_frame_flag == VP8_ALTR_FRAME) {
ref_fb_idx = cm->alt_fb_idx;
} else {
return -1;
}
vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
return 0;
}
int vp8_update_entropy(VP8_COMP *cpi, int update) {
VP8_COMMON *cm = &cpi->common;
cm->refresh_entropy_probs = update;
return 0;
}
#if defined(OUTPUT_YUV_SRC) || defined(OUTPUT_YUV_DENOISED)
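/* Debug helper: append the frame to the open YUV dump file as raw planar
 * 4:2:0, writing only the visible width of each row and stepping through the
 * buffers by their strides.
 */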
void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s) {
unsigned char *src = s->y_buffer;
int h = s->y_height;
do {
fwrite(src, s->y_width, 1, yuv_file);
src += s->y_stride;
} while (--h);
src = s->u_buffer;
h = s->uv_height;
do {
fwrite(src, s->uv_width, 1, yuv_file);
src += s->uv_stride;
} while (--h);
src = s->v_buffer;
h = s->uv_height;
do {
fwrite(src, s->uv_width, 1, yuv_file);
src += s->uv_stride;
} while (--h);
}
#endif
static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
VP8_COMMON *cm = &cpi->common;
/* Are we resizing the image? */
if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
#if CONFIG_SPATIAL_RESAMPLING
int hr, hs, vr, vs;
int tmp_height;
if (cm->vert_scale == 3) {
tmp_height = 9;
} else {
tmp_height = 11;
}
Scale2Ratio(cm->horiz_scale, &hr, &hs);
Scale2Ratio(cm->vert_scale, &vr, &vs);
vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
tmp_height, hs, hr, vs, vr, 0);
vp8_yv12_extend_frame_borders(&cpi->scaled_source);
cpi->Source = &cpi->scaled_source;
#endif
} else {
cpi->Source = sd;
}
}
static int resize_key_frame(VP8_COMP *cpi) {
#if CONFIG_SPATIAL_RESAMPLING
VP8_COMMON *cm = &cpi->common;
/* Do we need to apply resampling for one-pass CBR?
 * In one pass this is more limited than in two-pass CBR.
 * The test and any change are only made once per key frame sequence.
 */
if (cpi->oxcf.allow_spatial_resampling &&
(cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
int hr, hs, vr, vs;
int new_width, new_height;
/* If we are below the resample DOWN watermark then scale down a
* notch.
*/
if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
cpi->oxcf.optimal_buffer_level / 100)) {
cm->horiz_scale =
(cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
}
/* Should we now start scaling back up? */
else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
cpi->oxcf.optimal_buffer_level / 100)) {
cm->horiz_scale =
(cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
}
/* Get the new height and width */
Scale2Ratio(cm->horiz_scale, &hr, &hs);
Scale2Ratio(cm->vert_scale, &vr, &vs);
new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
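/* Example (assuming Scale2Ratio maps ONETWO to the ratio 1/2): with
 * Width = 640 and hr/hs = 1/2, new_width = ((2 - 1) + 640 * 1) / 2 = 320;
 * the (hs - 1) term makes the integer division round up.
 */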
/* If the image size has changed we need to reallocate the buffers
* and resample the source image
*/
if ((cm->Width != new_width) || (cm->Height != new_height)) {