Fix yet more overflows in amrwbenc

Replace unguarded additions, subtractions, shifts, and multiplies in
convolve.c, pitch_f4.c, and voAMRWBEnc.c with the saturating basic
operators (L_add, L_sub, L_shl, L_mult, voround) so that pathological
inputs clamp at the 32-bit limits instead of triggering signed-integer
overflow.
Bug: 28786034
Change-Id: I619e63df93a562a1e3b017d07a8414776a00b855
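
For reference, the change leans on the codec's saturating fixed-point
primitives from basic_op.h. Below is a minimal sketch of their assumed
behavior, modeled on the standard ETSI/ITU-T basic-op definitions; the
exact amrwbenc versions may differ in detail:

    #include <stdint.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    /* Saturating 32-bit add: clamps to INT32_MIN/INT32_MAX instead of
     * wrapping when the mathematically exact sum does not fit. */
    static Word32 L_add(Word32 a, Word32 b)
    {
        Word32 s = (Word32)((uint32_t)a + (uint32_t)b);
        if (((a ^ b) >= 0) && ((s ^ a) < 0))          /* same signs in, */
            s = (a < 0) ? (Word32)0x80000000 : 0x7FFFFFFF; /* flipped out */
        return s;
    }

    /* Saturating 32-bit subtract. */
    static Word32 L_sub(Word32 a, Word32 b)
    {
        Word32 d = (Word32)((uint32_t)a - (uint32_t)b);
        if (((a ^ b) < 0) && ((d ^ a) < 0))
            d = (a < 0) ? (Word32)0x80000000 : 0x7FFFFFFF;
        return d;
    }

    /* Saturating left shift; a negative count shifts right. */
    static Word32 L_shl(Word32 a, Word16 n)
    {
        if (n <= 0)
            return (n <= -31) ? ((a < 0) ? -1 : 0) : (a >> (-n));
        for (; n > 0; n--) {
            if (a > 0x3FFFFFFF) return 0x7FFFFFFF;         /* overflow  */
            if (a < (Word32)0xC0000000) return (Word32)0x80000000;
            a <<= 1;
        }
        return a;
    }

    /* (a * b) << 1 with the single overflowing case saturated. */
    static Word32 L_mult(Word16 a, Word16 b)
    {
        Word32 p = (Word32)a * (Word32)b;
        return (p == 0x40000000) ? 0x7FFFFFFF : (p << 1);
    }

    /* Round the high 16 bits of a 32-bit accumulator, saturating. */
    static Word16 voround(Word32 a)
    {
        return (Word16)(L_add(a, 0x00008000) >> 16);
    }
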
diff --git a/media/libstagefright/codecs/amrwbenc/src/convolve.c b/media/libstagefright/codecs/amrwbenc/src/convolve.c
index 9b8b3aa..8c24414 100644
--- a/media/libstagefright/codecs/amrwbenc/src/convolve.c
+++ b/media/libstagefright/codecs/amrwbenc/src/convolve.c
@@ -47,48 +47,53 @@
s = vo_mult32((*tmpX++), (*tmpH--));i--;
while(i>0)
{
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
i -= 4;
}
- y[n] = ((s<<1) + 0x8000)>>16;
+ y[n] = voround(L_shl(s, 1));
n++;
tmpH = h+n;
tmpX = x;
i=n+1;
- s = vo_mult32((*tmpX++), (*tmpH--));i--;
- s += vo_mult32((*tmpX++), (*tmpH--));i--;
+ s = vo_mult32((*tmpX++), (*tmpH--));
+ i--;
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ i--;
while(i>0)
{
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
i -= 4;
}
- y[n] = ((s<<1) + 0x8000)>>16;
+ y[n] = voround(L_shl(s, 1));
n++;
tmpH = h+n;
tmpX = x;
i=n+1;
- s = vo_mult32((*tmpX++), (*tmpH--));i--;
- s += vo_mult32((*tmpX++), (*tmpH--));i--;
- s += vo_mult32((*tmpX++), (*tmpH--));i--;
+ s = vo_mult32((*tmpX++), (*tmpH--));
+ i--;
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ i--;
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ i--;
while(i>0)
{
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
i -= 4;
}
- y[n] = ((s<<1) + 0x8000)>>16;
+ y[n] = voround(L_shl(s, 1));
n++;
s = 0;
@@ -97,13 +102,13 @@
i=n+1;
while(i>0)
{
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
- s += vo_mult32((*tmpX++), (*tmpH--));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+ s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
i -= 4;
}
- y[n] = ((s<<1) + 0x8000)>>16;
+ y[n] = voround(L_shl(s, 1));
n++;
}
return;
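
The rounding rewrite above is the heart of the convolve.c change: for
an accumulator s with its top bits in use, (s << 1) is signed-overflow
undefined behavior in C, and on wrapping hardware the sign flips before
the + 0x8000 rounding is applied. A hypothetical worst case, reusing
the Word16/Word32 typedefs and the L_shl/voround sketches from the top
of this message:

    #include <stdio.h>

    int main(void)
    {
        Word32 s = 0x60000000;  /* large positive convolution sum */

        /* Old form, with the wraparound made explicit via unsigned
         * arithmetic: s << 1 wraps to 0xC0000000, a negative value. */
        Word32 wrapped = (Word32)((uint32_t)s << 1);
        Word16 old_y = (Word16)((wrapped + 0x8000) >> 16);

        /* New form: L_shl saturates at 0x7FFFFFFF and voround rounds
         * the high word, so the sample pins at the positive limit. */
        Word16 new_y = voround(L_shl(s, 1));

        printf("old %d, new %d\n", old_y, new_y);  /* old -16384, new 32767 */
        return 0;
    }
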
diff --git a/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c b/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
index de2a221..b453b25 100644
--- a/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
+++ b/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
@@ -84,8 +84,8 @@
/* Find interval to compute normalized correlation */
- t_min = t0_min - L_INTERPOL1;
- t_max = t0_max + L_INTERPOL1;
+ t_min = L_sub(t0_min, L_INTERPOL1);
+ t_max = L_add(t0_max, L_INTERPOL1);
corr = &corr_v[-t_min];
/* Compute normalized correlation between target and filtered excitation */
#ifdef ASM_OPT /* asm optimization branch */
@@ -188,15 +188,15 @@
L_tmp = 0;
for (i = 0; i < 64; i+=4)
{
- L_tmp += (xn[i] * xn[i]);
- L_tmp += (xn[i+1] * xn[i+1]);
- L_tmp += (xn[i+2] * xn[i+2]);
- L_tmp += (xn[i+3] * xn[i+3]);
+ L_tmp = L_add(L_tmp, (xn[i] * xn[i]));
+ L_tmp = L_add(L_tmp, (xn[i+1] * xn[i+1]));
+ L_tmp = L_add(L_tmp, (xn[i+2] * xn[i+2]));
+ L_tmp = L_add(L_tmp, (xn[i+3] * xn[i+3]));
}
- L_tmp = (L_tmp << 1) + 1;
+ L_tmp = L_add(L_shl(L_tmp, 1), 1);
exp = norm_l(L_tmp);
- exp = (32 - exp);
+ exp = L_sub(32, exp);
//exp = exp + 2; /* energy of xn[] x 2 + rounded up */
scale = -(exp >> 1); /* (1<<scale) < 1/sqrt(energy rounded) */
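
Why this energy loop needs saturating adds: each product xn[i] * xn[i]
can be as large as 2^30 (for xn[i] == -32768 in Q15), so the 64-term
sum can reach 2^36, well past the 2^31 - 1 ceiling of a Word32. On
crafted input the plain += accumulation is undefined behavior and, on
wrapping hardware, can come all the way back around to 0. A small
demonstration of that worst case, with hypothetical input and the
L_add sketch above assumed in scope:

    int main(void)
    {
        Word16 xn[64];
        Word32 wrap = 0, sat = 0;
        int i;

        for (i = 0; i < 64; i++)
            xn[i] = -32768;  /* pathological Q15 input */

        for (i = 0; i < 64; i++) {
            /* Old: plain += (wraparound emulated with unsigned math). */
            wrap = (Word32)((uint32_t)wrap + (uint32_t)(xn[i] * xn[i]));
            /* New: saturating accumulation pins at 0x7FFFFFFF. */
            sat = L_add(sat, xn[i] * xn[i]);
        }
        /* wrap ends at 0 (64 * 2^30 is a multiple of 2^32), which would
         * make the norm_l/scale math below meaningless; sat ends at
         * 2147483647, a sane upper bound for the energy. */
        return 0;
    }
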
@@ -209,36 +209,36 @@
L_tmp1 = 0;
for (i = 0; i < 64; i+=4)
{
- L_tmp += (xn[i] * excf[i]);
- L_tmp1 += (excf[i] * excf[i]);
- L_tmp += (xn[i+1] * excf[i+1]);
- L_tmp1 += (excf[i+1] * excf[i+1]);
- L_tmp += (xn[i+2] * excf[i+2]);
- L_tmp1 += (excf[i+2] * excf[i+2]);
- L_tmp += (xn[i+3] * excf[i+3]);
- L_tmp1 += (excf[i+3] * excf[i+3]);
+ L_tmp = L_add(L_tmp, (xn[i] * excf[i]));
+ L_tmp1 = L_add(L_tmp1, (excf[i] * excf[i]));
+ L_tmp = L_add(L_tmp, (xn[i+1] * excf[i+1]));
+ L_tmp1 = L_add(L_tmp1, (excf[i+1] * excf[i+1]));
+ L_tmp = L_add(L_tmp, (xn[i+2] * excf[i+2]));
+ L_tmp1 = L_add(L_tmp1, (excf[i+2] * excf[i+2]));
+ L_tmp = L_add(L_tmp, (xn[i+3] * excf[i+3]));
+ L_tmp1 = L_add(L_tmp1, (excf[i+3] * excf[i+3]));
}
- L_tmp = (L_tmp << 1) + 1;
- L_tmp1 = (L_tmp1 << 1) + 1;
+ L_tmp = L_add(L_shl(L_tmp, 1), 1);
+ L_tmp1 = L_add(L_shl(L_tmp1, 1), 1);
exp = norm_l(L_tmp);
- L_tmp = (L_tmp << exp);
- exp_corr = (30 - exp);
+ L_tmp = L_shl(L_tmp, exp);
+ exp_corr = L_sub(30, exp);
corr = extract_h(L_tmp);
exp = norm_l(L_tmp1);
- L_tmp = (L_tmp1 << exp);
- exp_norm = (30 - exp);
+ L_tmp = L_shl(L_tmp1, exp);
+ exp_norm = L_sub(30, exp);
Isqrt_n(&L_tmp, &exp_norm);
norm = extract_h(L_tmp);
/* Normalize correlation = correlation * (1/sqrt(energy)) */
- L_tmp = vo_L_mult(corr, norm);
+ L_tmp = L_mult(corr, norm);
- L_tmp2 = exp_corr + exp_norm + scale;
+ L_tmp2 = L_add(exp_corr, exp_norm + scale);
if(L_tmp2 < 0)
{
L_tmp2 = -L_tmp2;
@@ -246,10 +246,10 @@
}
else
{
- L_tmp = L_tmp << L_tmp2;
+ L_tmp = L_shl(L_tmp, L_tmp2);
}
- corr_norm[t] = vo_round(L_tmp);
+ corr_norm[t] = voround(L_tmp);
/* modify the filtered excitation excf[] for the next iteration */
if(t != t_max)
@@ -310,13 +310,13 @@
ptr = &(inter4_1[k][0]);
L_sum = vo_mult32(x[0], (*ptr++));
- L_sum += vo_mult32(x[1], (*ptr++));
- L_sum += vo_mult32(x[2], (*ptr++));
- L_sum += vo_mult32(x[3], (*ptr++));
- L_sum += vo_mult32(x[4], (*ptr++));
- L_sum += vo_mult32(x[5], (*ptr++));
- L_sum += vo_mult32(x[6], (*ptr++));
- L_sum += vo_mult32(x[7], (*ptr++));
+ L_sum = L_add(L_sum, vo_mult32(x[1], (*ptr++)));
+ L_sum = L_add(L_sum, vo_mult32(x[2], (*ptr++)));
+ L_sum = L_add(L_sum, vo_mult32(x[3], (*ptr++)));
+ L_sum = L_add(L_sum, vo_mult32(x[4], (*ptr++)));
+ L_sum = L_add(L_sum, vo_mult32(x[5], (*ptr++)));
+ L_sum = L_add(L_sum, vo_mult32(x[6], (*ptr++)));
+ L_sum = L_add(L_sum, vo_mult32(x[7], (*ptr++)));
sum = extract_h(L_add(L_shl2(L_sum, 2), 0x8000));
return (sum);
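
The exp/scale bookkeeping in this file is the usual block-floating-
point idiom: norm_l counts how many left shifts a value can take
before it would change sign, L_shl applies them, and the displaced
scale is carried in a separate exponent (30 - exp here, matching the
Q1 energies after the << 1). A sketch of norm_l under its standard
ETSI definition, which the amrwbenc basic_op.h is assumed to follow:

    /* Count the redundant sign bits of x: how many left shifts fit
     * before the value would change sign. 0 for x == 0, 31 for -1. */
    static Word16 norm_l(Word32 x)
    {
        Word16 n;
        if (x == 0) return 0;
        if (x == (Word32)0xFFFFFFFF) return 31;
        if (x < 0) x = ~x;
        for (n = 0; x < (Word32)0x40000000; n++)
            x <<= 1;
        return n;
    }

With the correlation and energy reduced to (mantissa, exponent) pairs
this way, the tail of the loop can multiply the mantissas with L_mult
and fold the exponents into a single L_shl, which is what the
corr_norm[t] computation above does.
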
diff --git a/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c b/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
index d59f129..b908ff8 100644
--- a/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
+++ b/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
@@ -810,10 +810,10 @@
vo_p2 = vo_p0-1;
for (j = 1; j <= M/4; j++)
{
- L_tmp -= *vo_p1++ * *vo_p2--;
- L_tmp -= *vo_p1++ * *vo_p2--;
- L_tmp -= *vo_p1++ * *vo_p2--;
- L_tmp -= *vo_p1++ * *vo_p2--;
+ L_tmp = L_sub(L_tmp, *vo_p1++ * *vo_p2--);
+ L_tmp = L_sub(L_tmp, *vo_p1++ * *vo_p2--);
+ L_tmp = L_sub(L_tmp, *vo_p1++ * *vo_p2--);
+ L_tmp = L_sub(L_tmp, *vo_p1++ * *vo_p2--);
}
*vo_p3++ = *vo_p0++ = vo_round((L_tmp <<4));
}
@@ -1205,7 +1205,7 @@
*------------------------------------------------------*/
/* y2 in Q9, gain_pit in Q14 */
- L_tmp = (gain_code * y2[L_SUBFR - 1])<<1;
+ L_tmp = L_mult(gain_code, y2[L_SUBFR - 1]);
L_tmp = L_shl(L_tmp, (5 + shift));
L_tmp = L_negate(L_tmp);
L_tmp += (xn[L_SUBFR - 1] * 16384)<<1;
@@ -1220,8 +1220,8 @@
{
Word32 tmp;
/* code in Q9, gain_pit in Q14 */
- L_tmp = (gain_code * code[i])<<1;
- L_tmp = (L_tmp << 5);
+ L_tmp = L_mult(gain_code, code[i]);
+ L_tmp = L_shl(L_tmp, 5);
tmp = L_mult(exc[i + i_subfr], gain_pit); // (exc[i + i_subfr] * gain_pit)<<1
L_tmp = L_add(L_tmp, tmp);
L_tmp = L_shl2(L_tmp, 1);
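
The voAMRWBEnc.c hunks replace the hand-written (a * b) << 1 with
L_mult. For 16-bit operands the raw product always fits in 31 bits, so
the shift overflows for exactly one input pair: -32768 * -32768 = 2^30,
whose doubled value has no positive Word32 representation. L_mult is
defined to saturate that single case. A demonstration of the
hypothetical worst case, reusing the L_mult sketch above:

    #include <stdio.h>

    int main(void)
    {
        Word16 gain = -32768, sample = -32768;  /* the one bad pair */

        /* Old: product is 2^30; << 1 wraps to INT32_MIN (sign flip),
         * emulated here with unsigned arithmetic. */
        Word32 raw = (Word32)((uint32_t)(gain * sample) << 1);

        /* New: L_mult saturates this case to 0x7FFFFFFF. */
        Word32 safe = L_mult(gain, sample);

        printf("raw %d, safe %d\n", raw, safe);
        /* raw -2147483648, safe 2147483647 */
        return 0;
    }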