EchoCancellationImpl::ProcessRenderAudio: Use float samples directly

This patch lets EchoCancellationImpl::ProcessRenderAudio ask the given
AudioBuffer for float sample data directly, instead of asking for
int16 samples and then converting manually.

Since EchoCancellationImpl::ProcessRenderAudio receives the
AudioBuffer as a const pointer, const overloads of the float-data
accessors (data_f, low_pass_split_data_f, high_pass_split_data_f)
were added to AudioBuffer.

R=aluebs@webrtc.org, andrew@webrtc.org, bjornv@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/14749004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@6590 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/webrtc/modules/audio_processing/aec/aec_resampler.c b/webrtc/modules/audio_processing/aec/aec_resampler.c
index 5382665..469b811 100644
--- a/webrtc/modules/audio_processing/aec/aec_resampler.c
+++ b/webrtc/modules/audio_processing/aec/aec_resampler.c
@@ -26,7 +26,7 @@
 };
 
 typedef struct {
-  short buffer[kResamplerBufferSize];
+  float buffer[kResamplerBufferSize];
   float position;
 
   int deviceSampleRateHz;
@@ -71,15 +71,15 @@
 }
 
 void WebRtcAec_ResampleLinear(void* resampInst,
-                              const short* inspeech,
+                              const float* inspeech,
                               int size,
                               float skew,
-                              short* outspeech,
+                              float* outspeech,
                               int* size_out) {
   resampler_t* obj = (resampler_t*)resampInst;
 
-  short* y;
-  float be, tnew, interp;
+  float* y;
+  float be, tnew;
   int tn, mm;
 
   assert(!(size < 0 || size > 2 * FRAME_LEN));
@@ -91,7 +91,7 @@
   // Add new frame data in lookahead
   memcpy(&obj->buffer[FRAME_LEN + kResamplingDelay],
          inspeech,
-         size * sizeof(short));
+         size * sizeof(inspeech[0]));
 
   // Sample rate ratio
   be = 1 + skew;
@@ -106,15 +106,7 @@
   while (tn < size) {
 
     // Interpolation
-    interp = y[tn] + (tnew - tn) * (y[tn + 1] - y[tn]);
-
-    if (interp > 32767) {
-      interp = 32767;
-    } else if (interp < -32768) {
-      interp = -32768;
-    }
-
-    outspeech[mm] = (short)interp;
+    outspeech[mm] = y[tn] + (tnew - tn) * (y[tn + 1] - y[tn]);
     mm++;
 
     tnew = be * mm + obj->position;
@@ -127,7 +119,7 @@
   // Shift buffer
   memmove(obj->buffer,
           &obj->buffer[size],
-          (kResamplerBufferSize - size) * sizeof(short));
+          (kResamplerBufferSize - size) * sizeof(obj->buffer[0]));
 }
 
 int WebRtcAec_GetSkew(void* resampInst, int rawSkew, float* skewEst) {
diff --git a/webrtc/modules/audio_processing/aec/aec_resampler.h b/webrtc/modules/audio_processing/aec/aec_resampler.h
index e42c056..73e2821 100644
--- a/webrtc/modules/audio_processing/aec/aec_resampler.h
+++ b/webrtc/modules/audio_processing/aec/aec_resampler.h
@@ -30,10 +30,10 @@
 
 // Resamples input using linear interpolation.
 void WebRtcAec_ResampleLinear(void* resampInst,
-                              const short* inspeech,
+                              const float* inspeech,
                               int size,
                               float skew,
-                              short* outspeech,
+                              float* outspeech,
                               int* size_out);
 
 #endif  // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_RESAMPLER_H_
diff --git a/webrtc/modules/audio_processing/aec/echo_cancellation.c b/webrtc/modules/audio_processing/aec/echo_cancellation.c
index ba3b924..b58edcb 100644
--- a/webrtc/modules/audio_processing/aec/echo_cancellation.c
+++ b/webrtc/modules/audio_processing/aec/echo_cancellation.c
@@ -294,17 +294,12 @@
 
 // only buffer L band for farend
 int32_t WebRtcAec_BufferFarend(void* aecInst,
-                               const int16_t* farend,
+                               const float* farend,
                                int16_t nrOfSamples) {
   aecpc_t* aecpc = aecInst;
-  int32_t retVal = 0;
   int newNrOfSamples = (int)nrOfSamples;
-  short newFarend[MAX_RESAMP_LEN];
-  const int16_t* farend_ptr = farend;
-  float tmp_farend[MAX_RESAMP_LEN];
-  const float* farend_float = tmp_farend;
-  float skew;
-  int i = 0;
+  float new_farend[MAX_RESAMP_LEN];
+  const float* farend_ptr = farend;
 
   if (farend == NULL) {
     aecpc->lastError = AEC_NULL_POINTER_ERROR;
@@ -322,17 +317,15 @@
     return -1;
   }
 
-  skew = aecpc->skew;
-
   if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) {
     // Resample and get a new number of samples
     WebRtcAec_ResampleLinear(aecpc->resampler,
                              farend,
                              nrOfSamples,
-                             skew,
-                             newFarend,
+                             aecpc->skew,
+                             new_farend,
                              &newNrOfSamples);
-    farend_ptr = (const int16_t*)newFarend;
+    farend_ptr = new_farend;
   }
 
   aecpc->farend_started = 1;
@@ -343,32 +336,31 @@
   WebRtc_WriteBuffer(
       aecpc->far_pre_buf_s16, farend_ptr, (size_t)newNrOfSamples);
 #endif
-  // Cast to float and write the time-domain data to |far_pre_buf|.
-  for (i = 0; i < newNrOfSamples; i++) {
-    tmp_farend[i] = (float)farend_ptr[i];
-  }
-  WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_float, (size_t)newNrOfSamples);
+  // Write the time-domain data to |far_pre_buf|.
+  WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_ptr, (size_t)newNrOfSamples);
 
   // Transform to frequency domain if we have enough data.
   while (WebRtc_available_read(aecpc->far_pre_buf) >= PART_LEN2) {
     // We have enough data to pass to the FFT, hence read PART_LEN2 samples.
-    WebRtc_ReadBuffer(
-        aecpc->far_pre_buf, (void**)&farend_float, tmp_farend, PART_LEN2);
-
-    WebRtcAec_BufferFarendPartition(aecpc->aec, farend_float);
+    {
+      float* ptmp;
+      float tmp[PART_LEN2];
+      WebRtc_ReadBuffer(aecpc->far_pre_buf, (void**)&ptmp, tmp, PART_LEN2);
+      WebRtcAec_BufferFarendPartition(aecpc->aec, ptmp);
+    }
 
     // Rewind |far_pre_buf| PART_LEN samples for overlap before continuing.
     WebRtc_MoveReadPtr(aecpc->far_pre_buf, -PART_LEN);
 #ifdef WEBRTC_AEC_DEBUG_DUMP
     WebRtc_ReadBuffer(
-        aecpc->far_pre_buf_s16, (void**)&farend_ptr, newFarend, PART_LEN2);
+        aecpc->far_pre_buf_s16, (void**)&farend_ptr, new_farend, PART_LEN2);
     WebRtc_WriteBuffer(
         WebRtcAec_far_time_buf(aecpc->aec), &farend_ptr[PART_LEN], 1);
     WebRtc_MoveReadPtr(aecpc->far_pre_buf_s16, -PART_LEN);
 #endif
   }
 
-  return retVal;
+  return 0;
 }
 
 int32_t WebRtcAec_Process(void* aecInst,
diff --git a/webrtc/modules/audio_processing/aec/include/echo_cancellation.h b/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
index dc64a34..0cf6a5a 100644
--- a/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
+++ b/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
@@ -114,7 +114,7 @@
  * Inputs                       Description
  * -------------------------------------------------------------------
  * void*          aecInst       Pointer to the AEC instance
- * int16_t*       farend        In buffer containing one frame of
+ * const float*   farend        In buffer containing one frame of
  *                              farend signal for L band
  * int16_t        nrOfSamples   Number of samples in farend buffer
  *
@@ -124,7 +124,7 @@
  *                             -1: error
  */
 int32_t WebRtcAec_BufferFarend(void* aecInst,
-                               const int16_t* farend,
+                               const float* farend,
                                int16_t nrOfSamples);
 
 /*
diff --git a/webrtc/modules/audio_processing/aec/system_delay_unittest.cc b/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
index 5fbc560..f81ce47 100644
--- a/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
+++ b/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
@@ -47,7 +47,7 @@
   int samples_per_frame_;
   // Dummy input/output speech data.
   static const int kSamplesPerChunk = 160;
-  int16_t far_[kSamplesPerChunk];
+  float far_[kSamplesPerChunk];
   float near_[kSamplesPerChunk];
   float out_[kSamplesPerChunk];
 };
@@ -55,9 +55,10 @@
 SystemDelayTest::SystemDelayTest()
     : handle_(NULL), self_(NULL), samples_per_frame_(0) {
   // Dummy input data are set with more or less arbitrary non-zero values.
-  memset(far_, 1, sizeof(far_));
-  for (int i = 0; i < kSamplesPerChunk; i++)
+  for (int i = 0; i < kSamplesPerChunk; i++) {
+    far_[i] = 257.0;
     near_[i] = 514.0;
+  }
   memset(out_, 0, sizeof(out_));
 }
 
diff --git a/webrtc/modules/audio_processing/audio_buffer.cc b/webrtc/modules/audio_processing/audio_buffer.cc
index b0f1eb6..35e1eb7 100644
--- a/webrtc/modules/audio_processing/audio_buffer.cc
+++ b/webrtc/modules/audio_processing/audio_buffer.cc
@@ -294,11 +294,16 @@
   return const_cast<int16_t*>(t->data(channel));
 }
 
-float* AudioBuffer::data_f(int channel) {
+const float* AudioBuffer::data_f(int channel) const {
   assert(channel >= 0 && channel < num_proc_channels_);
   return channels_->fbuf()->channel(channel);
 }
 
+float* AudioBuffer::data_f(int channel) {
+  const AudioBuffer* t = this;
+  return const_cast<float*>(t->data_f(channel));
+}
+
 const int16_t* AudioBuffer::low_pass_split_data(int channel) const {
   assert(channel >= 0 && channel < num_proc_channels_);
   return split_channels_.get() ? split_channels_->low_channel(channel)
@@ -310,12 +315,17 @@
   return const_cast<int16_t*>(t->low_pass_split_data(channel));
 }
 
-float* AudioBuffer::low_pass_split_data_f(int channel) {
+const float* AudioBuffer::low_pass_split_data_f(int channel) const {
   assert(channel >= 0 && channel < num_proc_channels_);
   return split_channels_.get() ? split_channels_->low_channel_f(channel)
                                : data_f(channel);
 }
 
+float* AudioBuffer::low_pass_split_data_f(int channel) {
+  const AudioBuffer* t = this;
+  return const_cast<float*>(t->low_pass_split_data_f(channel));
+}
+
 const int16_t* AudioBuffer::high_pass_split_data(int channel) const {
   assert(channel >= 0 && channel < num_proc_channels_);
   return split_channels_.get() ? split_channels_->high_channel(channel) : NULL;
@@ -326,12 +336,17 @@
   return const_cast<int16_t*>(t->high_pass_split_data(channel));
 }
 
-float* AudioBuffer::high_pass_split_data_f(int channel) {
+const float* AudioBuffer::high_pass_split_data_f(int channel) const {
   assert(channel >= 0 && channel < num_proc_channels_);
   return split_channels_.get() ? split_channels_->high_channel_f(channel)
                                : NULL;
 }
 
+float* AudioBuffer::high_pass_split_data_f(int channel) {
+  const AudioBuffer* t = this;
+  return const_cast<float*>(t->high_pass_split_data_f(channel));
+}
+
 const int16_t* AudioBuffer::mixed_data(int channel) const {
   assert(channel >= 0 && channel < num_mixed_channels_);
 
diff --git a/webrtc/modules/audio_processing/audio_buffer.h b/webrtc/modules/audio_processing/audio_buffer.h
index 67e4f48..2fab814 100644
--- a/webrtc/modules/audio_processing/audio_buffer.h
+++ b/webrtc/modules/audio_processing/audio_buffer.h
@@ -69,8 +69,11 @@
   // Float versions of the accessors, with automatic conversion back and forth
   // as necessary. The range of the numbers are the same as for int16_t.
   float* data_f(int channel);
+  const float* data_f(int channel) const;
   float* low_pass_split_data_f(int channel);
+  const float* low_pass_split_data_f(int channel) const;
   float* high_pass_split_data_f(int channel);
+  const float* high_pass_split_data_f(int channel) const;
 
   const float* keyboard_data() const;
 
diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl.cc b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
index e770f9f..47b4f18 100644
--- a/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -89,7 +89,7 @@
       Handle* my_handle = static_cast<Handle*>(handle(handle_index));
       err = WebRtcAec_BufferFarend(
           my_handle,
-          audio->low_pass_split_data(j),
+          audio->low_pass_split_data_f(j),
           static_cast<int16_t>(audio->samples_per_split_channel()));
 
       if (err != apm_->kNoError) {