Simplify audio_buffer APIs

There is now a single API for accessing the data or the channels (one const and one non-const variant), whether merged or per band.
The band is passed in as a parameter instead of being selected through different methods.
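
For example, a caller that previously picked the band through distinct methods
now passes it explicitly (illustrative sketch only; "audio" is assumed to be an
AudioBuffer* available in the calling module):

    // Before: band-specific accessors.
    int16_t* low  = audio->low_pass_split_data(i);
    int16_t* high = audio->high_pass_split_data(i);

    // After: one accessor, band chosen via the Band enum.
    int16_t* low  = audio->split_data(i, kBand0To8kHz);
    int16_t* high = audio->split_data(i, kBand8To16kHz);

    // Read-only access uses the explicit *_const variants.
    const int16_t* low_c = audio->split_data_const(i, kBand0To8kHz);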

BUG=webrtc:3146
R=andrew@webrtc.org, bjornv@webrtc.org, kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/27249004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7790 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/webrtc/modules/audio_processing/audio_buffer.cc b/webrtc/modules/audio_processing/audio_buffer.cc
index 33af43e..696c5b9 100644
--- a/webrtc/modules/audio_processing/audio_buffer.cc
+++ b/webrtc/modules/audio_processing/audio_buffer.cc
@@ -199,16 +199,15 @@
   activity_ = AudioFrame::kVadUnknown;
 }
 
-const int16_t* AudioBuffer::data(int channel) const {
-  return channels_->ibuf_const()->channel(channel);
+const int16_t* AudioBuffer::data_const(int channel) const {
+  return channels_const()[channel];
 }
 
 int16_t* AudioBuffer::data(int channel) {
-  mixed_low_pass_valid_ = false;
-  return channels_->ibuf()->channel(channel);
+  return channels()[channel];
 }
 
-const int16_t* const* AudioBuffer::channels() const {
+const int16_t* const* AudioBuffer::channels_const() const {
   return channels_->ibuf_const()->channels();
 }
 
@@ -217,16 +216,42 @@
   return channels_->ibuf()->channels();
 }
 
-const float* AudioBuffer::data_f(int channel) const {
-  return channels_->fbuf_const()->channel(channel);
+const int16_t* AudioBuffer::split_data_const(int channel, Band band) const {
+  const int16_t* const* chs = split_channels_const(band);
+  return chs ? chs[channel] : NULL;
+}
+
+int16_t* AudioBuffer::split_data(int channel, Band band) {
+  int16_t* const* chs = split_channels(band);
+  return chs ? chs[channel] : NULL;
+}
+
+const int16_t* const* AudioBuffer::split_channels_const(Band band) const {
+  if (split_channels_.size() > static_cast<size_t>(band)) {
+    return split_channels_[band]->ibuf_const()->channels();
+  } else {
+    return band == kBand0To8kHz ? channels_->ibuf_const()->channels() : NULL;
+  }
+}
+
+int16_t* const* AudioBuffer::split_channels(Band band) {
+  mixed_low_pass_valid_ = false;
+  if (split_channels_.size() > static_cast<size_t>(band)) {
+    return split_channels_[band]->ibuf()->channels();
+  } else {
+    return band == kBand0To8kHz ? channels_->ibuf()->channels() : NULL;
+  }
+}
+
+const float* AudioBuffer::data_const_f(int channel) const {
+  return channels_const_f()[channel];
 }
 
 float* AudioBuffer::data_f(int channel) {
-  mixed_low_pass_valid_ = false;
-  return channels_->fbuf()->channel(channel);
+  return channels_f()[channel];
 }
 
-const float* const* AudioBuffer::channels_f() const {
+const float* const* AudioBuffer::channels_const_f() const {
   return channels_->fbuf_const()->channels();
 }
 
@@ -235,114 +260,31 @@
   return channels_->fbuf()->channels();
 }
 
-const int16_t* AudioBuffer::low_pass_split_data(int channel) const {
-  return split_channels_.size() > 0
-      ? split_channels_[0]->ibuf_const()->channel(channel)
-      : data(channel);
+const float* AudioBuffer::split_data_const_f(int channel, Band band) const {
+  const float* const* chs = split_channels_const_f(band);
+  return chs ? chs[channel] : NULL;
 }
 
-int16_t* AudioBuffer::low_pass_split_data(int channel) {
+float* AudioBuffer::split_data_f(int channel, Band band) {
+  float* const* chs = split_channels_f(band);
+  return chs ? chs[channel] : NULL;
+}
+
+const float* const* AudioBuffer::split_channels_const_f(Band band) const {
+  if (split_channels_.size() > static_cast<size_t>(band)) {
+    return split_channels_[band]->fbuf_const()->channels();
+  } else {
+    return band == kBand0To8kHz ? channels_->fbuf_const()->channels() : NULL;
+  }
+}
+
+float* const* AudioBuffer::split_channels_f(Band band) {
   mixed_low_pass_valid_ = false;
-  return split_channels_.size() > 0
-      ? split_channels_[0]->ibuf()->channel(channel)
-      : data(channel);
-}
-
-const int16_t* const* AudioBuffer::low_pass_split_channels() const {
-  return split_channels_.size() > 0
-             ? split_channels_[0]->ibuf_const()->channels()
-             : channels();
-}
-
-int16_t* const* AudioBuffer::low_pass_split_channels() {
-  mixed_low_pass_valid_ = false;
-  return split_channels_.size() > 0 ? split_channels_[0]->ibuf()->channels()
-                                   : channels();
-}
-
-const float* AudioBuffer::low_pass_split_data_f(int channel) const {
-  return split_channels_.size() > 0
-      ? split_channels_[0]->fbuf_const()->channel(channel)
-      : data_f(channel);
-}
-
-float* AudioBuffer::low_pass_split_data_f(int channel) {
-  mixed_low_pass_valid_ = false;
-  return split_channels_.size() > 0
-      ? split_channels_[0]->fbuf()->channel(channel)
-      : data_f(channel);
-}
-
-const float* const* AudioBuffer::low_pass_split_channels_f() const {
-  return split_channels_.size() > 0
-      ? split_channels_[0]->fbuf_const()->channels()
-      : channels_f();
-}
-
-float* const* AudioBuffer::low_pass_split_channels_f() {
-  mixed_low_pass_valid_ = false;
-  return split_channels_.size() > 0
-      ? split_channels_[0]->fbuf()->channels()
-      : channels_f();
-}
-
-const int16_t* AudioBuffer::high_pass_split_data(int channel) const {
-  return split_channels_.size() > 1
-      ? split_channels_[1]->ibuf_const()->channel(channel)
-      : NULL;
-}
-
-int16_t* AudioBuffer::high_pass_split_data(int channel) {
-  return split_channels_.size() > 1
-      ? split_channels_[1]->ibuf()->channel(channel)
-      : NULL;
-}
-
-const int16_t* const* AudioBuffer::high_pass_split_channels() const {
-  return split_channels_.size() > 1
-             ? split_channels_[1]->ibuf_const()->channels()
-             : NULL;
-}
-
-int16_t* const* AudioBuffer::high_pass_split_channels() {
-  return split_channels_.size() > 1 ? split_channels_[1]->ibuf()->channels()
-                                    : NULL;
-}
-
-const float* AudioBuffer::high_pass_split_data_f(int channel) const {
-  return split_channels_.size() > 1
-      ? split_channels_[1]->fbuf_const()->channel(channel)
-      : NULL;
-}
-
-float* AudioBuffer::high_pass_split_data_f(int channel) {
-  return split_channels_.size() > 1
-      ? split_channels_[1]->fbuf()->channel(channel)
-      : NULL;
-}
-
-const float* const* AudioBuffer::high_pass_split_channels_f() const {
-  return split_channels_.size() > 1
-      ? split_channels_[1]->fbuf_const()->channels()
-      : NULL;
-}
-
-float* const* AudioBuffer::high_pass_split_channels_f() {
-  return split_channels_.size() > 1
-      ? split_channels_[1]->fbuf()->channels()
-      : NULL;
-}
-
-const float* const* AudioBuffer::super_high_pass_split_channels_f() const {
-  return split_channels_.size() > 2
-      ? split_channels_[2]->fbuf_const()->channels()
-      : NULL;
-}
-
-float* const* AudioBuffer::super_high_pass_split_channels_f() {
-  return split_channels_.size() > 2
-      ? split_channels_[2]->fbuf()->channels()
-      : NULL;
+  if (split_channels_.size() > static_cast<size_t>(band)) {
+    return split_channels_[band]->fbuf()->channels();
+  } else {
+    return band == kBand0To8kHz ? channels_->fbuf()->channels() : NULL;
+  }
 }
 
 const int16_t* AudioBuffer::mixed_low_pass_data() {
@@ -350,7 +292,7 @@
   assert(num_proc_channels_ == 1 || num_proc_channels_ == 2);
 
   if (num_proc_channels_ == 1) {
-    return low_pass_split_data(0);
+    return split_data_const(0, kBand0To8kHz);
   }
 
   if (!mixed_low_pass_valid_) {
@@ -358,8 +300,8 @@
       mixed_low_pass_channels_.reset(
           new ChannelBuffer<int16_t>(samples_per_split_channel_, 1));
     }
-    StereoToMono(low_pass_split_data(0),
-                 low_pass_split_data(1),
+    StereoToMono(split_data_const(0, kBand0To8kHz),
+                 split_data_const(1, kBand0To8kHz),
                  mixed_low_pass_channels_->data(),
                  samples_per_split_channel_);
     mixed_low_pass_valid_ = true;
@@ -462,7 +404,8 @@
                                    num_proc_channels_));
   }
   for (int i = 0; i < num_proc_channels_; i++) {
-    low_pass_reference_channels_->CopyFrom(low_pass_split_data(i), i);
+    low_pass_reference_channels_->CopyFrom(split_data_const(i, kBand0To8kHz),
+                                           i);
   }
 }
 
diff --git a/webrtc/modules/audio_processing/audio_buffer.h b/webrtc/modules/audio_processing/audio_buffer.h
index fe2cf36..59bb1ff 100644
--- a/webrtc/modules/audio_processing/audio_buffer.h
+++ b/webrtc/modules/audio_processing/audio_buffer.h
@@ -27,6 +27,12 @@
 class PushSincResampler;
 class IFChannelBuffer;
 
+enum Band {
+  kBand0To8kHz = 0,
+  kBand8To16kHz = 1,
+  kBand16To24kHz = 2
+};
+
 class AudioBuffer {
  public:
   // TODO(ajm): Switch to take ChannelLayouts.
@@ -46,17 +52,14 @@
   // in memory. Prefer to use the const variants of each accessor when
   // possible, since they incur less float<->int16 conversion overhead.
   int16_t* data(int channel);
-  const int16_t* data(int channel) const;
+  const int16_t* data_const(int channel) const;
   int16_t* const* channels();
-  const int16_t* const* channels() const;
-  int16_t* low_pass_split_data(int channel);
-  const int16_t* low_pass_split_data(int channel) const;
-  int16_t* high_pass_split_data(int channel);
-  const int16_t* high_pass_split_data(int channel) const;
-  int16_t* const* low_pass_split_channels();
-  const int16_t* const* low_pass_split_channels() const;
-  int16_t* const* high_pass_split_channels();
-  const int16_t* const* high_pass_split_channels() const;
+  const int16_t* const* channels_const() const;
+  int16_t* split_data(int channel, Band band);
+  const int16_t* split_data_const(int channel, Band band) const;
+  int16_t* const* split_channels(Band band);
+  const int16_t* const* split_channels_const(Band band) const;
+
   // Returns a pointer to the low-pass data downmixed to mono. If this data
   // isn't already available it re-calculates it.
   const int16_t* mixed_low_pass_data();
@@ -65,22 +68,13 @@
   // Float versions of the accessors, with automatic conversion back and forth
   // as necessary. The range of the numbers are the same as for int16_t.
   float* data_f(int channel);
-  const float* data_f(int channel) const;
-
+  const float* data_const_f(int channel) const;
   float* const* channels_f();
-  const float* const* channels_f() const;
-
-  float* low_pass_split_data_f(int channel);
-  const float* low_pass_split_data_f(int channel) const;
-  float* high_pass_split_data_f(int channel);
-  const float* high_pass_split_data_f(int channel) const;
-
-  float* const* low_pass_split_channels_f();
-  const float* const* low_pass_split_channels_f() const;
-  float* const* high_pass_split_channels_f();
-  const float* const* high_pass_split_channels_f() const;
-  float* const* super_high_pass_split_channels_f();
-  const float* const* super_high_pass_split_channels_f() const;
+  const float* const* channels_const_f() const;
+  float* split_data_f(int channel, Band band);
+  const float* split_data_const_f(int channel, Band band) const;
+  float* const* split_channels_f(Band band);
+  const float* const* split_channels_const_f(Band band) const;
 
   const float* keyboard_data() const;
 
diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl.cc b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
index 47b4f18..f871852 100644
--- a/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -89,7 +89,7 @@
       Handle* my_handle = static_cast<Handle*>(handle(handle_index));
       err = WebRtcAec_BufferFarend(
           my_handle,
-          audio->low_pass_split_data_f(j),
+          audio->split_data_const_f(j, kBand0To8kHz),
           static_cast<int16_t>(audio->samples_per_split_channel()));
 
       if (err != apm_->kNoError) {
@@ -129,10 +129,10 @@
       Handle* my_handle = handle(handle_index);
       err = WebRtcAec_Process(
           my_handle,
-          audio->low_pass_split_data_f(i),
-          audio->high_pass_split_data_f(i),
-          audio->low_pass_split_data_f(i),
-          audio->high_pass_split_data_f(i),
+          audio->split_data_const_f(i, kBand0To8kHz),
+          audio->split_data_const_f(i, kBand8To16kHz),
+          audio->split_data_f(i, kBand0To8kHz),
+          audio->split_data_f(i, kBand8To16kHz),
           static_cast<int16_t>(audio->samples_per_split_channel()),
           apm_->stream_delay_ms(),
           stream_drift_samples_);
diff --git a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
index a03adc5..54d98ae 100644
--- a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
+++ b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
@@ -95,7 +95,7 @@
       Handle* my_handle = static_cast<Handle*>(handle(handle_index));
       err = WebRtcAecm_BufferFarend(
           my_handle,
-          audio->low_pass_split_data(j),
+          audio->split_data_const(j, kBand0To8kHz),
           static_cast<int16_t>(audio->samples_per_split_channel()));
 
       if (err != apm_->kNoError) {
@@ -129,7 +129,7 @@
     // TODO(ajm): improve how this works, possibly inside AECM.
     //            This is kind of hacked up.
     const int16_t* noisy = audio->low_pass_reference(i);
-    int16_t* clean = audio->low_pass_split_data(i);
+    const int16_t* clean = audio->split_data_const(i, kBand0To8kHz);
     if (noisy == NULL) {
       noisy = clean;
       clean = NULL;
@@ -140,7 +140,7 @@
           my_handle,
           noisy,
           clean,
-          audio->low_pass_split_data(i),
+          audio->split_data(i, kBand0To8kHz),
           static_cast<int16_t>(audio->samples_per_split_channel()),
           apm_->stream_delay_ms());
 
diff --git a/webrtc/modules/audio_processing/gain_control_impl.cc b/webrtc/modules/audio_processing/gain_control_impl.cc
index cf7df16..7ef0ae0 100644
--- a/webrtc/modules/audio_processing/gain_control_impl.cc
+++ b/webrtc/modules/audio_processing/gain_control_impl.cc
@@ -90,8 +90,8 @@
       Handle* my_handle = static_cast<Handle*>(handle(i));
       err = WebRtcAgc_AddMic(
           my_handle,
-          audio->low_pass_split_data(i),
-          audio->high_pass_split_data(i),
+          audio->split_data(i, kBand0To8kHz),
+          audio->split_data(i, kBand8To16kHz),
           static_cast<int16_t>(audio->samples_per_split_channel()));
 
       if (err != apm_->kNoError) {
@@ -106,8 +106,8 @@
 
       err = WebRtcAgc_VirtualMic(
           my_handle,
-          audio->low_pass_split_data(i),
-          audio->high_pass_split_data(i),
+          audio->split_data(i, kBand0To8kHz),
+          audio->split_data(i, kBand8To16kHz),
           static_cast<int16_t>(audio->samples_per_split_channel()),
           analog_capture_level_,
           &capture_level_out);
@@ -144,11 +144,11 @@
 
     int err = WebRtcAgc_Process(
         my_handle,
-        audio->low_pass_split_data(i),
-        audio->high_pass_split_data(i),
+        audio->split_data_const(i, kBand0To8kHz),
+        audio->split_data_const(i, kBand8To16kHz),
         static_cast<int16_t>(audio->samples_per_split_channel()),
-        audio->low_pass_split_data(i),
-        audio->high_pass_split_data(i),
+        audio->split_data(i, kBand0To8kHz),
+        audio->split_data(i, kBand8To16kHz),
         capture_levels_[i],
         &capture_level_out,
         apm_->echo_cancellation()->stream_has_echo(),
diff --git a/webrtc/modules/audio_processing/high_pass_filter_impl.cc b/webrtc/modules/audio_processing/high_pass_filter_impl.cc
index a0c386b..7861fc8 100644
--- a/webrtc/modules/audio_processing/high_pass_filter_impl.cc
+++ b/webrtc/modules/audio_processing/high_pass_filter_impl.cc
@@ -123,7 +123,7 @@
   for (int i = 0; i < num_handles(); i++) {
     Handle* my_handle = static_cast<Handle*>(handle(i));
     err = Filter(my_handle,
-                 audio->low_pass_split_data(i),
+                 audio->split_data(i, kBand0To8kHz),
                  audio->samples_per_split_channel());
 
     if (err != apm_->kNoError) {
diff --git a/webrtc/modules/audio_processing/level_estimator_impl.cc b/webrtc/modules/audio_processing/level_estimator_impl.cc
index cfe295a..5fbda83 100644
--- a/webrtc/modules/audio_processing/level_estimator_impl.cc
+++ b/webrtc/modules/audio_processing/level_estimator_impl.cc
@@ -31,7 +31,8 @@
 
   RMSLevel* rms_level = static_cast<RMSLevel*>(handle(0));
   for (int i = 0; i < audio->num_channels(); ++i) {
-    rms_level->Process(audio->data(i), audio->samples_per_channel());
+    rms_level->Process(audio->data_const(i),
+                       audio->samples_per_channel());
   }
 
   return AudioProcessing::kNoError;
diff --git a/webrtc/modules/audio_processing/noise_suppression_impl.cc b/webrtc/modules/audio_processing/noise_suppression_impl.cc
index ab8dada..4e056dd 100644
--- a/webrtc/modules/audio_processing/noise_suppression_impl.cc
+++ b/webrtc/modules/audio_processing/noise_suppression_impl.cc
@@ -67,7 +67,7 @@
     Handle* my_handle = static_cast<Handle*>(handle(i));
 
     int err = WebRtcNs_Analyze(my_handle,
-                               audio->low_pass_split_data_f(i));
+                               audio->split_data_f(i, kBand0To8kHz));
     if (err != apm_->kNoError) {
       return GetHandleError(my_handle);
     }
@@ -89,16 +89,16 @@
     Handle* my_handle = static_cast<Handle*>(handle(i));
 #if defined(WEBRTC_NS_FLOAT)
     err = WebRtcNs_Process(my_handle,
-                           audio->low_pass_split_data_f(i),
-                           audio->high_pass_split_data_f(i),
-                           audio->low_pass_split_data_f(i),
-                           audio->high_pass_split_data_f(i));
+                           audio->split_data_f(i, kBand0To8kHz),
+                           audio->split_data_f(i, kBand8To16kHz),
+                           audio->split_data_f(i, kBand0To8kHz),
+                           audio->split_data_f(i, kBand8To16kHz));
 #elif defined(WEBRTC_NS_FIXED)
     err = WebRtcNsx_Process(my_handle,
-                            audio->low_pass_split_data(i),
-                            audio->high_pass_split_data(i),
-                            audio->low_pass_split_data(i),
-                            audio->high_pass_split_data(i));
+                            audio->split_data(i, kBand0To8kHz),
+                            audio->split_data(i, kBand8To16kHz),
+                            audio->split_data(i, kBand0To8kHz),
+                            audio->split_data(i, kBand8To16kHz));
 #endif
 
     if (err != apm_->kNoError) {