Pass audio to AudioEncoder::Encode() in an ArrayView

Instead of passing it as separate pointer and size arguments.
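
To illustrate the call-site change, here is a before/after sketch excerpted
from the CNG unit test updated in this CL (not a standalone program):

  // Before: pointer and sample count passed as two separate arguments.
  encoded_info_ = cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
                               encoded_.size(), &encoded_[0]);

  // After: the same pointer and size travel together in an
  // rtc::ArrayView<const int16_t>, so Encode() can check the length itself.
  encoded_info_ = cng_->Encode(
      timestamp_,
      rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms_),
      encoded_.size(), &encoded_[0]);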

Review URL: https://codereview.webrtc.org/1418423010

Cr-Commit-Position: refs/heads/master@{#10535}
diff --git a/webrtc/base/array_view.h b/webrtc/base/array_view.h
index 019bd8b..02676f1 100644
--- a/webrtc/base/array_view.h
+++ b/webrtc/base/array_view.h
@@ -61,6 +61,7 @@
   // is const, because the ArrayView doesn't own the array. (To prevent
   // mutation, use ArrayView<const T>.)
   size_t size() const { return size_; }
+  bool empty() const { return size_ == 0; }
   T* data() const { return data_; }
   T& operator[](size_t idx) const {
     RTC_DCHECK_LT(idx, size_);
@@ -72,6 +73,15 @@
   const T* cbegin() const { return data_; }
   const T* cend() const { return data_ + size_; }
 
+  // Comparing two ArrayViews compares their (pointer,size) pairs; it does
+  // *not* dereference the pointers.
+  friend bool operator==(const ArrayView& a, const ArrayView& b) {
+    return a.data_ == b.data_ && a.size_ == b.size_;
+  }
+  friend bool operator!=(const ArrayView& a, const ArrayView& b) {
+    return !(a == b);
+  }
+
  private:
   // Invariant: !data_ iff size_ == 0.
   void CheckInvariant() const { RTC_DCHECK_EQ(!data_, size_ == 0); }
diff --git a/webrtc/base/array_view_unittest.cc b/webrtc/base/array_view_unittest.cc
index 0d1bff0..8bb1bcc 100644
--- a/webrtc/base/array_view_unittest.cc
+++ b/webrtc/base/array_view_unittest.cc
@@ -214,4 +214,20 @@
   }
 }
 
+TEST(ArrayViewTest, TestEmpty) {
+  EXPECT_TRUE(ArrayView<int>().empty());
+  const int a[] = {1, 2, 3};
+  EXPECT_FALSE(ArrayView<const int>(a).empty());
+}
+
+TEST(ArrayViewTest, TestCompare) {
+  int a[] = {1, 2, 3};
+  int b[] = {1, 2, 3};
+  EXPECT_EQ(ArrayView<int>(a), ArrayView<int>(a));
+  EXPECT_EQ(ArrayView<int>(), ArrayView<int>());
+  EXPECT_NE(ArrayView<int>(a), ArrayView<int>(b));
+  EXPECT_NE(ArrayView<int>(a), ArrayView<int>());
+  EXPECT_NE(ArrayView<int>(a), ArrayView<int>(a, 2));
+}
+
 }  // namespace rtc
diff --git a/webrtc/modules/audio_coding/codecs/audio_encoder.cc b/webrtc/modules/audio_coding/codecs/audio_encoder.cc
index 6d76300..388b0ff 100644
--- a/webrtc/modules/audio_coding/codecs/audio_encoder.cc
+++ b/webrtc/modules/audio_coding/codecs/audio_encoder.cc
@@ -21,13 +21,13 @@
   return SampleRateHz();
 }
 
-AudioEncoder::EncodedInfo AudioEncoder::Encode(uint32_t rtp_timestamp,
-                                               const int16_t* audio,
-                                               size_t num_samples_per_channel,
-                                               size_t max_encoded_bytes,
-                                               uint8_t* encoded) {
-  RTC_CHECK_EQ(num_samples_per_channel,
-               static_cast<size_t>(SampleRateHz() / 100));
+AudioEncoder::EncodedInfo AudioEncoder::Encode(
+    uint32_t rtp_timestamp,
+    rtc::ArrayView<const int16_t> audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
+  RTC_CHECK_EQ(audio.size(),
+               static_cast<size_t>(NumChannels() * SampleRateHz() / 100));
   EncodedInfo info =
       EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded);
   RTC_CHECK_LE(info.encoded_bytes, max_encoded_bytes);
diff --git a/webrtc/modules/audio_coding/codecs/audio_encoder.h b/webrtc/modules/audio_coding/codecs/audio_encoder.h
index cda9d86..553c35e 100644
--- a/webrtc/modules/audio_coding/codecs/audio_encoder.h
+++ b/webrtc/modules/audio_coding/codecs/audio_encoder.h
@@ -14,6 +14,7 @@
 #include <algorithm>
 #include <vector>
 
+#include "webrtc/base/array_view.h"
 #include "webrtc/typedefs.h"
 
 namespace webrtc {
@@ -91,13 +92,12 @@
   // Encode() checks some preconditions, calls EncodeInternal() which does the
   // actual work, and then checks some postconditions.
   EncodedInfo Encode(uint32_t rtp_timestamp,
-                     const int16_t* audio,
-                     size_t num_samples_per_channel,
+                     rtc::ArrayView<const int16_t> audio,
                      size_t max_encoded_bytes,
                      uint8_t* encoded);
 
   virtual EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                                     const int16_t* audio,
+                                     rtc::ArrayView<const int16_t> audio,
                                      size_t max_encoded_bytes,
                                      uint8_t* encoded) = 0;
 
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
index 1215246..e98c537 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
@@ -97,7 +97,7 @@
 
 AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
     uint32_t rtp_timestamp,
-    const int16_t* audio,
+    rtc::ArrayView<const int16_t> audio,
     size_t max_encoded_bytes,
     uint8_t* encoded) {
   RTC_CHECK_GE(max_encoded_bytes,
@@ -106,9 +106,8 @@
   RTC_CHECK_EQ(speech_buffer_.size(),
                rtp_timestamps_.size() * samples_per_10ms_frame);
   rtp_timestamps_.push_back(rtp_timestamp);
-  for (size_t i = 0; i < samples_per_10ms_frame; ++i) {
-    speech_buffer_.push_back(audio[i]);
-  }
+  RTC_DCHECK_EQ(samples_per_10ms_frame, audio.size());
+  speech_buffer_.insert(speech_buffer_.end(), audio.cbegin(), audio.cend());
   const size_t frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
   if (rtp_timestamps_.size() < frames_to_encode) {
     return EncodedInfo();
@@ -242,9 +241,12 @@
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
   AudioEncoder::EncodedInfo info;
   for (size_t i = 0; i < frames_to_encode; ++i) {
-    info = speech_encoder_->Encode(
-        rtp_timestamps_.front(), &speech_buffer_[i * samples_per_10ms_frame],
-        samples_per_10ms_frame, max_encoded_bytes, encoded);
+    info =
+        speech_encoder_->Encode(rtp_timestamps_.front(),
+                                rtc::ArrayView<const int16_t>(
+                                    &speech_buffer_[i * samples_per_10ms_frame],
+                                    samples_per_10ms_frame),
+                                max_encoded_bytes, encoded);
     if (i + 1 == frames_to_encode) {
       RTC_CHECK_GT(info.encoded_bytes, 0u) << "Encoder didn't deliver data.";
     } else {
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
index 0b837a0..ec3f633 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
@@ -75,8 +75,10 @@
 
   void Encode() {
     ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
-    encoded_info_ = cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
-                                 encoded_.size(), &encoded_[0]);
+    encoded_info_ = cng_->Encode(
+        timestamp_,
+        rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms_),
+        encoded_.size(), &encoded_[0]);
     timestamp_ += static_cast<uint32_t>(num_audio_samples_10ms_);
   }
 
diff --git a/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h b/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
index 3ca9eb6..c0d61c3 100644
--- a/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
+++ b/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
@@ -57,7 +57,7 @@
   size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
+                             rtc::ArrayView<const int16_t> audio,
                              size_t max_encoded_bytes,
                              uint8_t* encoded) override;
   void Reset() override;
diff --git a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
index dde3cc6..6930e2c 100644
--- a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
@@ -88,16 +88,13 @@
 
 AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
     uint32_t rtp_timestamp,
-    const int16_t* audio,
+    rtc::ArrayView<const int16_t> audio,
     size_t max_encoded_bytes,
     uint8_t* encoded) {
-  const int num_samples = SampleRateHz() / 100 * NumChannels();
   if (speech_buffer_.empty()) {
     first_timestamp_in_buffer_ = rtp_timestamp;
   }
-  for (int i = 0; i < num_samples; ++i) {
-    speech_buffer_.push_back(audio[i]);
-  }
+  speech_buffer_.insert(speech_buffer_.end(), audio.begin(), audio.end());
   if (speech_buffer_.size() < full_frame_samples_) {
     return EncodedInfo();
   }
diff --git a/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h b/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
index e532f9b..76eb594 100644
--- a/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
+++ b/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
@@ -42,7 +42,7 @@
   size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
+                             rtc::ArrayView<const int16_t> audio,
                              size_t max_encoded_bytes,
                              uint8_t* encoded) override;
   void Reset() override;
diff --git a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
index 43b097f..4c9535e 100644
--- a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -93,7 +93,7 @@
 
 AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
     uint32_t rtp_timestamp,
-    const int16_t* audio,
+    rtc::ArrayView<const int16_t> audio,
     size_t max_encoded_bytes,
     uint8_t* encoded) {
   RTC_CHECK_GE(max_encoded_bytes, MaxEncodedBytes());
diff --git a/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h b/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
index 12495c5..aad75a1 100644
--- a/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
+++ b/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
@@ -42,7 +42,7 @@
   size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
+                             rtc::ArrayView<const int16_t> audio,
                              size_t max_encoded_bytes,
                              uint8_t* encoded) override;
   void Reset() override;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
index 065dc06..7b497fd 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
+++ b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
@@ -10,7 +10,7 @@
 
 #include "webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h"
 
-#include <cstring>
+#include <algorithm>
 #include <limits>
 #include "webrtc/base/checks.h"
 #include "webrtc/common_types.h"
@@ -91,7 +91,7 @@
 
 AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
     uint32_t rtp_timestamp,
-    const int16_t* audio,
+    rtc::ArrayView<const int16_t> audio,
     size_t max_encoded_bytes,
     uint8_t* encoded) {
   RTC_DCHECK_GE(max_encoded_bytes, RequiredOutputSizeBytes());
@@ -101,9 +101,9 @@
     first_timestamp_in_buffer_ = rtp_timestamp;
 
   // Buffer input.
-  std::memcpy(input_buffer_ + kSampleRateHz / 100 * num_10ms_frames_buffered_,
-              audio,
-              kSampleRateHz / 100 * sizeof(audio[0]));
+  RTC_DCHECK_EQ(static_cast<size_t>(kSampleRateHz / 100), audio.size());
+  std::copy(audio.cbegin(), audio.cend(),
+            input_buffer_ + kSampleRateHz / 100 * num_10ms_frames_buffered_);
 
   // If we don't yet have enough buffered input for a whole packet, we're done
   // for now.
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h b/webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h
index 2bb3101..e050731 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h
@@ -41,7 +41,7 @@
   size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
+                             rtc::ArrayView<const int16_t> audio,
                              size_t max_encoded_bytes,
                              uint8_t* encoded) override;
   void Reset() override;
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
index b15ad94..3226877 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
@@ -61,7 +61,7 @@
   size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
+                             rtc::ArrayView<const int16_t> audio,
                              size_t max_encoded_bytes,
                              uint8_t* encoded) override;
   void Reset() override;
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
index 279f80d..4cfd782 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
@@ -115,7 +115,7 @@
 template <typename T>
 AudioEncoder::EncodedInfo AudioEncoderIsacT<T>::EncodeInternal(
     uint32_t rtp_timestamp,
-    const int16_t* audio,
+    rtc::ArrayView<const int16_t> audio,
     size_t max_encoded_bytes,
     uint8_t* encoded) {
   if (!packet_in_progress_) {
@@ -127,7 +127,7 @@
     IsacBandwidthInfo bwinfo = bwinfo_->Get();
     T::SetBandwidthInfo(isac_state_, &bwinfo);
   }
-  int r = T::Encode(isac_state_, audio, encoded);
+  int r = T::Encode(isac_state_, audio.data(), encoded);
   RTC_CHECK_GE(r, 0) << "Encode failed (error code "
                      << T::GetErrorCode(isac_state_) << ")";
 
diff --git a/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h b/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
index 95426d8..29cba8f 100644
--- a/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
+++ b/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
@@ -32,7 +32,7 @@
   // Note, we explicitly chose not to create a mock for the Encode method.
   MOCK_METHOD4(EncodeInternal,
                EncodedInfo(uint32_t timestamp,
-                           const int16_t* audio,
+                           rtc::ArrayView<const int16_t> audio,
                            size_t max_encoded_bytes,
                            uint8_t* encoded));
   MOCK_METHOD0(Reset, void());
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index eac7412..3daf3f9 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -132,13 +132,13 @@
 
 AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
     uint32_t rtp_timestamp,
-    const int16_t* audio,
+    rtc::ArrayView<const int16_t> audio,
     size_t max_encoded_bytes,
     uint8_t* encoded) {
   if (input_buffer_.empty())
     first_timestamp_in_buffer_ = rtp_timestamp;
-  input_buffer_.insert(input_buffer_.end(), audio,
-                       audio + SamplesPer10msFrame());
+  RTC_DCHECK_EQ(static_cast<size_t>(SamplesPer10msFrame()), audio.size());
+  input_buffer_.insert(input_buffer_.end(), audio.cbegin(), audio.cend());
   if (input_buffer_.size() <
       (static_cast<size_t>(Num10msFramesPerPacket()) * SamplesPer10msFrame())) {
     return EncodedInfo();
diff --git a/webrtc/modules/audio_coding/codecs/opus/include/audio_encoder_opus.h b/webrtc/modules/audio_coding/codecs/opus/include/audio_encoder_opus.h
index 7f2b563..088e2de 100644
--- a/webrtc/modules/audio_coding/codecs/opus/include/audio_encoder_opus.h
+++ b/webrtc/modules/audio_coding/codecs/opus/include/audio_encoder_opus.h
@@ -62,7 +62,7 @@
   int GetTargetBitrate() const override;
 
   EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
+                             rtc::ArrayView<const int16_t> audio,
                              size_t max_encoded_bytes,
                              uint8_t* encoded) override;
 
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc b/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
index 4630e44..c059fc5 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -10,6 +10,7 @@
 #include <string>
 
 #include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h"
 #include "webrtc/modules/audio_coding/codecs/opus/opus_inst.h"
 #include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
@@ -44,8 +45,7 @@
   void PrepareSpeechData(int channel, int block_length_ms, int loop_length_ms);
 
   int EncodeDecode(WebRtcOpusEncInst* encoder,
-                   const int16_t* input_audio,
-                   size_t input_samples,
+                   rtc::ArrayView<const int16_t> input_audio,
                    WebRtcOpusDecInst* decoder,
                    int16_t* output_audio,
                    int16_t* audio_type);
@@ -96,13 +96,14 @@
 }
 
 int OpusTest::EncodeDecode(WebRtcOpusEncInst* encoder,
-                           const int16_t* input_audio,
-                           size_t input_samples,
+                           rtc::ArrayView<const int16_t> input_audio,
                            WebRtcOpusDecInst* decoder,
                            int16_t* output_audio,
                            int16_t* audio_type) {
-  int encoded_bytes_int = WebRtcOpus_Encode(encoder, input_audio, input_samples,
-                                            kMaxBytes, bitstream_);
+  int encoded_bytes_int = WebRtcOpus_Encode(
+      encoder, input_audio.data(),
+      rtc::CheckedDivExact(input_audio.size(), static_cast<size_t>(channels_)),
+      kMaxBytes, bitstream_);
   EXPECT_GE(encoded_bytes_int, 0);
   encoded_bytes_ = static_cast<size_t>(encoded_bytes_int);
   int est_len = WebRtcOpus_DurationEst(decoder, bitstream_, encoded_bytes_);
@@ -129,8 +130,7 @@
                                      channels_ == 1 ? 32000 : 64000));
 
   // Set input audio as silence.
-  int16_t* silence = new int16_t[kOpus20msFrameSamples * channels_];
-  memset(silence, 0, sizeof(int16_t) * kOpus20msFrameSamples * channels_);
+  std::vector<int16_t> silence(kOpus20msFrameSamples * channels_, 0);
 
   // Setting DTX.
   EXPECT_EQ(0, dtx ? WebRtcOpus_EnableDtx(opus_encoder_) :
@@ -142,9 +142,8 @@
   for (int i = 0; i < 100; ++i) {
     EXPECT_EQ(kOpus20msFrameSamples,
               static_cast<size_t>(EncodeDecode(
-                  opus_encoder_, speech_data_.GetNextBlock(),
-                  kOpus20msFrameSamples, opus_decoder_, output_data_decode,
-                  &audio_type)));
+                  opus_encoder_, speech_data_.GetNextBlock(), opus_decoder_,
+                  output_data_decode, &audio_type)));
     // If not DTX, it should never enter DTX mode. If DTX, we do not care since
     // whether it enters DTX depends on the signal type.
     if (!dtx) {
@@ -158,10 +157,9 @@
   // We input some silent segments. In DTX mode, the encoder will stop sending.
   // However, DTX may happen after a while.
   for (int i = 0; i < 30; ++i) {
-    EXPECT_EQ(kOpus20msFrameSamples,
-              static_cast<size_t>(EncodeDecode(
-                  opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
-                  output_data_decode, &audio_type)));
+    EXPECT_EQ(kOpus20msFrameSamples, static_cast<size_t>(EncodeDecode(
+                                         opus_encoder_, silence, opus_decoder_,
+                                         output_data_decode, &audio_type)));
     if (!dtx) {
       EXPECT_GT(encoded_bytes_, 1U);
       EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
@@ -183,9 +181,9 @@
     // DTX mode is maintained 19 frames.
     for (int i = 0; i < 19; ++i) {
       EXPECT_EQ(kOpus20msFrameSamples,
-                static_cast<size_t>(EncodeDecode(
-                    opus_encoder_, silence, kOpus20msFrameSamples,
-                    opus_decoder_, output_data_decode, &audio_type)));
+                static_cast<size_t>(
+                    EncodeDecode(opus_encoder_, silence, opus_decoder_,
+                                 output_data_decode, &audio_type)));
       if (dtx) {
         EXPECT_EQ(0U, encoded_bytes_)  // Send 0 byte.
             << "Opus should have entered DTX mode.";
@@ -201,10 +199,9 @@
     }
 
     // Quit DTX after 19 frames.
-    EXPECT_EQ(kOpus20msFrameSamples,
-              static_cast<size_t>(EncodeDecode(
-                  opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
-                  output_data_decode, &audio_type)));
+    EXPECT_EQ(kOpus20msFrameSamples, static_cast<size_t>(EncodeDecode(
+                                         opus_encoder_, silence, opus_decoder_,
+                                         output_data_decode, &audio_type)));
 
     EXPECT_GT(encoded_bytes_, 1U);
     EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
@@ -212,10 +209,9 @@
     EXPECT_EQ(0, audio_type);  // Speech.
 
     // Enters DTX again immediately.
-    EXPECT_EQ(kOpus20msFrameSamples,
-              static_cast<size_t>(EncodeDecode(
-                  opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
-                  output_data_decode, &audio_type)));
+    EXPECT_EQ(kOpus20msFrameSamples, static_cast<size_t>(EncodeDecode(
+                                         opus_encoder_, silence, opus_decoder_,
+                                         output_data_decode, &audio_type)));
     if (dtx) {
       EXPECT_EQ(1U, encoded_bytes_);  // Send 1 byte.
       EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
@@ -232,10 +228,9 @@
   silence[0] = 10000;
   if (dtx) {
     // Verify that encoder/decoder can jump out from DTX mode.
-    EXPECT_EQ(kOpus20msFrameSamples,
-              static_cast<size_t>(EncodeDecode(
-                  opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
-                  output_data_decode, &audio_type)));
+    EXPECT_EQ(kOpus20msFrameSamples, static_cast<size_t>(EncodeDecode(
+                                         opus_encoder_, silence, opus_decoder_,
+                                         output_data_decode, &audio_type)));
     EXPECT_GT(encoded_bytes_, 1U);
     EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
     EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
@@ -244,7 +239,6 @@
 
   // Free memory.
   delete[] output_data_decode;
-  delete[] silence;
   EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
   EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
 }
@@ -314,10 +308,9 @@
   int16_t audio_type;
   int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
   EXPECT_EQ(kOpus20msFrameSamples,
-            static_cast<size_t>(EncodeDecode(
-                opus_encoder_, speech_data_.GetNextBlock(),
-                kOpus20msFrameSamples, opus_decoder_, output_data_decode,
-                &audio_type)));
+            static_cast<size_t>(
+                EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+                             opus_decoder_, output_data_decode, &audio_type)));
 
   // Free memory.
   delete[] output_data_decode;
@@ -374,10 +367,9 @@
   int16_t audio_type;
   int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
   EXPECT_EQ(kOpus20msFrameSamples,
-            static_cast<size_t>(EncodeDecode(
-                opus_encoder_, speech_data_.GetNextBlock(),
-                kOpus20msFrameSamples, opus_decoder_, output_data_decode,
-                &audio_type)));
+            static_cast<size_t>(
+                EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+                             opus_decoder_, output_data_decode, &audio_type)));
 
   WebRtcOpus_DecoderInit(opus_decoder_);
 
@@ -513,10 +505,9 @@
   int16_t audio_type;
   int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
   EXPECT_EQ(kOpus20msFrameSamples,
-            static_cast<size_t>(EncodeDecode(
-                opus_encoder_, speech_data_.GetNextBlock(),
-                kOpus20msFrameSamples, opus_decoder_, output_data_decode,
-                &audio_type)));
+            static_cast<size_t>(
+                EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+                             opus_decoder_, output_data_decode, &audio_type)));
 
   // Call decoder PLC.
   int16_t* plc_buffer = new int16_t[kOpus20msFrameSamples * channels_];
@@ -542,10 +533,12 @@
   EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
 
   // 10 ms. We use only first 10 ms of a 20 ms block.
-  int encoded_bytes_int = WebRtcOpus_Encode(opus_encoder_,
-                                            speech_data_.GetNextBlock(),
-                                            kOpus10msFrameSamples,
-                                            kMaxBytes, bitstream_);
+  auto speech_block = speech_data_.GetNextBlock();
+  int encoded_bytes_int = WebRtcOpus_Encode(
+      opus_encoder_, speech_block.data(),
+      rtc::CheckedDivExact(speech_block.size(),
+                           2 * static_cast<size_t>(channels_)),
+      kMaxBytes, bitstream_);
   EXPECT_GE(encoded_bytes_int, 0);
   EXPECT_EQ(kOpus10msFrameSamples,
             static_cast<size_t>(WebRtcOpus_DurationEst(
@@ -553,10 +546,11 @@
                 static_cast<size_t>(encoded_bytes_int))));
 
   // 20 ms
-  encoded_bytes_int = WebRtcOpus_Encode(opus_encoder_,
-                                        speech_data_.GetNextBlock(),
-                                        kOpus20msFrameSamples,
-                                        kMaxBytes, bitstream_);
+  speech_block = speech_data_.GetNextBlock();
+  encoded_bytes_int = WebRtcOpus_Encode(
+      opus_encoder_, speech_block.data(),
+      rtc::CheckedDivExact(speech_block.size(), static_cast<size_t>(channels_)),
+      kMaxBytes, bitstream_);
   EXPECT_GE(encoded_bytes_int, 0);
   EXPECT_EQ(kOpus20msFrameSamples,
             static_cast<size_t>(WebRtcOpus_DurationEst(
@@ -594,10 +588,12 @@
   OpusRepacketizer* rp = opus_repacketizer_create();
 
   for (int idx = 0; idx < kPackets; idx++) {
-    encoded_bytes_ = WebRtcOpus_Encode(opus_encoder_,
-                                       speech_data_.GetNextBlock(),
-                                       kOpus20msFrameSamples, kMaxBytes,
-                                       bitstream_);
+    auto speech_block = speech_data_.GetNextBlock();
+    encoded_bytes_ =
+        WebRtcOpus_Encode(opus_encoder_, speech_block.data(),
+                          rtc::CheckedDivExact(speech_block.size(),
+                                               static_cast<size_t>(channels_)),
+                          kMaxBytes, bitstream_);
     EXPECT_EQ(OPUS_OK, opus_repacketizer_cat(rp, bitstream_, encoded_bytes_));
   }
 
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
index a19d194..177c19a 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
@@ -54,12 +54,11 @@
 
 AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeInternal(
     uint32_t rtp_timestamp,
-    const int16_t* audio,
+    rtc::ArrayView<const int16_t> audio,
     size_t max_encoded_bytes,
     uint8_t* encoded) {
-  EncodedInfo info = speech_encoder_->Encode(
-      rtp_timestamp, audio, static_cast<size_t>(SampleRateHz() / 100),
-      max_encoded_bytes, encoded);
+  EncodedInfo info =
+      speech_encoder_->Encode(rtp_timestamp, audio, max_encoded_bytes, encoded);
   RTC_CHECK_GE(max_encoded_bytes,
                info.encoded_bytes + secondary_info_.encoded_bytes);
   RTC_CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
index 7837010..d7d3a66 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
@@ -44,7 +44,7 @@
   size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
   EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
-                             const int16_t* audio,
+                             rtc::ArrayView<const int16_t> audio,
                              size_t max_encoded_bytes,
                              uint8_t* encoded) override;
   void Reset() override;
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
index cb50652..c4c3910 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
@@ -60,8 +60,10 @@
 
   void Encode() {
     ASSERT_TRUE(red_.get() != NULL);
-    encoded_info_ = red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
-                                 encoded_.size(), &encoded_[0]);
+    encoded_info_ = red_->Encode(
+        timestamp_,
+        rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms),
+        encoded_.size(), &encoded_[0]);
     timestamp_ += num_audio_samples_10ms;
   }
 
@@ -83,7 +85,7 @@
   }
 
   AudioEncoder::EncodedInfo Encode(uint32_t timestamp,
-                                   const int16_t* audio,
+                                   rtc::ArrayView<const int16_t> audio,
                                    size_t max_encoded_bytes,
                                    uint8_t* encoded) {
     if (write_payload_) {
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
index 260f8a8..3b8b140 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -149,7 +149,9 @@
 
   encode_buffer_.SetSize(audio_encoder->MaxEncodedBytes());
   encoded_info = audio_encoder->Encode(
-      rtp_timestamp, input_data.audio, input_data.length_per_channel,
+      rtp_timestamp, rtc::ArrayView<const int16_t>(
+                         input_data.audio, input_data.audio_channel *
+                                               input_data.length_per_channel),
       encode_buffer_.size(), encode_buffer_.data());
   encode_buffer_.SetSize(encoded_info.encoded_bytes);
   bitrate_logger_.MaybeLog(audio_encoder->GetTargetBitrate() / 1000);
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
index 3aee344..cfceb0d 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
@@ -656,7 +656,11 @@
   }
 
   void InsertAudio() {
-    memcpy(input_frame_.data_, audio_loop_.GetNextBlock(), kNumSamples10ms);
+    // TODO(kwiberg): Use std::copy here. Might be complications because AFAICS
+    // this call confuses the number of samples with the number of bytes, and
+    // ends up copying only half of what it should.
+    memcpy(input_frame_.data_, audio_loop_.GetNextBlock().data(),
+           kNumSamples10ms);
     AudioCodingModuleTestOldApi::InsertAudio();
   }
 
@@ -774,9 +778,9 @@
       // Encode new frame.
       uint32_t input_timestamp = rtp_header_.header.timestamp;
       while (info.encoded_bytes == 0) {
-        info = isac_encoder_->Encode(
-            input_timestamp, audio_loop_.GetNextBlock(), kNumSamples10ms,
-            max_encoded_bytes, encoded.get());
+        info =
+            isac_encoder_->Encode(input_timestamp, audio_loop_.GetNextBlock(),
+                                  max_encoded_bytes, encoded.get());
         input_timestamp += 160;  // 10 ms at 16 kHz.
       }
       EXPECT_EQ(rtp_header_.header.timestamp + kPacketSizeSamples,
diff --git a/webrtc/modules/audio_coding/main/acm2/codec_owner_unittest.cc b/webrtc/modules/audio_coding/main/acm2/codec_owner_unittest.cc
index 6c23261..6c4d38f 100644
--- a/webrtc/modules/audio_coding/main/acm2/codec_owner_unittest.cc
+++ b/webrtc/modules/audio_coding/main/acm2/codec_owner_unittest.cc
@@ -46,8 +46,8 @@
                        int expected_send_even_if_empty) {
     uint8_t out[kPacketSizeSamples];
     AudioEncoder::EncodedInfo encoded_info;
-    encoded_info = codec_owner_.Encoder()->Encode(
-        timestamp_, kZeroData, kDataLengthSamples, kPacketSizeSamples, out);
+    encoded_info = codec_owner_.Encoder()->Encode(timestamp_, kZeroData,
+                                                  kPacketSizeSamples, out);
     timestamp_ += kDataLengthSamples;
     EXPECT_TRUE(encoded_info.redundant.empty());
     EXPECT_EQ(expected_out_length, encoded_info.encoded_bytes);
@@ -146,24 +146,26 @@
   AudioEncoder::EncodedInfo info;
   EXPECT_CALL(external_encoder, SampleRateHz())
       .WillRepeatedly(Return(kSampleRateHz));
+  EXPECT_CALL(external_encoder, NumChannels()).WillRepeatedly(Return(1));
 
   {
     InSequence s;
     info.encoded_timestamp = 0;
     EXPECT_CALL(external_encoder,
-                EncodeInternal(0, audio, arraysize(encoded), encoded))
+                EncodeInternal(0, rtc::ArrayView<const int16_t>(audio),
+                               arraysize(encoded), encoded))
         .WillOnce(Return(info));
     EXPECT_CALL(external_encoder, Mark("A"));
     EXPECT_CALL(external_encoder, Mark("B"));
     info.encoded_timestamp = 2;
     EXPECT_CALL(external_encoder,
-                EncodeInternal(2, audio, arraysize(encoded), encoded))
+                EncodeInternal(2, rtc::ArrayView<const int16_t>(audio),
+                               arraysize(encoded), encoded))
         .WillOnce(Return(info));
     EXPECT_CALL(external_encoder, Die());
   }
 
-  info = codec_owner_.Encoder()->Encode(0, audio, arraysize(audio),
-                                        arraysize(encoded), encoded);
+  info = codec_owner_.Encoder()->Encode(0, audio, arraysize(encoded), encoded);
   EXPECT_EQ(0u, info.encoded_timestamp);
   external_encoder.Mark("A");
 
@@ -172,14 +174,12 @@
   codec_inst.pacsize = kPacketSizeSamples;
   ASSERT_TRUE(codec_owner_.SetEncoders(codec_inst, -1, VADNormal, -1));
   // Don't expect any more calls to the external encoder.
-  info = codec_owner_.Encoder()->Encode(1, audio, arraysize(audio),
-                                        arraysize(encoded), encoded);
+  info = codec_owner_.Encoder()->Encode(1, audio, arraysize(encoded), encoded);
   external_encoder.Mark("B");
 
   // Change back to external encoder again.
   codec_owner_.SetEncoders(&external_encoder, -1, VADNormal, -1);
-  info = codec_owner_.Encoder()->Encode(2, audio, arraysize(audio),
-                                        arraysize(encoded), encoded);
+  info = codec_owner_.Encoder()->Encode(2, audio, arraysize(encoded), encoded);
   EXPECT_EQ(2u, info.encoded_timestamp);
 }
 
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc b/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
index 8f82fb1..accae85 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -158,7 +158,10 @@
                                                  interleaved_input.get());
 
       encoded_info_ = audio_encoder_->Encode(
-          0, interleaved_input.get(), audio_encoder_->SampleRateHz() / 100,
+          0, rtc::ArrayView<const int16_t>(interleaved_input.get(),
+                                           audio_encoder_->NumChannels() *
+                                               audio_encoder_->SampleRateHz() /
+                                               100),
           data_length_ * 2, output);
     }
     EXPECT_EQ(payload_type_, encoded_info_.payload_type);
diff --git a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
index 4340f54..6f47dd1 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
@@ -939,8 +939,10 @@
 
     uint32_t receive_timestamp = 0;
     for (int n = 0; n < 10; ++n) {  // Insert few packets and get audio.
-      size_t enc_len_bytes = WebRtcPcm16b_Encode(
-          input.GetNextBlock(), expected_samples_per_channel, payload);
+      auto block = input.GetNextBlock();
+      ASSERT_EQ(expected_samples_per_channel, block.size());
+      size_t enc_len_bytes =
+          WebRtcPcm16b_Encode(block.data(), block.size(), payload);
       ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);
 
       number_channels = 0;
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
index 2042e0d..b61bfde 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
@@ -66,8 +66,10 @@
     uint32_t dummy_timestamp = 0;
     AudioEncoder::EncodedInfo info;
     do {
-      info = encoder_->Encode(dummy_timestamp, &in_data[encoded_samples],
-                              kFrameSizeSamples, max_bytes, payload);
+      info = encoder_->Encode(dummy_timestamp,
+                              rtc::ArrayView<const int16_t>(
+                                  in_data + encoded_samples, kFrameSizeSamples),
+                              max_bytes, payload);
       encoded_samples += kFrameSizeSamples;
     } while (info.encoded_bytes == 0);
     return rtc::checked_cast<int>(info.encoded_bytes);
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
index 422a9fa..01c3964 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
@@ -66,8 +66,10 @@
     uint32_t dummy_timestamp = 0;
     AudioEncoder::EncodedInfo info;
     do {
-      info = encoder_->Encode(dummy_timestamp, &in_data[encoded_samples],
-                              kFrameSizeSamples, max_bytes, payload);
+      info = encoder_->Encode(dummy_timestamp,
+                              rtc::ArrayView<const int16_t>(
+                                  in_data + encoded_samples, kFrameSizeSamples),
+                              max_bytes, payload);
       encoded_samples += kFrameSizeSamples;
     } while (info.encoded_bytes == 0);
     return rtc::checked_cast<int>(info.encoded_bytes);
diff --git a/webrtc/modules/audio_coding/neteq/tools/audio_loop.cc b/webrtc/modules/audio_coding/neteq/tools/audio_loop.cc
index 2d2a7e3..eed9575 100644
--- a/webrtc/modules/audio_coding/neteq/tools/audio_loop.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/audio_loop.cc
@@ -43,13 +43,14 @@
   return true;
 }
 
-const int16_t* AudioLoop::GetNextBlock() {
+rtc::ArrayView<const int16_t> AudioLoop::GetNextBlock() {
   // Check that the AudioLoop is initialized.
-  if (block_length_samples_ == 0) return NULL;
+  if (block_length_samples_ == 0)
+    return rtc::ArrayView<const int16_t>();
 
   const int16_t* output_ptr = &audio_array_[next_index_];
   next_index_ = (next_index_ + block_length_samples_) % loop_length_samples_;
-  return output_ptr;
+  return rtc::ArrayView<const int16_t>(output_ptr, block_length_samples_);
 }
 
 
diff --git a/webrtc/modules/audio_coding/neteq/tools/audio_loop.h b/webrtc/modules/audio_coding/neteq/tools/audio_loop.h
index a897ee5..14e20f6 100644
--- a/webrtc/modules/audio_coding/neteq/tools/audio_loop.h
+++ b/webrtc/modules/audio_coding/neteq/tools/audio_loop.h
@@ -13,6 +13,7 @@
 
 #include <string>
 
+#include "webrtc/base/array_view.h"
 #include "webrtc/base/constructormagic.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/typedefs.h"
@@ -40,10 +41,9 @@
   bool Init(const std::string file_name, size_t max_loop_length_samples,
             size_t block_length_samples);
 
-  // Returns a pointer to the next block of audio. The number given as
-  // |block_length_samples| to the Init() function determines how many samples
-  // that can be safely read from the pointer.
-  const int16_t* GetNextBlock();
+  // Returns a (pointer,size) pair for the next block of audio. The size is
+  // equal to the |block_length_samples| Init() argument.
+  rtc::ArrayView<const int16_t> GetNextBlock();
 
  private:
   size_t next_index_;
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
index 9fe4dff..dbea1c6 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
@@ -62,11 +62,12 @@
   bool drift_flipped = false;
   int32_t packet_input_time_ms =
       rtp_gen.GetRtpHeader(kPayloadType, kInputBlockSizeSamples, &rtp_header);
-  const int16_t* input_samples = audio_loop.GetNextBlock();
-  if (!input_samples) exit(1);
+  auto input_samples = audio_loop.GetNextBlock();
+  if (input_samples.empty())
+    exit(1);
   uint8_t input_payload[kInputBlockSizeSamples * sizeof(int16_t)];
-  size_t payload_len =
-      WebRtcPcm16b_Encode(input_samples, kInputBlockSizeSamples, input_payload);
+  size_t payload_len = WebRtcPcm16b_Encode(input_samples.data(),
+                                           input_samples.size(), input_payload);
   assert(payload_len == kInputBlockSizeSamples * sizeof(int16_t));
 
   // Main loop.
@@ -93,10 +94,10 @@
                                                   kInputBlockSizeSamples,
                                                   &rtp_header);
       input_samples = audio_loop.GetNextBlock();
-      if (!input_samples) return -1;
-      payload_len = WebRtcPcm16b_Encode(const_cast<int16_t*>(input_samples),
-                                        kInputBlockSizeSamples,
-                                        input_payload);
+      if (input_samples.empty())
+        return -1;
+      payload_len = WebRtcPcm16b_Encode(input_samples.data(),
+                                        input_samples.size(), input_payload);
       assert(payload_len == kInputBlockSizeSamples * sizeof(int16_t));
     }