We changed Encode() and EncodeInternal() return type from bool to void in this issue:
https://webrtc-codereview.appspot.com/38279004/
Now we don't have to pass EncodedInfo as an output parameter, but can return it instead. This also has the benefit of making it clear that EncodeInternal() needs to fill in this info.

R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/43839004

Cr-Commit-Position: refs/heads/master@{#8749}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8749 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/webrtc/modules/audio_coding/codecs/audio_encoder.cc b/webrtc/modules/audio_coding/codecs/audio_encoder.cc
index 1d83e54..76cb33b 100644
--- a/webrtc/modules/audio_coding/codecs/audio_encoder.cc
+++ b/webrtc/modules/audio_coding/codecs/audio_encoder.cc
@@ -19,16 +19,19 @@
 AudioEncoder::EncodedInfo::~EncodedInfo() {
 }
 
-void AudioEncoder::Encode(uint32_t rtp_timestamp,
-                          const int16_t* audio,
-                          size_t num_samples_per_channel,
-                          size_t max_encoded_bytes,
-                          uint8_t* encoded,
-                          EncodedInfo* info) {
+const AudioEncoder::EncodedInfo AudioEncoder::kZeroEncodedBytes;
+
+AudioEncoder::EncodedInfo AudioEncoder::Encode(uint32_t rtp_timestamp,
+                                               const int16_t* audio,
+                                               size_t num_samples_per_channel,
+                                               size_t max_encoded_bytes,
+                                               uint8_t* encoded) {
   CHECK_EQ(num_samples_per_channel,
            static_cast<size_t>(SampleRateHz() / 100));
-  EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded, info);
-  CHECK_LE(info->encoded_bytes, max_encoded_bytes);
+  EncodedInfo info =
+      EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded);
+  CHECK_LE(info.encoded_bytes, max_encoded_bytes);
+  return info;
 }
 
 int AudioEncoder::RtpTimestampRateHz() const {
diff --git a/webrtc/modules/audio_coding/codecs/audio_encoder.h b/webrtc/modules/audio_coding/codecs/audio_encoder.h
index 20ac8b9..6e29f08 100644
--- a/webrtc/modules/audio_coding/codecs/audio_encoder.h
+++ b/webrtc/modules/audio_coding/codecs/audio_encoder.h
@@ -54,20 +54,21 @@
     std::vector<EncodedInfoLeaf> redundant;
   };
 
+  static const EncodedInfo kZeroEncodedBytes;
+
   virtual ~AudioEncoder() {}
 
   // Accepts one 10 ms block of input audio (i.e., sample_rate_hz() / 100 *
   // num_channels() samples). Multi-channel audio must be sample-interleaved.
-  // The encoder produces zero or more bytes of output in |encoded|,
-  // and provides additional encoding information in |info|.
+  // The encoder produces zero or more bytes of output in |encoded| and
+  // returns additional encoding information.
   // The caller is responsible for making sure that |max_encoded_bytes| is
   // not smaller than the number of bytes actually produced by the encoder.
-  void Encode(uint32_t rtp_timestamp,
-              const int16_t* audio,
-              size_t num_samples_per_channel,
-              size_t max_encoded_bytes,
-              uint8_t* encoded,
-              EncodedInfo* info);
+  EncodedInfo Encode(uint32_t rtp_timestamp,
+                     const int16_t* audio,
+                     size_t num_samples_per_channel,
+                     size_t max_encoded_bytes,
+                     uint8_t* encoded);
 
   // Return the input sample rate in Hz and the number of input channels.
   // These are constants set at instantiation time.
@@ -107,11 +108,10 @@
   virtual void SetProjectedPacketLossRate(double fraction) {}
 
  protected:
-  virtual void EncodeInternal(uint32_t rtp_timestamp,
-                              const int16_t* audio,
-                              size_t max_encoded_bytes,
-                              uint8_t* encoded,
-                              EncodedInfo* info) = 0;
+  virtual EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                                     const int16_t* audio,
+                                     size_t max_encoded_bytes,
+                                     uint8_t* encoded) = 0;
 };
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
index d7c1ea0..38ebca1 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
@@ -109,13 +109,12 @@
   speech_encoder_->SetProjectedPacketLossRate(fraction);
 }
 
-void AudioEncoderCng::EncodeInternal(uint32_t rtp_timestamp,
-                                     const int16_t* audio,
-                                     size_t max_encoded_bytes,
-                                     uint8_t* encoded,
-                                     EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   CHECK_GE(max_encoded_bytes, static_cast<size_t>(num_cng_coefficients_ + 1));
-  info->encoded_bytes = 0;
   const int num_samples = SampleRateHz() / 100 * NumChannels();
   if (speech_buffer_.empty()) {
     CHECK_EQ(frames_in_buffer_, 0);
@@ -126,7 +125,7 @@
   }
   ++frames_in_buffer_;
   if (frames_in_buffer_ < speech_encoder_->Num10MsFramesInNextPacket()) {
-    return;
+    return kZeroEncodedBytes;
   }
   CHECK_LE(frames_in_buffer_ * 10, kMaxFrameSizeMs)
       << "Frame size cannot be larger than " << kMaxFrameSizeMs
@@ -159,14 +158,15 @@
         samples_per_10ms_frame * blocks_in_second_vad_call, SampleRateHz());
   }
 
+  EncodedInfo info;
   switch (activity) {
     case Vad::kPassive: {
-      EncodePassive(max_encoded_bytes, encoded, info);
+      info = EncodePassive(max_encoded_bytes, encoded);
       last_frame_active_ = false;
       break;
     }
     case Vad::kActive: {
-      EncodeActive(max_encoded_bytes, encoded, info);
+      info = EncodeActive(max_encoded_bytes, encoded);
       last_frame_active_ = true;
       break;
     }
@@ -178,15 +178,17 @@
 
   speech_buffer_.clear();
   frames_in_buffer_ = 0;
+  return info;
 }
 
-void AudioEncoderCng::EncodePassive(size_t max_encoded_bytes,
-                                    uint8_t* encoded,
-                                    EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   bool force_sid = last_frame_active_;
   bool output_produced = false;
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
   CHECK_GE(max_encoded_bytes, frames_in_buffer_ * samples_per_10ms_frame);
+  AudioEncoder::EncodedInfo info;
   for (int i = 0; i < frames_in_buffer_; ++i) {
     int16_t encoded_bytes_tmp = 0;
     CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
@@ -195,30 +197,32 @@
                               encoded, &encoded_bytes_tmp, force_sid), 0);
     if (encoded_bytes_tmp > 0) {
       CHECK(!output_produced);
-      info->encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
+      info.encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
       output_produced = true;
       force_sid = false;
     }
   }
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = cng_payload_type_;
-  info->send_even_if_empty = true;
-  info->speech = false;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = cng_payload_type_;
+  info.send_even_if_empty = true;
+  info.speech = false;
+  return info;
 }
 
-void AudioEncoderCng::EncodeActive(size_t max_encoded_bytes,
-                                   uint8_t* encoded,
-                                   EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
+  AudioEncoder::EncodedInfo info;
   for (int i = 0; i < frames_in_buffer_; ++i) {
-    speech_encoder_->Encode(first_timestamp_in_buffer_,
-                            &speech_buffer_[i * samples_per_10ms_frame],
-                            samples_per_10ms_frame, max_encoded_bytes,
-                            encoded, info);
+    info = speech_encoder_->Encode(
+        first_timestamp_in_buffer_, &speech_buffer_[i * samples_per_10ms_frame],
+        samples_per_10ms_frame, max_encoded_bytes, encoded);
     if (i < frames_in_buffer_ - 1) {
-      CHECK_EQ(info->encoded_bytes, 0u) << "Encoder delivered data too early.";
+      CHECK_EQ(info.encoded_bytes, 0u) << "Encoder delivered data too early.";
     }
   }
+  return info;
 }
 
 size_t AudioEncoderCng::SamplesPer10msFrame() const {
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
index 5dfa4d5..528cf34 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
@@ -75,9 +75,8 @@
 
   void Encode() {
     ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
-    encoded_info_ = AudioEncoder::EncodedInfo();
-    cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
-                 encoded_.size(), &encoded_[0], &encoded_info_);
+    encoded_info_ = cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
+                                 encoded_.size(), &encoded_[0]);
     timestamp_ += num_audio_samples_10ms_;
   }
 
@@ -92,24 +91,24 @@
         .WillRepeatedly(Return(active_speech ? Vad::kActive : Vad::kPassive));
 
     // Don't expect any calls to the encoder yet.
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
     for (int i = 0; i < blocks_per_frame - 1; ++i) {
       Encode();
       EXPECT_EQ(0u, encoded_info_.encoded_bytes);
     }
-    AudioEncoder::EncodedInfo info;
     if (active_speech) {
       // Now expect |blocks_per_frame| calls to the encoder in sequence.
       // Let the speech codec mock return true and set the number of encoded
       // bytes to |kMockReturnEncodedBytes|.
       InSequence s;
       for (int j = 0; j < blocks_per_frame - 1; ++j) {
-        EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-            .WillOnce(SetArgPointee<4>(info));
+        EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+            .WillOnce(Return(AudioEncoder::kZeroEncodedBytes));
       }
+      AudioEncoder::EncodedInfo info;
       info.encoded_bytes = kMockReturnEncodedBytes;
-      EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-          .WillOnce(SetArgPointee<4>(info));
+      EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+          .WillOnce(Return(info));
     }
     Encode();
     if (active_speech) {
@@ -254,7 +253,7 @@
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
       .WillRepeatedly(Return(Vad::kPassive));
   // Expect no calls at all to the speech encoder mock.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
   uint32_t expected_timestamp = timestamp_;
   for (int i = 0; i < 100; ++i) {
     Encode();
@@ -284,20 +283,23 @@
   CreateCng();
 
   // All of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .Times(6);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .Times(6)
+      .WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kActive));
   EXPECT_TRUE(encoded_info_.speech);
 
   // First half of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .Times(6);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .Times(6)
+      .WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kPassive));
   EXPECT_TRUE(encoded_info_.speech);
 
   // Second half of the frame is active speech.
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .Times(6);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .Times(6)
+      .WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
   EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive));
   EXPECT_TRUE(encoded_info_.speech);
 
@@ -336,22 +338,10 @@
   CheckVadInputSize(60, 30, 30);
 }
 
-// Verifies that the EncodedInfo struct pointer passed to
-// AudioEncoderCng::Encode is propagated to the Encode call to the underlying
-// speech encoder.
-TEST_F(AudioEncoderCngTest, VerifyEncoderInfoPropagation) {
-  CreateCng();
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, &encoded_info_));
-  EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
-  EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
-      .WillOnce(Return(Vad::kActive));
-  Encode();
-}
-
 // Verifies that the correct payload type is set when CNG is encoded.
 TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
   CreateCng();
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _)).Times(0);
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
   EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
   EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
       .WillOnce(Return(Vad::kPassive));
@@ -385,8 +375,7 @@
       .WillOnce(Return(Vad::kActive));
   AudioEncoder::EncodedInfo info;
   info.encoded_bytes = kMockReturnEncodedBytes;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .WillOnce(SetArgPointee<4>(info));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).WillOnce(Return(info));
   Encode();
   EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);
 
diff --git a/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h b/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
index cc01650..daecd51 100644
--- a/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
+++ b/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
@@ -56,11 +56,10 @@
   void SetProjectedPacketLossRate(double fraction) override;
 
  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
  private:
   // Deleter for use with scoped_ptr. E.g., use as
@@ -69,12 +68,8 @@
     inline void operator()(CNG_enc_inst* ptr) const { WebRtcCng_FreeEnc(ptr); }
   };
 
-  void EncodePassive(size_t max_encoded_bytes,
-                    uint8_t* encoded,
-                    EncodedInfo* info);
-  void EncodeActive(size_t max_encoded_bytes,
-                    uint8_t* encoded,
-                    EncodedInfo* info);
+  EncodedInfo EncodePassive(size_t max_encoded_bytes, uint8_t* encoded);
+  EncodedInfo EncodeActive(size_t max_encoded_bytes, uint8_t* encoded);
   size_t SamplesPer10msFrame() const;
 
   AudioEncoder* speech_encoder_;
diff --git a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
index 5c43a85..99566aa 100644
--- a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
@@ -66,11 +66,11 @@
   return num_10ms_frames_per_packet_;
 }
 
-void AudioEncoderPcm::EncodeInternal(uint32_t rtp_timestamp,
-                                     const int16_t* audio,
-                                     size_t max_encoded_bytes,
-                                     uint8_t* encoded,
-                                     EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   const int num_samples = SampleRateHz() / 100 * NumChannels();
   if (speech_buffer_.empty()) {
     first_timestamp_in_buffer_ = rtp_timestamp;
@@ -79,17 +79,18 @@
     speech_buffer_.push_back(audio[i]);
   }
   if (speech_buffer_.size() < full_frame_samples_) {
-    info->encoded_bytes = 0;
-    return;
+    return kZeroEncodedBytes;
   }
   CHECK_EQ(speech_buffer_.size(), full_frame_samples_);
   CHECK_GE(max_encoded_bytes, full_frame_samples_);
   int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
   CHECK_GE(ret, 0);
   speech_buffer_.clear();
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = payload_type_;
-  info->encoded_bytes = static_cast<size_t>(ret);
+  EncodedInfo info;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  info.encoded_bytes = static_cast<size_t>(ret);
+  return info;
 }
 
 int16_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
diff --git a/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h b/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
index e64bcea..6e588ec 100644
--- a/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
+++ b/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
@@ -41,11 +41,10 @@
  protected:
   AudioEncoderPcm(const Config& config, int sample_rate_hz);
 
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
   virtual int16_t EncodeCall(const int16_t* audio,
                              size_t input_len,
diff --git a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
index bbdcb1d..7eb4493 100644
--- a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -77,11 +77,11 @@
   return num_10ms_frames_per_packet_;
 }
 
-void AudioEncoderG722::EncodeInternal(uint32_t rtp_timestamp,
-                                      const int16_t* audio,
-                                      size_t max_encoded_bytes,
-                                      uint8_t* encoded,
-                                      EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   CHECK_GE(max_encoded_bytes, MaxEncodedBytes());
 
   if (num_10ms_frames_buffered_ == 0)
@@ -95,8 +95,7 @@
 
   // If we don't yet have enough samples for a packet, we're done for now.
   if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
-    info->encoded_bytes = 0;
-    return;
+    return kZeroEncodedBytes;
   }
 
   // Encode each channel separately.
@@ -124,9 +123,11 @@
       encoded[i * num_channels_ + j] =
           interleave_buffer_[2 * j] << 4 | interleave_buffer_[2 * j + 1];
   }
-  info->encoded_bytes = samples_per_channel / 2 * num_channels_;
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = payload_type_;
+  EncodedInfo info;
+  info.encoded_bytes = samples_per_channel / 2 * num_channels_;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  return info;
 }
 
 int AudioEncoderG722::SamplesPerChannel() const {
diff --git a/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h b/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
index 81b44d6..b1be6b9 100644
--- a/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
+++ b/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
@@ -38,11 +38,10 @@
   int Max10MsFramesInAPacket() const override;
 
  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
  private:
   // The encoder state for one channel.
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
index 1e85a07..4971e7b 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
+++ b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
@@ -63,11 +63,11 @@
   return num_10ms_frames_per_packet_;
 }
 
-void AudioEncoderIlbc::EncodeInternal(uint32_t rtp_timestamp,
-                                      const int16_t* audio,
-                                      size_t max_encoded_bytes,
-                                      uint8_t* encoded,
-                                      EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   DCHECK_GE(max_encoded_bytes, RequiredOutputSizeBytes());
 
   // Save timestamp if starting a new packet.
@@ -82,8 +82,7 @@
   // If we don't yet have enough buffered input for a whole packet, we're done
   // for now.
   if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
-    info->encoded_bytes = 0;
-    return;
+    return kZeroEncodedBytes;
   }
 
   // Encode buffered input.
@@ -95,10 +94,12 @@
       kSampleRateHz / 100 * num_10ms_frames_per_packet_,
       encoded);
   CHECK_GE(output_len, 0);
-  info->encoded_bytes = output_len;
-  DCHECK_EQ(info->encoded_bytes, RequiredOutputSizeBytes());
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = payload_type_;
+  EncodedInfo info;
+  info.encoded_bytes = output_len;
+  DCHECK_EQ(info.encoded_bytes, RequiredOutputSizeBytes());
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  return info;
 }
 
 size_t AudioEncoderIlbc::RequiredOutputSizeBytes() const {
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h b/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h
index a5378d1..91d17b4 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h
@@ -38,11 +38,10 @@
   int Max10MsFramesInAPacket() const override;
 
  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
  private:
   size_t RequiredOutputSizeBytes() const;
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
index 6b197bc..95c83e8 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
@@ -84,11 +84,10 @@
 
  protected:
   // AudioEncoder protected method.
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
   // AudioDecoder protected method.
   int DecodeInternal(const uint8_t* encoded,
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
index 87d71ab..02acfa6 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
@@ -184,11 +184,11 @@
 }
 
 template <typename T>
-void AudioEncoderDecoderIsacT<T>::EncodeInternal(uint32_t rtp_timestamp,
-                                                 const int16_t* audio,
-                                                 size_t max_encoded_bytes,
-                                                 uint8_t* encoded,
-                                                 EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderDecoderIsacT<T>::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   CriticalSectionScoped cs_lock(lock_.get());
   if (!packet_in_progress_) {
     // Starting a new packet; remember the timestamp for later.
@@ -206,15 +206,17 @@
   // buffer. All we can do is check for an overrun after the fact.
   CHECK(static_cast<size_t>(r) <= max_encoded_bytes);
 
-  info->encoded_bytes = r;
   if (r == 0)
-    return;
+    return kZeroEncodedBytes;
 
   // Got enough input to produce a packet. Return the saved timestamp from
   // the first chunk of input that went into the packet.
   packet_in_progress_ = false;
-  info->encoded_timestamp = packet_timestamp_;
-  info->payload_type = payload_type_;
+  EncodedInfo info;
+  info.encoded_bytes = r;
+  info.encoded_timestamp = packet_timestamp_;
+  info.payload_type = payload_type_;
+  return info;
 }
 
 template <typename T>
diff --git a/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h b/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
index 7425e9a..25fd7a8 100644
--- a/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
+++ b/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
@@ -29,12 +29,11 @@
   MOCK_METHOD1(SetTargetBitrate, void(int));
   MOCK_METHOD1(SetProjectedPacketLossRate, void(double));
   // Note, we explicitly chose not to create a mock for the Encode method.
-  MOCK_METHOD5(EncodeInternal,
-               void(uint32_t timestamp,
-                    const int16_t* audio,
-                    size_t max_encoded_bytes,
-                    uint8_t* encoded,
-                    EncodedInfo* info));
+  MOCK_METHOD4(EncodeInternal,
+               EncodedInfo(uint32_t timestamp,
+                           const int16_t* audio,
+                           size_t max_encoded_bytes,
+                           uint8_t* encoded));
 };
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index ae08423..be92589 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -183,19 +183,18 @@
   }
 }
 
-void AudioEncoderOpus::EncodeInternal(uint32_t rtp_timestamp,
-                                      const int16_t* audio,
-                                      size_t max_encoded_bytes,
-                                      uint8_t* encoded,
-                                      EncodedInfo* info) {
+AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
   if (input_buffer_.empty())
     first_timestamp_in_buffer_ = rtp_timestamp;
   input_buffer_.insert(input_buffer_.end(), audio,
                        audio + samples_per_10ms_frame_);
   if (input_buffer_.size() < (static_cast<size_t>(num_10ms_frames_per_packet_) *
                               samples_per_10ms_frame_)) {
-    info->encoded_bytes = 0;
-    return;
+    return kZeroEncodedBytes;
   }
   CHECK_EQ(input_buffer_.size(),
            static_cast<size_t>(num_10ms_frames_per_packet_) *
@@ -207,12 +206,13 @@
       ClampInt16(max_encoded_bytes), encoded);
   CHECK_GE(r, 0);  // Fails only if fed invalid data.
   input_buffer_.clear();
-  info->encoded_bytes = r;
-  info->encoded_timestamp = first_timestamp_in_buffer_;
-  info->payload_type = payload_type_;
-  // Allows Opus to send empty packets.
-  info->send_even_if_empty = true;
-  info->speech = r > 0;
+  EncodedInfo info;
+  info.encoded_bytes = r;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  info.send_even_if_empty = true;  // Allows Opus to send empty packets.
+  info.speech = r > 0;
+  return info;
 }
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h b/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h
index 0a2a008..bd76b49 100644
--- a/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h
+++ b/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h
@@ -58,11 +58,10 @@
   bool dtx_enabled() const { return dtx_enabled_; }
 
  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
  private:
   const int num_10ms_frames_per_packet_;
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
index 28c72fb..86f1158 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
@@ -60,48 +60,48 @@
   speech_encoder_->SetProjectedPacketLossRate(fraction);
 }
 
-void AudioEncoderCopyRed::EncodeInternal(uint32_t rtp_timestamp,
-                                         const int16_t* audio,
-                                         size_t max_encoded_bytes,
-                                         uint8_t* encoded,
-                                         EncodedInfo* info) {
-  speech_encoder_->Encode(rtp_timestamp, audio,
-                          static_cast<size_t>(SampleRateHz() / 100),
-                          max_encoded_bytes, encoded, info);
+AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeInternal(
+    uint32_t rtp_timestamp,
+    const int16_t* audio,
+    size_t max_encoded_bytes,
+    uint8_t* encoded) {
+  EncodedInfo info = speech_encoder_->Encode(
+      rtp_timestamp, audio, static_cast<size_t>(SampleRateHz() / 100),
+      max_encoded_bytes, encoded);
   CHECK_GE(max_encoded_bytes,
-           info->encoded_bytes + secondary_info_.encoded_bytes);
-  CHECK(info->redundant.empty()) << "Cannot use nested redundant encoders.";
+           info.encoded_bytes + secondary_info_.encoded_bytes);
+  CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";
 
-  if (info->encoded_bytes > 0) {
+  if (info.encoded_bytes > 0) {
     // |info| will be implicitly cast to an EncodedInfoLeaf struct, effectively
     // discarding the (empty) vector of redundant information. This is
     // intentional.
-    info->redundant.push_back(*info);
-    DCHECK_EQ(info->redundant.size(), 1u);
+    info.redundant.push_back(info);
+    DCHECK_EQ(info.redundant.size(), 1u);
     if (secondary_info_.encoded_bytes > 0) {
-      memcpy(&encoded[info->encoded_bytes], secondary_encoded_.get(),
+      memcpy(&encoded[info.encoded_bytes], secondary_encoded_.get(),
              secondary_info_.encoded_bytes);
-      info->redundant.push_back(secondary_info_);
-      DCHECK_EQ(info->redundant.size(), 2u);
+      info.redundant.push_back(secondary_info_);
+      DCHECK_EQ(info.redundant.size(), 2u);
     }
     // Save primary to secondary.
-    if (secondary_allocated_ < info->encoded_bytes) {
-      secondary_encoded_.reset(new uint8_t[info->encoded_bytes]);
-      secondary_allocated_ = info->encoded_bytes;
+    if (secondary_allocated_ < info.encoded_bytes) {
+      secondary_encoded_.reset(new uint8_t[info.encoded_bytes]);
+      secondary_allocated_ = info.encoded_bytes;
     }
     CHECK(secondary_encoded_);
-    memcpy(secondary_encoded_.get(), encoded, info->encoded_bytes);
-    secondary_info_ = *info;
-    DCHECK_EQ(info->speech, info->redundant[0].speech);
+    memcpy(secondary_encoded_.get(), encoded, info.encoded_bytes);
+    secondary_info_ = info;
+    DCHECK_EQ(info.speech, info.redundant[0].speech);
   }
   // Update main EncodedInfo.
-  info->payload_type = red_payload_type_;
-  info->encoded_bytes = 0;
-  for (std::vector<EncodedInfoLeaf>::const_iterator it =
-           info->redundant.begin();
-       it != info->redundant.end(); ++it) {
-    info->encoded_bytes += it->encoded_bytes;
+  info.payload_type = red_payload_type_;
+  info.encoded_bytes = 0;
+  for (std::vector<EncodedInfoLeaf>::const_iterator it = info.redundant.begin();
+       it != info.redundant.end(); ++it) {
+    info.encoded_bytes += it->encoded_bytes;
   }
+  return info;
 }
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
index 7ce9ca0..fd92d52 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
@@ -45,11 +45,10 @@
   void SetProjectedPacketLossRate(double fraction) override;
 
  protected:
-  void EncodeInternal(uint32_t rtp_timestamp,
-                      const int16_t* audio,
-                      size_t max_encoded_bytes,
-                      uint8_t* encoded,
-                      EncodedInfo* info) override;
+  EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
+                             const int16_t* audio,
+                             size_t max_encoded_bytes,
+                             uint8_t* encoded) override;
 
  private:
   AudioEncoder* speech_encoder_;
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
index 2ae2fa2..14c30d0 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
@@ -60,9 +60,8 @@
 
   void Encode() {
     ASSERT_TRUE(red_.get() != NULL);
-    encoded_info_ = AudioEncoder::EncodedInfo();
-    red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
-                 encoded_.size(), &encoded_[0], &encoded_info_);
+    encoded_info_ = red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
+                                 encoded_.size(), &encoded_[0]);
     timestamp_ += num_audio_samples_10ms;
   }
 
@@ -83,18 +82,16 @@
     memset(&info_, 0, sizeof(info_));
   }
 
-  void Encode(uint32_t timestamp,
-              const int16_t* audio,
-              size_t max_encoded_bytes,
-              uint8_t* encoded,
-              AudioEncoder::EncodedInfo* info) {
+  AudioEncoder::EncodedInfo Encode(uint32_t timestamp,
+                                   const int16_t* audio,
+                                   size_t max_encoded_bytes,
+                                   uint8_t* encoded) {
     if (write_payload_) {
       CHECK(encoded);
       CHECK_LE(info_.encoded_bytes, max_encoded_bytes);
       memcpy(encoded, payload_, info_.encoded_bytes);
     }
-    CHECK(info);
-    *info = info_;
+    return info_;
   }
 
   AudioEncoder::EncodedInfo info_;
@@ -144,7 +141,8 @@
   InSequence s;
   MockFunction<void(int check_point_id)> check;
   for (int i = 1; i <= 6; ++i) {
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _));
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+        .WillRepeatedly(Return(AudioEncoder::kZeroEncodedBytes));
     EXPECT_CALL(check, Call(i));
     Encode();
     check.Call(i);
@@ -153,13 +151,13 @@
 
 // Checks that no output is produced if the underlying codec doesn't emit any
 // new data, even if the RED codec is loaded with a secondary encoding.
-TEST_F(AudioEncoderCopyRedTest, CheckNoOuput) {
+TEST_F(AudioEncoderCopyRedTest, CheckNoOutput) {
   // Start with one Encode() call that will produce output.
   static const size_t kEncodedSize = 17;
-  AudioEncoder::EncodedInfo info;
-  info.encoded_bytes = kEncodedSize;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .WillOnce(SetArgPointee<4>(info));
+  AudioEncoder::EncodedInfo nonZeroEncodedBytes;
+  nonZeroEncodedBytes.encoded_bytes = kEncodedSize;
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .WillOnce(Return(nonZeroEncodedBytes));
   Encode();
   // First call is a special case, since it does not include a secondary
   // payload.
@@ -167,16 +165,14 @@
   EXPECT_EQ(kEncodedSize, encoded_info_.encoded_bytes);
 
   // Next call to the speech encoder will not produce any output.
-  info.encoded_bytes = 0;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .WillOnce(SetArgPointee<4>(info));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .WillOnce(Return(AudioEncoder::kZeroEncodedBytes));
   Encode();
   EXPECT_EQ(0u, encoded_info_.encoded_bytes);
 
   // Final call to the speech encoder will produce output.
-  info.encoded_bytes = kEncodedSize;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-      .WillOnce(SetArgPointee<4>(info));
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+      .WillOnce(Return(nonZeroEncodedBytes));
   Encode();
   EXPECT_EQ(2 * kEncodedSize, encoded_info_.encoded_bytes);
   ASSERT_EQ(2u, encoded_info_.redundant.size());
@@ -192,8 +188,8 @@
   for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) {
     AudioEncoder::EncodedInfo info;
     info.encoded_bytes = encode_size;
-    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
-        .WillOnce(SetArgPointee<4>(info));
+    EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
+        .WillOnce(Return(info));
   }
 
   // First call is a special case, since it does not include a secondary
@@ -218,7 +214,7 @@
   helper.info_.encoded_bytes = 17;
   helper.info_.encoded_timestamp = timestamp_;
   uint32_t primary_timestamp = timestamp_;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
       .WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
 
   // First call is a special case, since it does not include a secondary
@@ -249,7 +245,7 @@
     payload[i] = i;
   }
   helper.payload_ = payload;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
       .WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
 
   // First call is a special case, since it does not include a secondary
@@ -286,7 +282,7 @@
   helper.info_.encoded_bytes = 17;
   const int primary_payload_type = red_payload_type_ + 1;
   helper.info_.payload_type = primary_payload_type;
-  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _, _))
+  EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
       .WillRepeatedly(Invoke(&helper, &MockEncodeHelper::Encode));
 
   // First call is a special case, since it does not include a secondary
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_generic_codec.cc b/webrtc/modules/audio_coding/main/acm2/acm_generic_codec.cc
index 3a6a6ef..a47dbda 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_generic_codec.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_generic_codec.cc
@@ -234,8 +234,8 @@
   first_frame_ = false;
   CHECK_EQ(audio_channel, encoder_->NumChannels());
 
-  encoder_->Encode(rtp_timestamp_, audio, length_per_channel,
-                   2 * MAX_PAYLOAD_SIZE_BYTE, bitstream, encoded_info);
+  *encoded_info = encoder_->Encode(rtp_timestamp_, audio, length_per_channel,
+                                   2 * MAX_PAYLOAD_SIZE_BYTE, bitstream);
   *bitstream_len_byte = static_cast<int16_t>(encoded_info->encoded_bytes);
 }
 
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc b/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
index e319b00..9f3b0fe 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -150,9 +150,9 @@
                                                  samples_per_10ms, channels_,
                                                  interleaved_input.get());
 
-      audio_encoder_->Encode(0, interleaved_input.get(),
-                             audio_encoder_->SampleRateHz() / 100,
-                             data_length_ * 2, output, &encoded_info_);
+      encoded_info_ = audio_encoder_->Encode(
+          0, interleaved_input.get(), audio_encoder_->SampleRateHz() / 100,
+          data_length_ * 2, output);
     }
     EXPECT_EQ(payload_type_, encoded_info_.payload_type);
     return static_cast<int>(encoded_info_.encoded_bytes);