Convert channel counts to size_t.

IIRC, this was originally requested by ajm during review of the other size_t conversions I did over the past year, and I agreed it made sense, but wanted to do it separately since those changes were already gargantuan.

BUG=chromium:81439
TEST=none
R=henrik.lundin@webrtc.org, henrika@webrtc.org, kjellander@webrtc.org, minyue@webrtc.org, perkj@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org, tina.legrand@webrtc.org

Review URL: https://codereview.webrtc.org/1316523002 .

Cr-Commit-Position: refs/heads/master@{#11229}
diff --git a/talk/app/webrtc/mediastreaminterface.h b/talk/app/webrtc/mediastreaminterface.h
index 89a4abe..9b137d9 100644
--- a/talk/app/webrtc/mediastreaminterface.h
+++ b/talk/app/webrtc/mediastreaminterface.h
@@ -151,7 +151,7 @@
   virtual void OnData(const void* audio_data,
                       int bits_per_sample,
                       int sample_rate,
-                      int number_of_channels,
+                      size_t number_of_channels,
                       size_t number_of_frames) = 0;
 
  protected:
diff --git a/talk/app/webrtc/rtpsender.cc b/talk/app/webrtc/rtpsender.cc
index c0d23a0..91e484b 100644
--- a/talk/app/webrtc/rtpsender.cc
+++ b/talk/app/webrtc/rtpsender.cc
@@ -44,7 +44,7 @@
 void LocalAudioSinkAdapter::OnData(const void* audio_data,
                                    int bits_per_sample,
                                    int sample_rate,
-                                   int number_of_channels,
+                                   size_t number_of_channels,
                                    size_t number_of_frames) {
   rtc::CritScope lock(&lock_);
   if (sink_) {
diff --git a/talk/app/webrtc/rtpsender.h b/talk/app/webrtc/rtpsender.h
index d5f88a9..dd846b5 100644
--- a/talk/app/webrtc/rtpsender.h
+++ b/talk/app/webrtc/rtpsender.h
@@ -57,7 +57,7 @@
   void OnData(const void* audio_data,
               int bits_per_sample,
               int sample_rate,
-              int number_of_channels,
+              size_t number_of_channels,
               size_t number_of_frames) override;
 
   // cricket::AudioRenderer implementation.
diff --git a/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc b/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
index e2dc123..6b675a9 100644
--- a/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
+++ b/talk/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
@@ -58,7 +58,7 @@
   int32_t RecordedDataIsAvailable(const void* audioSamples,
                                   const size_t nSamples,
                                   const size_t nBytesPerSample,
-                                  const uint8_t nChannels,
+                                  const size_t nChannels,
                                   const uint32_t samplesPerSec,
                                   const uint32_t totalDelayMS,
                                   const int32_t clockDrift,
@@ -82,7 +82,7 @@
   // ADM is pulling data.
   int32_t NeedMorePlayData(const size_t nSamples,
                            const size_t nBytesPerSample,
-                           const uint8_t nChannels,
+                           const size_t nChannels,
                            const uint32_t samplesPerSec,
                            void* audioSamples,
                            size_t& nSamplesOut,
diff --git a/talk/app/webrtc/webrtcsdp.cc b/talk/app/webrtc/webrtcsdp.cc
index 07a4eb9..e287e90 100644
--- a/talk/app/webrtc/webrtcsdp.cc
+++ b/talk/app/webrtc/webrtcsdp.cc
@@ -2064,7 +2064,7 @@
 struct StaticPayloadAudioCodec {
   const char* name;
   int clockrate;
-  int channels;
+  size_t channels;
 };
 static const StaticPayloadAudioCodec kStaticPayloadAudioCodecs[] = {
   { "PCMU", 8000, 1 },
@@ -2103,7 +2103,7 @@
         payload_type < arraysize(kStaticPayloadAudioCodecs)) {
       std::string encoding_name = kStaticPayloadAudioCodecs[payload_type].name;
       int clock_rate = kStaticPayloadAudioCodecs[payload_type].clockrate;
-      int channels = kStaticPayloadAudioCodecs[payload_type].channels;
+      size_t channels = kStaticPayloadAudioCodecs[payload_type].channels;
       media_desc->AddCodec(cricket::AudioCodec(payload_type, encoding_name,
                                                clock_rate, 0, channels,
                                                preference));
@@ -2838,7 +2838,7 @@
 // Updates or creates a new codec entry in the audio description with according
 // to |name|, |clockrate|, |bitrate|, |channels| and |preference|.
 void UpdateCodec(int payload_type, const std::string& name, int clockrate,
-                 int bitrate, int channels, int preference,
+                 int bitrate, size_t channels, int preference,
                  AudioContentDescription* audio_desc) {
   // Codec may already be populated with (only) optional parameters
   // (from an fmtp).
@@ -2937,7 +2937,7 @@
     // of audio channels.  This parameter is OPTIONAL and may be
     // omitted if the number of channels is one, provided that no
     // additional parameters are needed.
-    int channels = 1;
+    size_t channels = 1;
     if (codec_params.size() == 3) {
       if (!GetValueFromString(line, codec_params[2], &channels, error)) {
         return false;
diff --git a/talk/media/base/audiorenderer.h b/talk/media/base/audiorenderer.h
index 229c36e..a42cd7d 100644
--- a/talk/media/base/audiorenderer.h
+++ b/talk/media/base/audiorenderer.h
@@ -41,7 +41,7 @@
     virtual void OnData(const void* audio_data,
                         int bits_per_sample,
                         int sample_rate,
-                        int number_of_channels,
+                        size_t number_of_channels,
                         size_t number_of_frames) = 0;
 
     // Called when the AudioRenderer is going away.
diff --git a/talk/media/base/codec.cc b/talk/media/base/codec.cc
index 101d2be..59708b3 100644
--- a/talk/media/base/codec.cc
+++ b/talk/media/base/codec.cc
@@ -167,7 +167,7 @@
                        const std::string& name,
                        int clockrate,
                        int bitrate,
-                       int channels,
+                       size_t channels,
                        int preference)
     : Codec(id, name, clockrate, preference),
       bitrate(bitrate),
diff --git a/talk/media/base/codec.h b/talk/media/base/codec.h
index fcdb6ea..da78e1c 100644
--- a/talk/media/base/codec.h
+++ b/talk/media/base/codec.h
@@ -128,14 +128,14 @@
 
 struct AudioCodec : public Codec {
   int bitrate;
-  int channels;
+  size_t channels;
 
   // Creates a codec with the given parameters.
   AudioCodec(int id,
              const std::string& name,
              int clockrate,
              int bitrate,
-             int channels,
+             size_t channels,
              int preference);
   // Creates an empty codec.
   AudioCodec();
diff --git a/talk/media/base/fakemediaengine.h b/talk/media/base/fakemediaengine.h
index f5b2174..149704f 100644
--- a/talk/media/base/fakemediaengine.h
+++ b/talk/media/base/fakemediaengine.h
@@ -368,7 +368,7 @@
     void OnData(const void* audio_data,
                 int bits_per_sample,
                 int sample_rate,
-                int number_of_channels,
+                size_t number_of_channels,
                 size_t number_of_frames) override {}
     void OnClose() override { renderer_ = NULL; }
     AudioRenderer* renderer() const { return renderer_; }
diff --git a/talk/media/webrtc/fakewebrtcvoiceengine.h b/talk/media/webrtc/fakewebrtcvoiceengine.h
index eb3b4d3..65ba927 100644
--- a/talk/media/webrtc/fakewebrtcvoiceengine.h
+++ b/talk/media/webrtc/fakewebrtcvoiceengine.h
@@ -77,10 +77,10 @@
   WEBRTC_STUB_CONST(input_sample_rate_hz, ());
   WEBRTC_STUB_CONST(proc_sample_rate_hz, ());
   WEBRTC_STUB_CONST(proc_split_sample_rate_hz, ());
-  WEBRTC_STUB_CONST(num_input_channels, ());
-  WEBRTC_STUB_CONST(num_proc_channels, ());
-  WEBRTC_STUB_CONST(num_output_channels, ());
-  WEBRTC_STUB_CONST(num_reverse_channels, ());
+  size_t num_input_channels() const override { return 0; }
+  size_t num_proc_channels() const override { return 0; }
+  size_t num_output_channels() const override { return 0; }
+  size_t num_reverse_channels() const override { return 0; }
   WEBRTC_VOID_STUB(set_output_will_be_muted, (bool muted));
   WEBRTC_STUB(ProcessStream, (webrtc::AudioFrame* frame));
   WEBRTC_STUB(ProcessStream, (
diff --git a/talk/media/webrtc/webrtcvoiceengine.cc b/talk/media/webrtc/webrtcvoiceengine.cc
index 9eee2af..9742564 100644
--- a/talk/media/webrtc/webrtcvoiceengine.cc
+++ b/talk/media/webrtc/webrtcvoiceengine.cc
@@ -410,7 +410,7 @@
   struct CodecPref {
     const char* name;
     int clockrate;
-    int channels;
+    size_t channels;
     int payload_type;
     bool is_multi_rate;
     int packet_sizes_ms[kMaxNumPacketSize];
@@ -1155,7 +1155,7 @@
   void OnData(const void* audio_data,
               int bits_per_sample,
               int sample_rate,
-              int number_of_channels,
+              size_t number_of_channels,
               size_t number_of_frames) override {
     RTC_DCHECK(!worker_thread_checker_.CalledOnValidThread());
     RTC_DCHECK(audio_capture_thread_checker_.CalledOnValidThread());
diff --git a/webrtc/audio/audio_sink.h b/webrtc/audio/audio_sink.h
index d022b32..999644f 100644
--- a/webrtc/audio/audio_sink.h
+++ b/webrtc/audio/audio_sink.h
@@ -30,7 +30,7 @@
     Data(int16_t* data,
          size_t samples_per_channel,
          int sample_rate,
-         int channels,
+         size_t channels,
          uint32_t timestamp)
         : data(data),
           samples_per_channel(samples_per_channel),
@@ -41,7 +41,7 @@
     int16_t* data;               // The actual 16bit audio data.
     size_t samples_per_channel;  // Number of frames in the buffer.
     int sample_rate;             // Sample rate in Hz.
-    int channels;                // Number of channels in the audio data.
+    size_t channels;             // Number of channels in the audio data.
     uint32_t timestamp;          // The RTP timestamp of the first sample.
   };
 
diff --git a/webrtc/common_audio/audio_converter.cc b/webrtc/common_audio/audio_converter.cc
index 9bb5895..9ebfabc 100644
--- a/webrtc/common_audio/audio_converter.cc
+++ b/webrtc/common_audio/audio_converter.cc
@@ -25,7 +25,7 @@
 
 class CopyConverter : public AudioConverter {
  public:
-  CopyConverter(int src_channels, size_t src_frames, int dst_channels,
+  CopyConverter(size_t src_channels, size_t src_frames, size_t dst_channels,
                 size_t dst_frames)
       : AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {}
   ~CopyConverter() override {};
@@ -34,7 +34,7 @@
                size_t dst_capacity) override {
     CheckSizes(src_size, dst_capacity);
     if (src != dst) {
-      for (int i = 0; i < src_channels(); ++i)
+      for (size_t i = 0; i < src_channels(); ++i)
         std::memcpy(dst[i], src[i], dst_frames() * sizeof(*dst[i]));
     }
   }
@@ -42,7 +42,7 @@
 
 class UpmixConverter : public AudioConverter {
  public:
-  UpmixConverter(int src_channels, size_t src_frames, int dst_channels,
+  UpmixConverter(size_t src_channels, size_t src_frames, size_t dst_channels,
                  size_t dst_frames)
       : AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {}
   ~UpmixConverter() override {};
@@ -52,7 +52,7 @@
     CheckSizes(src_size, dst_capacity);
     for (size_t i = 0; i < dst_frames(); ++i) {
       const float value = src[0][i];
-      for (int j = 0; j < dst_channels(); ++j)
+      for (size_t j = 0; j < dst_channels(); ++j)
         dst[j][i] = value;
     }
   }
@@ -60,7 +60,7 @@
 
 class DownmixConverter : public AudioConverter {
  public:
-  DownmixConverter(int src_channels, size_t src_frames, int dst_channels,
+  DownmixConverter(size_t src_channels, size_t src_frames, size_t dst_channels,
                    size_t dst_frames)
       : AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {
   }
@@ -72,7 +72,7 @@
     float* dst_mono = dst[0];
     for (size_t i = 0; i < src_frames(); ++i) {
       float sum = 0;
-      for (int j = 0; j < src_channels(); ++j)
+      for (size_t j = 0; j < src_channels(); ++j)
         sum += src[j][i];
       dst_mono[i] = sum / src_channels();
     }
@@ -81,11 +81,11 @@
 
 class ResampleConverter : public AudioConverter {
  public:
-  ResampleConverter(int src_channels, size_t src_frames, int dst_channels,
+  ResampleConverter(size_t src_channels, size_t src_frames, size_t dst_channels,
                     size_t dst_frames)
       : AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {
     resamplers_.reserve(src_channels);
-    for (int i = 0; i < src_channels; ++i)
+    for (size_t i = 0; i < src_channels; ++i)
       resamplers_.push_back(new PushSincResampler(src_frames, dst_frames));
   }
   ~ResampleConverter() override {};
@@ -136,9 +136,9 @@
   ScopedVector<ChannelBuffer<float>> buffers_;
 };
 
-rtc::scoped_ptr<AudioConverter> AudioConverter::Create(int src_channels,
+rtc::scoped_ptr<AudioConverter> AudioConverter::Create(size_t src_channels,
                                                        size_t src_frames,
-                                                       int dst_channels,
+                                                       size_t dst_channels,
                                                        size_t dst_frames) {
   rtc::scoped_ptr<AudioConverter> sp;
   if (src_channels > dst_channels) {
@@ -183,8 +183,8 @@
       dst_channels_(0),
       dst_frames_(0) {}
 
-AudioConverter::AudioConverter(int src_channels, size_t src_frames,
-                               int dst_channels, size_t dst_frames)
+AudioConverter::AudioConverter(size_t src_channels, size_t src_frames,
+                               size_t dst_channels, size_t dst_frames)
     : src_channels_(src_channels),
       src_frames_(src_frames),
       dst_channels_(dst_channels),
diff --git a/webrtc/common_audio/audio_converter.h b/webrtc/common_audio/audio_converter.h
index 7d1513b..c5f08c1 100644
--- a/webrtc/common_audio/audio_converter.h
+++ b/webrtc/common_audio/audio_converter.h
@@ -26,9 +26,9 @@
  public:
   // Returns a new AudioConverter, which will use the supplied format for its
   // lifetime. Caller is responsible for the memory.
-  static rtc::scoped_ptr<AudioConverter> Create(int src_channels,
+  static rtc::scoped_ptr<AudioConverter> Create(size_t src_channels,
                                                 size_t src_frames,
-                                                int dst_channels,
+                                                size_t dst_channels,
                                                 size_t dst_frames);
   virtual ~AudioConverter() {};
 
@@ -39,23 +39,23 @@
   virtual void Convert(const float* const* src, size_t src_size,
                        float* const* dst, size_t dst_capacity) = 0;
 
-  int src_channels() const { return src_channels_; }
+  size_t src_channels() const { return src_channels_; }
   size_t src_frames() const { return src_frames_; }
-  int dst_channels() const { return dst_channels_; }
+  size_t dst_channels() const { return dst_channels_; }
   size_t dst_frames() const { return dst_frames_; }
 
  protected:
   AudioConverter();
-  AudioConverter(int src_channels, size_t src_frames, int dst_channels,
+  AudioConverter(size_t src_channels, size_t src_frames, size_t dst_channels,
                  size_t dst_frames);
 
   // Helper to RTC_CHECK that inputs are correctly sized.
   void CheckSizes(size_t src_size, size_t dst_capacity) const;
 
  private:
-  const int src_channels_;
+  const size_t src_channels_;
   const size_t src_frames_;
-  const int dst_channels_;
+  const size_t dst_channels_;
   const size_t dst_frames_;
 
   RTC_DISALLOW_COPY_AND_ASSIGN(AudioConverter);
diff --git a/webrtc/common_audio/audio_converter_unittest.cc b/webrtc/common_audio/audio_converter_unittest.cc
index e373d78..dace0bd 100644
--- a/webrtc/common_audio/audio_converter_unittest.cc
+++ b/webrtc/common_audio/audio_converter_unittest.cc
@@ -26,9 +26,9 @@
 
 // Sets the signal value to increase by |data| with every sample.
 ScopedBuffer CreateBuffer(const std::vector<float>& data, size_t frames) {
-  const int num_channels = static_cast<int>(data.size());
+  const size_t num_channels = data.size();
   ScopedBuffer sb(new ChannelBuffer<float>(frames, num_channels));
-  for (int i = 0; i < num_channels; ++i)
+  for (size_t i = 0; i < num_channels; ++i)
     for (size_t j = 0; j < frames; ++j)
       sb->channels()[i][j] = data[i] * j;
   return sb;
@@ -57,7 +57,7 @@
     float mse = 0;
     float variance = 0;
     float mean = 0;
-    for (int i = 0; i < ref.num_channels(); ++i) {
+    for (size_t i = 0; i < ref.num_channels(); ++i) {
       for (size_t j = 0; j < ref.num_frames() - delay; ++j) {
         float error = ref.channels()[i][j] - test.channels()[i][j + delay];
         mse += error * error;
@@ -86,9 +86,9 @@
 // Sets the source to a linearly increasing signal for which we can easily
 // generate a reference. Runs the AudioConverter and ensures the output has
 // sufficiently high SNR relative to the reference.
-void RunAudioConverterTest(int src_channels,
+void RunAudioConverterTest(size_t src_channels,
                            int src_sample_rate_hz,
-                           int dst_channels,
+                           size_t dst_channels,
                            int dst_sample_rate_hz) {
   const float kSrcLeft = 0.0002f;
   const float kSrcRight = 0.0001f;
@@ -128,8 +128,9 @@
       static_cast<size_t>(
           PushSincResampler::AlgorithmicDelaySeconds(src_sample_rate_hz) *
           dst_sample_rate_hz);
-  printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
-      src_channels, src_sample_rate_hz, dst_channels, dst_sample_rate_hz);
+  // SNR reported on the same line later.
+  printf("(%" PRIuS ", %d Hz) -> (%" PRIuS ", %d Hz) ",
+         src_channels, src_sample_rate_hz, dst_channels, dst_sample_rate_hz);
 
   rtc::scoped_ptr<AudioConverter> converter = AudioConverter::Create(
       src_channels, src_frames, dst_channels, dst_frames);
@@ -142,7 +143,7 @@
 
 TEST(AudioConverterTest, ConversionsPassSNRThreshold) {
   const int kSampleRates[] = {8000, 16000, 32000, 44100, 48000};
-  const int kChannels[] = {1, 2};
+  const size_t kChannels[] = {1, 2};
   for (size_t src_rate = 0; src_rate < arraysize(kSampleRates); ++src_rate) {
     for (size_t dst_rate = 0; dst_rate < arraysize(kSampleRates); ++dst_rate) {
       for (size_t src_channel = 0; src_channel < arraysize(kChannels);
diff --git a/webrtc/common_audio/blocker.cc b/webrtc/common_audio/blocker.cc
index 0133550..13432f2 100644
--- a/webrtc/common_audio/blocker.cc
+++ b/webrtc/common_audio/blocker.cc
@@ -22,10 +22,10 @@
                const float* const* b,
                int b_start_index,
                size_t num_frames,
-               int num_channels,
+               size_t num_channels,
                float* const* result,
                size_t result_start_index) {
-  for (int i = 0; i < num_channels; ++i) {
+  for (size_t i = 0; i < num_channels; ++i) {
     for (size_t j = 0; j < num_frames; ++j) {
       result[i][j + result_start_index] =
           a[i][j + a_start_index] + b[i][j + b_start_index];
@@ -37,10 +37,10 @@
 void CopyFrames(const float* const* src,
                 size_t src_start_index,
                 size_t num_frames,
-                int num_channels,
+                size_t num_channels,
                 float* const* dst,
                 size_t dst_start_index) {
-  for (int i = 0; i < num_channels; ++i) {
+  for (size_t i = 0; i < num_channels; ++i) {
     memcpy(&dst[i][dst_start_index],
            &src[i][src_start_index],
            num_frames * sizeof(dst[i][dst_start_index]));
@@ -51,10 +51,10 @@
 void MoveFrames(const float* const* src,
                 size_t src_start_index,
                 size_t num_frames,
-                int num_channels,
+                size_t num_channels,
                 float* const* dst,
                 size_t dst_start_index) {
-  for (int i = 0; i < num_channels; ++i) {
+  for (size_t i = 0; i < num_channels; ++i) {
     memmove(&dst[i][dst_start_index],
             &src[i][src_start_index],
             num_frames * sizeof(dst[i][dst_start_index]));
@@ -64,8 +64,8 @@
 void ZeroOut(float* const* buffer,
              size_t starting_idx,
              size_t num_frames,
-             int num_channels) {
-  for (int i = 0; i < num_channels; ++i) {
+             size_t num_channels) {
+  for (size_t i = 0; i < num_channels; ++i) {
     memset(&buffer[i][starting_idx], 0,
            num_frames * sizeof(buffer[i][starting_idx]));
   }
@@ -75,9 +75,9 @@
 // stored in |frames|.
 void ApplyWindow(const float* window,
                  size_t num_frames,
-                 int num_channels,
+                 size_t num_channels,
                  float* const* frames) {
-  for (int i = 0; i < num_channels; ++i) {
+  for (size_t i = 0; i < num_channels; ++i) {
     for (size_t j = 0; j < num_frames; ++j) {
       frames[i][j] = frames[i][j] * window[j];
     }
@@ -100,8 +100,8 @@
 
 Blocker::Blocker(size_t chunk_size,
                  size_t block_size,
-                 int num_input_channels,
-                 int num_output_channels,
+                 size_t num_input_channels,
+                 size_t num_output_channels,
                  const float* window,
                  size_t shift_amount,
                  BlockerCallback* callback)
@@ -166,8 +166,8 @@
 // TODO(claguna): Look at using ring buffers to eliminate some copies.
 void Blocker::ProcessChunk(const float* const* input,
                            size_t chunk_size,
-                           int num_input_channels,
-                           int num_output_channels,
+                           size_t num_input_channels,
+                           size_t num_output_channels,
                            float* const* output) {
   RTC_CHECK_EQ(chunk_size, chunk_size_);
   RTC_CHECK_EQ(num_input_channels, num_input_channels_);
diff --git a/webrtc/common_audio/blocker.h b/webrtc/common_audio/blocker.h
index 025638a..3a67c13 100644
--- a/webrtc/common_audio/blocker.h
+++ b/webrtc/common_audio/blocker.h
@@ -26,8 +26,8 @@
 
   virtual void ProcessBlock(const float* const* input,
                             size_t num_frames,
-                            int num_input_channels,
-                            int num_output_channels,
+                            size_t num_input_channels,
+                            size_t num_output_channels,
                             float* const* output) = 0;
 };
 
@@ -65,23 +65,23 @@
  public:
   Blocker(size_t chunk_size,
           size_t block_size,
-          int num_input_channels,
-          int num_output_channels,
+          size_t num_input_channels,
+          size_t num_output_channels,
           const float* window,
           size_t shift_amount,
           BlockerCallback* callback);
 
   void ProcessChunk(const float* const* input,
                     size_t chunk_size,
-                    int num_input_channels,
-                    int num_output_channels,
+                    size_t num_input_channels,
+                    size_t num_output_channels,
                     float* const* output);
 
  private:
   const size_t chunk_size_;
   const size_t block_size_;
-  const int num_input_channels_;
-  const int num_output_channels_;
+  const size_t num_input_channels_;
+  const size_t num_output_channels_;
 
   // The number of frames of delay to add at the beginning of the first chunk.
   const size_t initial_delay_;
diff --git a/webrtc/common_audio/blocker_unittest.cc b/webrtc/common_audio/blocker_unittest.cc
index 065c09e..a5a7b56 100644
--- a/webrtc/common_audio/blocker_unittest.cc
+++ b/webrtc/common_audio/blocker_unittest.cc
@@ -20,10 +20,10 @@
  public:
   void ProcessBlock(const float* const* input,
                     size_t num_frames,
-                    int num_input_channels,
-                    int num_output_channels,
+                    size_t num_input_channels,
+                    size_t num_output_channels,
                     float* const* output) override {
-    for (int i = 0; i < num_output_channels; ++i) {
+    for (size_t i = 0; i < num_output_channels; ++i) {
       for (size_t j = 0; j < num_frames; ++j) {
         output[i][j] = input[i][j] + 3;
       }
@@ -36,10 +36,10 @@
  public:
   void ProcessBlock(const float* const* input,
                     size_t num_frames,
-                    int num_input_channels,
-                    int num_output_channels,
+                    size_t num_input_channels,
+                    size_t num_output_channels,
                     float* const* output) override {
-    for (int i = 0; i < num_output_channels; ++i) {
+    for (size_t i = 0; i < num_output_channels; ++i) {
       for (size_t j = 0; j < num_frames; ++j) {
         output[i][j] = input[i][j];
       }
@@ -63,8 +63,8 @@
                float* const* input_chunk,
                float* const* output,
                float* const* output_chunk,
-               int num_input_channels,
-               int num_output_channels) {
+               size_t num_input_channels,
+               size_t num_output_channels) {
     size_t start = 0;
     size_t end = chunk_size - 1;
     while (end < num_frames) {
@@ -83,9 +83,9 @@
 
   void ValidateSignalEquality(const float* const* expected,
                               const float* const* actual,
-                              int num_channels,
+                              size_t num_channels,
                               size_t num_frames) {
-    for (int i = 0; i < num_channels; ++i) {
+    for (size_t i = 0; i < num_channels; ++i) {
       for (size_t j = 0; j < num_frames; ++j) {
         EXPECT_FLOAT_EQ(expected[i][j], actual[i][j]);
       }
@@ -93,10 +93,10 @@
   }
 
   void ValidateInitialDelay(const float* const* output,
-                            int num_channels,
+                            size_t num_channels,
                             size_t num_frames,
                             size_t initial_delay) {
-    for (int i = 0; i < num_channels; ++i) {
+    for (size_t i = 0; i < num_channels; ++i) {
       for (size_t j = 0; j < num_frames; ++j) {
         if (j < initial_delay) {
           EXPECT_FLOAT_EQ(output[i][j], 0.f);
@@ -110,10 +110,10 @@
   static void CopyTo(float* const* dst,
                      size_t start_index_dst,
                      size_t start_index_src,
-                     int num_channels,
+                     size_t num_channels,
                      size_t num_frames,
                      const float* const* src) {
-    for (int i = 0; i < num_channels; ++i) {
+    for (size_t i = 0; i < num_channels; ++i) {
       memcpy(&dst[i][start_index_dst],
              &src[i][start_index_src],
              num_frames * sizeof(float));
@@ -122,8 +122,8 @@
 };
 
 TEST_F(BlockerTest, TestBlockerMutuallyPrimeChunkandBlockSize) {
-  const int kNumInputChannels = 3;
-  const int kNumOutputChannels = 2;
+  const size_t kNumInputChannels = 3;
+  const size_t kNumOutputChannels = 2;
   const size_t kNumFrames = 10;
   const size_t kBlockSize = 4;
   const size_t kChunkSize = 5;
@@ -175,8 +175,8 @@
 }
 
 TEST_F(BlockerTest, TestBlockerMutuallyPrimeShiftAndBlockSize) {
-  const int kNumInputChannels = 3;
-  const int kNumOutputChannels = 2;
+  const size_t kNumInputChannels = 3;
+  const size_t kNumOutputChannels = 2;
   const size_t kNumFrames = 12;
   const size_t kBlockSize = 4;
   const size_t kChunkSize = 6;
@@ -228,8 +228,8 @@
 }
 
 TEST_F(BlockerTest, TestBlockerNoOverlap) {
-  const int kNumInputChannels = 3;
-  const int kNumOutputChannels = 2;
+  const size_t kNumInputChannels = 3;
+  const size_t kNumOutputChannels = 2;
   const size_t kNumFrames = 12;
   const size_t kBlockSize = 4;
   const size_t kChunkSize = 4;
@@ -281,8 +281,8 @@
 }
 
 TEST_F(BlockerTest, InitialDelaysAreMinimum) {
-  const int kNumInputChannels = 3;
-  const int kNumOutputChannels = 2;
+  const size_t kNumInputChannels = 3;
+  const size_t kNumOutputChannels = 2;
   const size_t kNumFrames = 1280;
   const size_t kChunkSize[] =
       {80, 80, 80, 80, 80, 80, 160, 160, 160, 160, 160, 160};
@@ -294,7 +294,7 @@
       {48, 48, 48, 112, 112, 112, 96, 96, 96, 224, 224, 224};
 
   float input[kNumInputChannels][kNumFrames];
-  for (int i = 0; i < kNumInputChannels; ++i) {
+  for (size_t i = 0; i < kNumInputChannels; ++i) {
     for (size_t j = 0; j < kNumFrames; ++j) {
       input[i][j] = i + 1;
     }
diff --git a/webrtc/common_audio/channel_buffer.cc b/webrtc/common_audio/channel_buffer.cc
index d3dc7c0..44520c6 100644
--- a/webrtc/common_audio/channel_buffer.cc
+++ b/webrtc/common_audio/channel_buffer.cc
@@ -13,7 +13,7 @@
 namespace webrtc {
 
 IFChannelBuffer::IFChannelBuffer(size_t num_frames,
-                                 int num_channels,
+                                 size_t num_channels,
                                  size_t num_bands)
     : ivalid_(true),
       ibuf_(num_frames, num_channels, num_bands),
@@ -47,7 +47,7 @@
     assert(ivalid_);
     const int16_t* const* int_channels = ibuf_.channels();
     float* const* float_channels = fbuf_.channels();
-    for (int i = 0; i < ibuf_.num_channels(); ++i) {
+    for (size_t i = 0; i < ibuf_.num_channels(); ++i) {
       for (size_t j = 0; j < ibuf_.num_frames(); ++j) {
         float_channels[i][j] = int_channels[i][j];
       }
@@ -61,7 +61,7 @@
     assert(fvalid_);
     int16_t* const* int_channels = ibuf_.channels();
     const float* const* float_channels = fbuf_.channels();
-    for (int i = 0; i < ibuf_.num_channels(); ++i) {
+    for (size_t i = 0; i < ibuf_.num_channels(); ++i) {
       FloatS16ToS16(float_channels[i],
                     ibuf_.num_frames(),
                     int_channels[i]);
diff --git a/webrtc/common_audio/channel_buffer.h b/webrtc/common_audio/channel_buffer.h
index 6050090..d906916 100644
--- a/webrtc/common_audio/channel_buffer.h
+++ b/webrtc/common_audio/channel_buffer.h
@@ -40,7 +40,7 @@
 class ChannelBuffer {
  public:
   ChannelBuffer(size_t num_frames,
-                int num_channels,
+                size_t num_channels,
                 size_t num_bands = 1)
       : data_(new T[num_frames * num_channels]()),
         channels_(new T*[num_channels * num_bands]),
@@ -49,7 +49,7 @@
         num_frames_per_band_(num_frames / num_bands),
         num_channels_(num_channels),
         num_bands_(num_bands) {
-    for (int i = 0; i < num_channels_; ++i) {
+    for (size_t i = 0; i < num_channels_; ++i) {
       for (size_t j = 0; j < num_bands_; ++j) {
         channels_[j * num_channels_ + i] =
             &data_[i * num_frames_ + j * num_frames_per_band_];
@@ -90,12 +90,12 @@
   // 0 <= channel < |num_channels_|
   // 0 <= band < |num_bands_|
   // 0 <= sample < |num_frames_per_band_|
-  const T* const* bands(int channel) const {
+  const T* const* bands(size_t channel) const {
     RTC_DCHECK_LT(channel, num_channels_);
-    RTC_DCHECK_GE(channel, 0);
+    RTC_DCHECK_GE(channel, 0u);
     return &bands_[channel * num_bands_];
   }
-  T* const* bands(int channel) {
+  T* const* bands(size_t channel) {
     const ChannelBuffer<T>* t = this;
     return const_cast<T* const*>(t->bands(channel));
   }
@@ -104,7 +104,7 @@
   // Returns |slice| for convenience.
   const T* const* Slice(T** slice, size_t start_frame) const {
     RTC_DCHECK_LT(start_frame, num_frames_);
-    for (int i = 0; i < num_channels_; ++i)
+    for (size_t i = 0; i < num_channels_; ++i)
       slice[i] = &channels_[i][start_frame];
     return slice;
   }
@@ -115,7 +115,7 @@
 
   size_t num_frames() const { return num_frames_; }
   size_t num_frames_per_band() const { return num_frames_per_band_; }
-  int num_channels() const { return num_channels_; }
+  size_t num_channels() const { return num_channels_; }
   size_t num_bands() const { return num_bands_; }
   size_t size() const {return num_frames_ * num_channels_; }
 
@@ -130,7 +130,7 @@
   rtc::scoped_ptr<T* []> bands_;
   const size_t num_frames_;
   const size_t num_frames_per_band_;
-  const int num_channels_;
+  const size_t num_channels_;
   const size_t num_bands_;
 };
 
@@ -142,7 +142,7 @@
 // fbuf() until the next call to any of the other functions.
 class IFChannelBuffer {
  public:
-  IFChannelBuffer(size_t num_frames, int num_channels, size_t num_bands = 1);
+  IFChannelBuffer(size_t num_frames, size_t num_channels, size_t num_bands = 1);
 
   ChannelBuffer<int16_t>* ibuf();
   ChannelBuffer<float>* fbuf();
@@ -151,7 +151,7 @@
 
   size_t num_frames() const { return ibuf_.num_frames(); }
   size_t num_frames_per_band() const { return ibuf_.num_frames_per_band(); }
-  int num_channels() const { return ibuf_.num_channels(); }
+  size_t num_channels() const { return ibuf_.num_channels(); }
   size_t num_bands() const { return ibuf_.num_bands(); }
 
  private:
diff --git a/webrtc/common_audio/include/audio_util.h b/webrtc/common_audio/include/audio_util.h
index 2c0028c..55dfc06 100644
--- a/webrtc/common_audio/include/audio_util.h
+++ b/webrtc/common_audio/include/audio_util.h
@@ -87,11 +87,11 @@
 template <typename T>
 void Deinterleave(const T* interleaved,
                   size_t samples_per_channel,
-                  int num_channels,
+                  size_t num_channels,
                   T* const* deinterleaved) {
-  for (int i = 0; i < num_channels; ++i) {
+  for (size_t i = 0; i < num_channels; ++i) {
     T* channel = deinterleaved[i];
-    int interleaved_idx = i;
+    size_t interleaved_idx = i;
     for (size_t j = 0; j < samples_per_channel; ++j) {
       channel[j] = interleaved[interleaved_idx];
       interleaved_idx += num_channels;
@@ -105,11 +105,11 @@
 template <typename T>
 void Interleave(const T* const* deinterleaved,
                 size_t samples_per_channel,
-                int num_channels,
+                size_t num_channels,
                 T* interleaved) {
-  for (int i = 0; i < num_channels; ++i) {
+  for (size_t i = 0; i < num_channels; ++i) {
     const T* channel = deinterleaved[i];
-    int interleaved_idx = i;
+    size_t interleaved_idx = i;
     for (size_t j = 0; j < samples_per_channel; ++j) {
       interleaved[interleaved_idx] = channel[j];
       interleaved_idx += num_channels;
diff --git a/webrtc/common_audio/lapped_transform.cc b/webrtc/common_audio/lapped_transform.cc
index c01f1d9..5ab1db1 100644
--- a/webrtc/common_audio/lapped_transform.cc
+++ b/webrtc/common_audio/lapped_transform.cc
@@ -21,14 +21,14 @@
 
 void LappedTransform::BlockThunk::ProcessBlock(const float* const* input,
                                                size_t num_frames,
-                                               int num_input_channels,
-                                               int num_output_channels,
+                                               size_t num_input_channels,
+                                               size_t num_output_channels,
                                                float* const* output) {
   RTC_CHECK_EQ(num_input_channels, parent_->num_in_channels_);
   RTC_CHECK_EQ(num_output_channels, parent_->num_out_channels_);
   RTC_CHECK_EQ(parent_->block_length_, num_frames);
 
-  for (int i = 0; i < num_input_channels; ++i) {
+  for (size_t i = 0; i < num_input_channels; ++i) {
     memcpy(parent_->real_buf_.Row(i), input[i],
            num_frames * sizeof(*input[0]));
     parent_->fft_->Forward(parent_->real_buf_.Row(i),
@@ -44,7 +44,7 @@
                                                num_output_channels,
                                                parent_->cplx_post_.Array());
 
-  for (int i = 0; i < num_output_channels; ++i) {
+  for (size_t i = 0; i < num_output_channels; ++i) {
     parent_->fft_->Inverse(parent_->cplx_post_.Row(i),
                            parent_->real_buf_.Row(i));
     memcpy(output[i], parent_->real_buf_.Row(i),
@@ -52,8 +52,8 @@
   }
 }
 
-LappedTransform::LappedTransform(int num_in_channels,
-                                 int num_out_channels,
+LappedTransform::LappedTransform(size_t num_in_channels,
+                                 size_t num_out_channels,
                                  size_t chunk_length,
                                  const float* window,
                                  size_t block_length,
diff --git a/webrtc/common_audio/lapped_transform.h b/webrtc/common_audio/lapped_transform.h
index 21e10e3..1373ca1 100644
--- a/webrtc/common_audio/lapped_transform.h
+++ b/webrtc/common_audio/lapped_transform.h
@@ -35,8 +35,8 @@
     virtual ~Callback() {}
 
     virtual void ProcessAudioBlock(const std::complex<float>* const* in_block,
-                                   int num_in_channels, size_t frames,
-                                   int num_out_channels,
+                                   size_t num_in_channels, size_t frames,
+                                   size_t num_out_channels,
                                    std::complex<float>* const* out_block) = 0;
   };
 
@@ -46,8 +46,8 @@
   // |block_length| defines the length of a block, in samples.
   // |shift_amount| is in samples. |callback| is the caller-owned audio
   // processing function called for each block of the input chunk.
-  LappedTransform(int num_in_channels,
-                  int num_out_channels,
+  LappedTransform(size_t num_in_channels,
+                  size_t num_out_channels,
                   size_t chunk_length,
                   const float* window,
                   size_t block_length,
@@ -75,7 +75,7 @@
   // in_chunk.
   //
   // Returns the same num_in_channels passed to the LappedTransform constructor.
-  int num_in_channels() const { return num_in_channels_; }
+  size_t num_in_channels() const { return num_in_channels_; }
 
   // Get the number of output channels.
   //
@@ -84,7 +84,7 @@
   //
   // Returns the same num_out_channels passed to the LappedTransform
   // constructor.
-  int num_out_channels() const { return num_out_channels_; }
+  size_t num_out_channels() const { return num_out_channels_; }
 
  private:
   // Internal middleware callback, given to the blocker. Transforms each block
@@ -93,16 +93,18 @@
    public:
     explicit BlockThunk(LappedTransform* parent) : parent_(parent) {}
 
-    virtual void ProcessBlock(const float* const* input, size_t num_frames,
-                              int num_input_channels, int num_output_channels,
+    virtual void ProcessBlock(const float* const* input,
+                              size_t num_frames,
+                              size_t num_input_channels,
+                              size_t num_output_channels,
                               float* const* output);
 
    private:
     LappedTransform* const parent_;
   } blocker_callback_;
 
-  const int num_in_channels_;
-  const int num_out_channels_;
+  const size_t num_in_channels_;
+  const size_t num_out_channels_;
 
   const size_t block_length_;
   const size_t chunk_length_;
diff --git a/webrtc/common_audio/lapped_transform_unittest.cc b/webrtc/common_audio/lapped_transform_unittest.cc
index eb1c80f..a78488e 100644
--- a/webrtc/common_audio/lapped_transform_unittest.cc
+++ b/webrtc/common_audio/lapped_transform_unittest.cc
@@ -25,12 +25,12 @@
   NoopCallback() : block_num_(0) {}
 
   virtual void ProcessAudioBlock(const complex<float>* const* in_block,
-                                 int in_channels,
+                                 size_t in_channels,
                                  size_t frames,
-                                 int out_channels,
+                                 size_t out_channels,
                                  complex<float>* const* out_block) {
     RTC_CHECK_EQ(in_channels, out_channels);
-    for (int i = 0; i < out_channels; ++i) {
+    for (size_t i = 0; i < out_channels; ++i) {
       memcpy(out_block[i], in_block[i], sizeof(**in_block) * frames);
     }
     ++block_num_;
@@ -49,9 +49,9 @@
   FftCheckerCallback() : block_num_(0) {}
 
   virtual void ProcessAudioBlock(const complex<float>* const* in_block,
-                                 int in_channels,
+                                 size_t in_channels,
                                  size_t frames,
-                                 int out_channels,
+                                 size_t out_channels,
                                  complex<float>* const* out_block) {
     RTC_CHECK_EQ(in_channels, out_channels);
 
@@ -90,7 +90,7 @@
 namespace webrtc {
 
 TEST(LappedTransformTest, Windowless) {
-  const int kChannels = 3;
+  const size_t kChannels = 3;
   const size_t kChunkLength = 512;
   const size_t kBlockLength = 64;
   const size_t kShiftAmount = 64;
@@ -118,7 +118,7 @@
 
   trans.ProcessChunk(in_chunk, out_chunk);
 
-  for (int i = 0; i < kChannels; ++i) {
+  for (size_t i = 0; i < kChannels; ++i) {
     for (size_t j = 0; j < kChunkLength; ++j) {
       ASSERT_NEAR(out_chunk[i][j], 2.0f, 1e-5f);
     }
diff --git a/webrtc/common_audio/resampler/include/push_resampler.h b/webrtc/common_audio/resampler/include/push_resampler.h
index b5c0003..eeda790 100644
--- a/webrtc/common_audio/resampler/include/push_resampler.h
+++ b/webrtc/common_audio/resampler/include/push_resampler.h
@@ -29,7 +29,7 @@
   // Must be called whenever the parameters change. Free to be called at any
   // time as it is a no-op if parameters have not changed since the last call.
   int InitializeIfNeeded(int src_sample_rate_hz, int dst_sample_rate_hz,
-                         int num_channels);
+                         size_t num_channels);
 
   // Returns the total number of samples provided in destination (e.g. 32 kHz,
   // 2 channel audio gives 640 samples).
@@ -40,7 +40,7 @@
   rtc::scoped_ptr<PushSincResampler> sinc_resampler_right_;
   int src_sample_rate_hz_;
   int dst_sample_rate_hz_;
-  int num_channels_;
+  size_t num_channels_;
   rtc::scoped_ptr<T[]> src_left_;
   rtc::scoped_ptr<T[]> src_right_;
   rtc::scoped_ptr<T[]> dst_left_;
diff --git a/webrtc/common_audio/resampler/include/resampler.h b/webrtc/common_audio/resampler/include/resampler.h
index 0d4c1af..e26ac90 100644
--- a/webrtc/common_audio/resampler/include/resampler.h
+++ b/webrtc/common_audio/resampler/include/resampler.h
@@ -28,14 +28,14 @@
 
 public:
     Resampler();
-    Resampler(int inFreq, int outFreq, int num_channels);
+    Resampler(int inFreq, int outFreq, size_t num_channels);
     ~Resampler();
 
     // Reset all states
-    int Reset(int inFreq, int outFreq, int num_channels);
+    int Reset(int inFreq, int outFreq, size_t num_channels);
 
     // Reset all states if any parameter has changed
-    int ResetIfNeeded(int inFreq, int outFreq, int num_channels);
+    int ResetIfNeeded(int inFreq, int outFreq, size_t num_channels);
 
     // Resample samplesIn to samplesOut.
     int Push(const int16_t* samplesIn, size_t lengthIn, int16_t* samplesOut,
@@ -83,7 +83,7 @@
     int my_in_frequency_khz_;
     int my_out_frequency_khz_;
     ResamplerMode my_mode_;
-    int num_channels_;
+    size_t num_channels_;
 
     // Extra instance for stereo
     Resampler* slave_left_;
diff --git a/webrtc/common_audio/resampler/push_resampler.cc b/webrtc/common_audio/resampler/push_resampler.cc
index 566acde..f654e9a 100644
--- a/webrtc/common_audio/resampler/push_resampler.cc
+++ b/webrtc/common_audio/resampler/push_resampler.cc
@@ -32,7 +32,7 @@
 template <typename T>
 int PushResampler<T>::InitializeIfNeeded(int src_sample_rate_hz,
                                          int dst_sample_rate_hz,
-                                         int num_channels) {
+                                         size_t num_channels) {
   if (src_sample_rate_hz == src_sample_rate_hz_ &&
       dst_sample_rate_hz == dst_sample_rate_hz_ &&
       num_channels == num_channels_)
@@ -68,10 +68,8 @@
 template <typename T>
 int PushResampler<T>::Resample(const T* src, size_t src_length, T* dst,
                                size_t dst_capacity) {
-  const size_t src_size_10ms =
-      static_cast<size_t>(src_sample_rate_hz_ * num_channels_ / 100);
-  const size_t dst_size_10ms =
-      static_cast<size_t>(dst_sample_rate_hz_ * num_channels_ / 100);
+  const size_t src_size_10ms = src_sample_rate_hz_ * num_channels_ / 100;
+  const size_t dst_size_10ms = dst_sample_rate_hz_ * num_channels_ / 100;
   if (src_length != src_size_10ms || dst_capacity < dst_size_10ms)
     return -1;
 
diff --git a/webrtc/common_audio/resampler/resampler.cc b/webrtc/common_audio/resampler/resampler.cc
index c9e7a1f..7c690fc 100644
--- a/webrtc/common_audio/resampler/resampler.cc
+++ b/webrtc/common_audio/resampler/resampler.cc
@@ -39,7 +39,7 @@
       slave_right_(nullptr) {
 }
 
-Resampler::Resampler(int inFreq, int outFreq, int num_channels)
+Resampler::Resampler(int inFreq, int outFreq, size_t num_channels)
     : Resampler() {
   Reset(inFreq, outFreq, num_channels);
 }
@@ -76,7 +76,7 @@
     }
 }
 
-int Resampler::ResetIfNeeded(int inFreq, int outFreq, int num_channels)
+int Resampler::ResetIfNeeded(int inFreq, int outFreq, size_t num_channels)
 {
     int tmpInFreq_kHz = inFreq / 1000;
     int tmpOutFreq_kHz = outFreq / 1000;
@@ -91,7 +91,7 @@
     }
 }
 
-int Resampler::Reset(int inFreq, int outFreq, int num_channels)
+int Resampler::Reset(int inFreq, int outFreq, size_t num_channels)
 {
     if (num_channels != 1 && num_channels != 2) {
       return -1;
diff --git a/webrtc/common_audio/wav_file.cc b/webrtc/common_audio/wav_file.cc
index 27d335c..94b7a3c 100644
--- a/webrtc/common_audio/wav_file.cc
+++ b/webrtc/common_audio/wav_file.cc
@@ -99,7 +99,7 @@
 }
 
 WavWriter::WavWriter(const std::string& filename, int sample_rate,
-                     int num_channels)
+                     size_t num_channels)
     : sample_rate_(sample_rate),
       num_channels_(num_channels),
       num_samples_(0),
@@ -153,7 +153,7 @@
 
 rtc_WavWriter* rtc_WavOpen(const char* filename,
                            int sample_rate,
-                           int num_channels) {
+                           size_t num_channels) {
   return reinterpret_cast<rtc_WavWriter*>(
       new webrtc::WavWriter(filename, sample_rate, num_channels));
 }
@@ -172,7 +172,7 @@
   return reinterpret_cast<const webrtc::WavWriter*>(wf)->sample_rate();
 }
 
-int rtc_WavNumChannels(const rtc_WavWriter* wf) {
+size_t rtc_WavNumChannels(const rtc_WavWriter* wf) {
   return reinterpret_cast<const webrtc::WavWriter*>(wf)->num_channels();
 }
 
diff --git a/webrtc/common_audio/wav_file.h b/webrtc/common_audio/wav_file.h
index eb2ce1e..e656eb8 100644
--- a/webrtc/common_audio/wav_file.h
+++ b/webrtc/common_audio/wav_file.h
@@ -27,7 +27,7 @@
   virtual ~WavFile() {}
 
   virtual int sample_rate() const = 0;
-  virtual int num_channels() const = 0;
+  virtual size_t num_channels() const = 0;
   virtual size_t num_samples() const = 0;
 
   // Returns a human-readable string containing the audio format.
@@ -39,7 +39,7 @@
 class WavWriter final : public WavFile {
  public:
   // Open a new WAV file for writing.
-  WavWriter(const std::string& filename, int sample_rate, int num_channels);
+  WavWriter(const std::string& filename, int sample_rate, size_t num_channels);
 
   // Close the WAV file, after writing its header.
   ~WavWriter();
@@ -51,13 +51,13 @@
   void WriteSamples(const int16_t* samples, size_t num_samples);
 
   int sample_rate() const override { return sample_rate_; }
-  int num_channels() const override { return num_channels_; }
+  size_t num_channels() const override { return num_channels_; }
   size_t num_samples() const override { return num_samples_; }
 
  private:
   void Close();
   const int sample_rate_;
-  const int num_channels_;
+  const size_t num_channels_;
   size_t num_samples_;  // Total number of samples written to file.
   FILE* file_handle_;  // Output file, owned by this class
 
@@ -79,13 +79,13 @@
   size_t ReadSamples(size_t num_samples, int16_t* samples);
 
   int sample_rate() const override { return sample_rate_; }
-  int num_channels() const override { return num_channels_; }
+  size_t num_channels() const override { return num_channels_; }
   size_t num_samples() const override { return num_samples_; }
 
  private:
   void Close();
   int sample_rate_;
-  int num_channels_;
+  size_t num_channels_;
   size_t num_samples_;  // Total number of samples in the file.
   size_t num_samples_remaining_;
   FILE* file_handle_;  // Input file, owned by this class.
@@ -102,13 +102,13 @@
 typedef struct rtc_WavWriter rtc_WavWriter;
 rtc_WavWriter* rtc_WavOpen(const char* filename,
                            int sample_rate,
-                           int num_channels);
+                           size_t num_channels);
 void rtc_WavClose(rtc_WavWriter* wf);
 void rtc_WavWriteSamples(rtc_WavWriter* wf,
                          const float* samples,
                          size_t num_samples);
 int rtc_WavSampleRate(const rtc_WavWriter* wf);
-int rtc_WavNumChannels(const rtc_WavWriter* wf);
+size_t rtc_WavNumChannels(const rtc_WavWriter* wf);
 size_t rtc_WavNumSamples(const rtc_WavWriter* wf);
 
 #ifdef __cplusplus
diff --git a/webrtc/common_audio/wav_file_unittest.cc b/webrtc/common_audio/wav_file_unittest.cc
index 3732079..ba1db1c 100644
--- a/webrtc/common_audio/wav_file_unittest.cc
+++ b/webrtc/common_audio/wav_file_unittest.cc
@@ -30,7 +30,7 @@
   {
     WavWriter w(outfile, 14099, 1);
     EXPECT_EQ(14099, w.sample_rate());
-    EXPECT_EQ(1, w.num_channels());
+    EXPECT_EQ(1u, w.num_channels());
     EXPECT_EQ(0u, w.num_samples());
     w.WriteSamples(kSamples, kNumSamples);
     EXPECT_EQ(kNumSamples, w.num_samples());
@@ -78,7 +78,7 @@
   {
     WavReader r(outfile);
     EXPECT_EQ(14099, r.sample_rate());
-    EXPECT_EQ(1, r.num_channels());
+    EXPECT_EQ(1u, r.num_channels());
     EXPECT_EQ(kNumSamples, r.num_samples());
     static const float kTruncatedSamples[] = {0.0, 10.0, 32767.0};
     float samples[kNumSamples];
@@ -93,7 +93,7 @@
   const std::string outfile = test::OutputPath() + "wavtest2.wav";
   rtc_WavWriter* w = rtc_WavOpen(outfile.c_str(), 11904, 2);
   EXPECT_EQ(11904, rtc_WavSampleRate(w));
-  EXPECT_EQ(2, rtc_WavNumChannels(w));
+  EXPECT_EQ(2u, rtc_WavNumChannels(w));
   EXPECT_EQ(0u, rtc_WavNumSamples(w));
   static const size_t kNumSamples = 4;
   rtc_WavWriteSamples(w, &kSamples[0], 2);
@@ -136,7 +136,7 @@
 TEST(WavWriterTest, LargeFile) {
   std::string outfile = test::OutputPath() + "wavtest3.wav";
   static const int kSampleRate = 8000;
-  static const int kNumChannels = 2;
+  static const size_t kNumChannels = 2;
   static const size_t kNumSamples = 3 * kSampleRate * kNumChannels;
   float samples[kNumSamples];
   for (size_t i = 0; i < kNumSamples; i += kNumChannels) {
diff --git a/webrtc/common_audio/wav_header.cc b/webrtc/common_audio/wav_header.cc
index d2aa426..402ea17 100644
--- a/webrtc/common_audio/wav_header.cc
+++ b/webrtc/common_audio/wav_header.cc
@@ -59,7 +59,7 @@
 
 }  // namespace
 
-bool CheckWavParameters(int num_channels,
+bool CheckWavParameters(size_t num_channels,
                         int sample_rate,
                         WavFormat format,
                         size_t bytes_per_sample,
@@ -67,12 +67,11 @@
   // num_channels, sample_rate, and bytes_per_sample must be positive, must fit
   // in their respective fields, and their product must fit in the 32-bit
   // ByteRate field.
-  if (num_channels <= 0 || sample_rate <= 0 || bytes_per_sample == 0)
+  if (num_channels == 0 || sample_rate <= 0 || bytes_per_sample == 0)
     return false;
   if (static_cast<uint64_t>(sample_rate) > std::numeric_limits<uint32_t>::max())
     return false;
-  if (static_cast<uint64_t>(num_channels) >
-      std::numeric_limits<uint16_t>::max())
+  if (num_channels > std::numeric_limits<uint16_t>::max())
     return false;
   if (static_cast<uint64_t>(bytes_per_sample) * 8 >
       std::numeric_limits<uint16_t>::max())
@@ -136,17 +135,18 @@
       bytes_in_payload + kWavHeaderSize - sizeof(ChunkHeader));
 }
 
-static inline uint32_t ByteRate(int num_channels, int sample_rate,
+static inline uint32_t ByteRate(size_t num_channels, int sample_rate,
                                 size_t bytes_per_sample) {
   return static_cast<uint32_t>(num_channels * sample_rate * bytes_per_sample);
 }
 
-static inline uint16_t BlockAlign(int num_channels, size_t bytes_per_sample) {
+static inline uint16_t BlockAlign(size_t num_channels,
+                                  size_t bytes_per_sample) {
   return static_cast<uint16_t>(num_channels * bytes_per_sample);
 }
 
 void WriteWavHeader(uint8_t* buf,
-                    int num_channels,
+                    size_t num_channels,
                     int sample_rate,
                     WavFormat format,
                     size_t bytes_per_sample,
@@ -181,7 +181,7 @@
 }
 
 bool ReadWavHeader(ReadableWav* readable,
-                   int* num_channels,
+                   size_t* num_channels,
                    int* sample_rate,
                    WavFormat* format,
                    size_t* bytes_per_sample,
diff --git a/webrtc/common_audio/wav_header.h b/webrtc/common_audio/wav_header.h
index 65b7792..6844306 100644
--- a/webrtc/common_audio/wav_header.h
+++ b/webrtc/common_audio/wav_header.h
@@ -32,7 +32,7 @@
 };
 
 // Return true if the given parameters will make a well-formed WAV header.
-bool CheckWavParameters(int num_channels,
+bool CheckWavParameters(size_t num_channels,
                         int sample_rate,
                         WavFormat format,
                         size_t bytes_per_sample,
@@ -43,7 +43,7 @@
 // channels and contain the specified total number of samples of the specified
 // type. CHECKs the input parameters for validity.
 void WriteWavHeader(uint8_t* buf,
-                    int num_channels,
+                    size_t num_channels,
                     int sample_rate,
                     WavFormat format,
                     size_t bytes_per_sample,
@@ -53,7 +53,7 @@
 // the provided output parameters. ReadableWav is used because the header can
 // be variably sized. Returns false if the header is invalid.
 bool ReadWavHeader(ReadableWav* readable,
-                   int* num_channels,
+                   size_t* num_channels,
                    int* sample_rate,
                    WavFormat* format,
                    size_t* bytes_per_sample,
diff --git a/webrtc/common_audio/wav_header_unittest.cc b/webrtc/common_audio/wav_header_unittest.cc
index 226d788..8527939 100644
--- a/webrtc/common_audio/wav_header_unittest.cc
+++ b/webrtc/common_audio/wav_header_unittest.cc
@@ -91,7 +91,7 @@
 }
 
 TEST(WavHeaderTest, ReadWavHeaderWithErrors) {
-  int num_channels = 0;
+  size_t num_channels = 0;
   int sample_rate = 0;
   WavFormat format = kWavFormatPcm;
   size_t bytes_per_sample = 0;
@@ -268,7 +268,7 @@
   static_assert(sizeof(kExpectedBuf) == kSize, "buffer size");
   EXPECT_EQ(0, memcmp(kExpectedBuf, buf, kSize));
 
-  int num_channels = 0;
+  size_t num_channels = 0;
   int sample_rate = 0;
   WavFormat format = kWavFormatPcm;
   size_t bytes_per_sample = 0;
@@ -277,7 +277,7 @@
   EXPECT_TRUE(
       ReadWavHeader(&r, &num_channels, &sample_rate, &format,
                     &bytes_per_sample, &num_samples));
-  EXPECT_EQ(17, num_channels);
+  EXPECT_EQ(17u, num_channels);
   EXPECT_EQ(12345, sample_rate);
   EXPECT_EQ(kWavFormatALaw, format);
   EXPECT_EQ(1u, bytes_per_sample);
@@ -304,7 +304,7 @@
     0x99, 0xd0, 0x5b, 0x07,  // size of payload: 123457689
   };
 
-  int num_channels = 0;
+  size_t num_channels = 0;
   int sample_rate = 0;
   WavFormat format = kWavFormatPcm;
   size_t bytes_per_sample = 0;
@@ -313,7 +313,7 @@
   EXPECT_TRUE(
       ReadWavHeader(&r, &num_channels, &sample_rate, &format,
                     &bytes_per_sample, &num_samples));
-  EXPECT_EQ(17, num_channels);
+  EXPECT_EQ(17u, num_channels);
   EXPECT_EQ(12345, sample_rate);
   EXPECT_EQ(kWavFormatALaw, format);
   EXPECT_EQ(1u, bytes_per_sample);
diff --git a/webrtc/common_types.h b/webrtc/common_types.h
index 6d1886b..444ef92 100644
--- a/webrtc/common_types.h
+++ b/webrtc/common_types.h
@@ -291,7 +291,7 @@
   char plname[RTP_PAYLOAD_NAME_SIZE];
   int plfreq;
   int pacsize;
-  int channels;
+  size_t channels;
   int rate;  // bits/sec unlike {start,min,max}Bitrate elsewhere in this file!
 
   bool operator==(const CodecInst& other) const {
diff --git a/webrtc/modules/audio_coding/acm2/acm_codec_database.cc b/webrtc/modules/audio_coding/acm2/acm_codec_database.cc
index bbd4509..5f3c078 100644
--- a/webrtc/modules/audio_coding/acm2/acm_codec_database.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_codec_database.cc
@@ -292,7 +292,9 @@
                   codec_inst.channels));
 }
 
-int ACMCodecDB::CodecId(const char* payload_name, int frequency, int channels) {
+int ACMCodecDB::CodecId(const char* payload_name,
+                        int frequency,
+                        size_t channels) {
   for (const CodecInst& ci : RentACodec::Database()) {
     bool name_match = false;
     bool frequency_match = false;
diff --git a/webrtc/modules/audio_coding/acm2/acm_codec_database.h b/webrtc/modules/audio_coding/acm2/acm_codec_database.h
index 9e87238..6c2db9c 100644
--- a/webrtc/modules/audio_coding/acm2/acm_codec_database.h
+++ b/webrtc/modules/audio_coding/acm2/acm_codec_database.h
@@ -48,7 +48,7 @@
     int num_packet_sizes;
     int packet_sizes_samples[kMaxNumPacketSize];
     int basic_block_samples;
-    int channel_support;
+    size_t channel_support;
   };
 
   // Returns codec id from database, given the information received in the input
@@ -60,7 +60,7 @@
   //   codec id if successful, otherwise < 0.
   static int CodecNumber(const CodecInst& codec_inst);
   static int CodecId(const CodecInst& codec_inst);
-  static int CodecId(const char* payload_name, int frequency, int channels);
+  static int CodecId(const char* payload_name, int frequency, size_t channels);
   static int ReceiverCodecNumber(const CodecInst& codec_inst);
 
   // Databases with information about the supported codecs
diff --git a/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.cc b/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.cc
index 8ca77ec..855a39e 100644
--- a/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.cc
@@ -55,7 +55,7 @@
 // G.722 = 94
 bool RemapPltypeAndUseThisCodec(const char* plname,
                                 int plfreq,
-                                int channels,
+                                size_t channels,
                                 int* pltype) {
   if (channels != 1)
     return false;  // Don't use non-mono codecs.
diff --git a/webrtc/modules/audio_coding/acm2/acm_receiver.cc b/webrtc/modules/audio_coding/acm2/acm_receiver.cc
index 335c2d6..f45d5d3 100644
--- a/webrtc/modules/audio_coding/acm2/acm_receiver.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_receiver.cc
@@ -213,7 +213,7 @@
 int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
   enum NetEqOutputType type;
   size_t samples_per_channel;
-  int num_channels;
+  size_t num_channels;
 
   // Accessing members, take the lock.
   CriticalSectionScoped lock(crit_sect_.get());
@@ -301,7 +301,7 @@
 
 int32_t AcmReceiver::AddCodec(int acm_codec_id,
                               uint8_t payload_type,
-                              int channels,
+                              size_t channels,
                               int sample_rate_hz,
                               AudioDecoder* audio_decoder,
                               const std::string& name) {
diff --git a/webrtc/modules/audio_coding/acm2/acm_receiver.h b/webrtc/modules/audio_coding/acm2/acm_receiver.h
index 86fd927..b150612 100644
--- a/webrtc/modules/audio_coding/acm2/acm_receiver.h
+++ b/webrtc/modules/audio_coding/acm2/acm_receiver.h
@@ -44,7 +44,7 @@
     uint8_t payload_type;
     // This field is meaningful for codecs where both mono and
     // stereo versions are registered under the same ID.
-    int channels;
+    size_t channels;
     int sample_rate_hz;
   };
 
@@ -116,7 +116,7 @@
   //
   int AddCodec(int acm_codec_id,
                uint8_t payload_type,
-               int channels,
+               size_t channels,
                int sample_rate_hz,
                AudioDecoder* audio_decoder,
                const std::string& name);
diff --git a/webrtc/modules/audio_coding/acm2/acm_resampler.cc b/webrtc/modules/audio_coding/acm2/acm_resampler.cc
index d7ceb8a..dfc3ef7 100644
--- a/webrtc/modules/audio_coding/acm2/acm_resampler.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_resampler.cc
@@ -28,10 +28,10 @@
 int ACMResampler::Resample10Msec(const int16_t* in_audio,
                                  int in_freq_hz,
                                  int out_freq_hz,
-                                 int num_audio_channels,
+                                 size_t num_audio_channels,
                                  size_t out_capacity_samples,
                                  int16_t* out_audio) {
-  size_t in_length = static_cast<size_t>(in_freq_hz * num_audio_channels / 100);
+  size_t in_length = in_freq_hz * num_audio_channels / 100;
   if (in_freq_hz == out_freq_hz) {
     if (out_capacity_samples < in_length) {
       assert(false);
@@ -56,7 +56,7 @@
     return -1;
   }
 
-  return out_length / num_audio_channels;
+  return static_cast<int>(out_length / num_audio_channels);
 }
 
 }  // namespace acm2
diff --git a/webrtc/modules/audio_coding/acm2/acm_resampler.h b/webrtc/modules/audio_coding/acm2/acm_resampler.h
index 700fefa..268db8b 100644
--- a/webrtc/modules/audio_coding/acm2/acm_resampler.h
+++ b/webrtc/modules/audio_coding/acm2/acm_resampler.h
@@ -25,7 +25,7 @@
   int Resample10Msec(const int16_t* in_audio,
                      int in_freq_hz,
                      int out_freq_hz,
-                     int num_audio_channels,
+                     size_t num_audio_channels,
                      size_t out_capacity_samples,
                      int16_t* out_audio);
 
diff --git a/webrtc/modules/audio_coding/acm2/audio_coding_module.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
index 034de32..c4dd349 100644
--- a/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
@@ -56,7 +56,7 @@
 int AudioCodingModule::Codec(const char* payload_name,
                              CodecInst* codec,
                              int sampling_freq_hz,
-                             int channels) {
+                             size_t channels) {
   rtc::Optional<CodecInst> ci = acm2::RentACodec::CodecInstByParams(
       payload_name, sampling_freq_hz, channels);
   if (ci) {
@@ -76,7 +76,7 @@
 
 int AudioCodingModule::Codec(const char* payload_name,
                              int sampling_freq_hz,
-                             int channels) {
+                             size_t channels) {
   rtc::Optional<acm2::RentACodec::CodecId> ci =
       acm2::RentACodec::CodecIdByParams(payload_name, sampling_freq_hz,
                                         channels);
diff --git a/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.cc
index b434da2..ac302f0 100644
--- a/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.cc
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.cc
@@ -324,7 +324,7 @@
   }
 
   // Check whether we need an up-mix or down-mix?
-  const int current_num_channels =
+  const size_t current_num_channels =
       rent_a_codec_.GetEncoderStack()->NumChannels();
   const bool same_num_channels =
       ptr_frame->num_channels_ == current_num_channels;
@@ -589,7 +589,7 @@
 int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
   RTC_DCHECK(receiver_initialized_);
-  if (codec.channels > 2 || codec.channels < 0) {
+  if (codec.channels > 2) {
     LOG_F(LS_ERROR) << "Unsupported number of channels: " << codec.channels;
     return -1;
   }
diff --git a/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h b/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h
index 337ff13..926671f 100644
--- a/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h
@@ -189,7 +189,7 @@
     uint32_t input_timestamp;
     const int16_t* audio;
     size_t length_per_channel;
-    uint8_t audio_channel;
+    size_t audio_channel;
     // If a re-mix is required (up or down), this buffer will store a re-mixed
     // version of the input.
     int16_t buffer[WEBRTC_10MS_PCM_AUDIO];
diff --git a/webrtc/modules/audio_coding/acm2/audio_coding_module_unittest_oldapi.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module_unittest_oldapi.cc
index ef48a48..6f82a96 100644
--- a/webrtc/modules/audio_coding/acm2/audio_coding_module_unittest_oldapi.cc
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module_unittest_oldapi.cc
@@ -300,7 +300,7 @@
   EXPECT_EQ(0, acm_->PlayoutData10Ms(kSampleRateHz, &audio_frame));
   EXPECT_EQ(id_, audio_frame.id_);
   EXPECT_EQ(0u, audio_frame.timestamp_);
-  EXPECT_GT(audio_frame.num_channels_, 0);
+  EXPECT_GT(audio_frame.num_channels_, 0u);
   EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
             audio_frame.samples_per_channel_);
   EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
diff --git a/webrtc/modules/audio_coding/acm2/codec_manager.cc b/webrtc/modules/audio_coding/acm2/codec_manager.cc
index d8ef2bf..ad67377 100644
--- a/webrtc/modules/audio_coding/acm2/codec_manager.cc
+++ b/webrtc/modules/audio_coding/acm2/codec_manager.cc
@@ -11,6 +11,7 @@
 #include "webrtc/modules/audio_coding/acm2/codec_manager.h"
 
 #include "webrtc/base/checks.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/engine_configurations.h"
 #include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
 #include "webrtc/system_wrappers/include/trace.h"
@@ -25,8 +26,8 @@
   int dummy_id = 0;
   if ((send_codec.channels != 1) && (send_codec.channels != 2)) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
-                 "Wrong number of channels (%d, only mono and stereo are "
-                 "supported)",
+                 "Wrong number of channels (%" PRIuS ", only mono and stereo "
+                 "are supported)",
                  send_codec.channels);
     return -1;
   }
@@ -48,7 +49,7 @@
   if (!RentACodec::IsSupportedNumChannels(*maybe_codec_id, send_codec.channels)
            .value_or(false)) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
-                 "%d number of channels not supportedn for %s.",
+                 "%" PRIuS " number of channels not supported for %s.",
                  send_codec.channels, send_codec.plname);
     return -1;
   }
diff --git a/webrtc/modules/audio_coding/acm2/rent_a_codec.cc b/webrtc/modules/audio_coding/acm2/rent_a_codec.cc
index 14302e4..5695fd6 100644
--- a/webrtc/modules/audio_coding/acm2/rent_a_codec.cc
+++ b/webrtc/modules/audio_coding/acm2/rent_a_codec.cc
@@ -45,7 +45,7 @@
 rtc::Optional<RentACodec::CodecId> RentACodec::CodecIdByParams(
     const char* payload_name,
     int sampling_freq_hz,
-    int channels) {
+    size_t channels) {
   return CodecIdFromIndex(
       ACMCodecDB::CodecId(payload_name, sampling_freq_hz, channels));
 }
@@ -63,7 +63,7 @@
 
 rtc::Optional<CodecInst> RentACodec::CodecInstByParams(const char* payload_name,
                                                        int sampling_freq_hz,
-                                                       int channels) {
+                                                       size_t channels) {
   rtc::Optional<CodecId> codec_id =
       CodecIdByParams(payload_name, sampling_freq_hz, channels);
   if (!codec_id)
@@ -83,7 +83,7 @@
 }
 
 rtc::Optional<bool> RentACodec::IsSupportedNumChannels(CodecId codec_id,
-                                                       int num_channels) {
+                                                       size_t num_channels) {
   auto i = CodecIndexFromId(codec_id);
   return i ? rtc::Optional<bool>(
                  ACMCodecDB::codec_settings_[*i].channel_support >=
@@ -98,7 +98,7 @@
 
 rtc::Optional<NetEqDecoder> RentACodec::NetEqDecoderFromCodecId(
     CodecId codec_id,
-    int num_channels) {
+    size_t num_channels) {
   rtc::Optional<int> i = CodecIndexFromId(codec_id);
   if (!i)
     return rtc::Optional<NetEqDecoder>();
diff --git a/webrtc/modules/audio_coding/acm2/rent_a_codec.h b/webrtc/modules/audio_coding/acm2/rent_a_codec.h
index cf6891a..b1dcc91 100644
--- a/webrtc/modules/audio_coding/acm2/rent_a_codec.h
+++ b/webrtc/modules/audio_coding/acm2/rent_a_codec.h
@@ -162,12 +162,12 @@
 
   static rtc::Optional<CodecId> CodecIdByParams(const char* payload_name,
                                                 int sampling_freq_hz,
-                                                int channels);
+                                                size_t channels);
   static rtc::Optional<CodecInst> CodecInstById(CodecId codec_id);
   static rtc::Optional<CodecId> CodecIdByInst(const CodecInst& codec_inst);
   static rtc::Optional<CodecInst> CodecInstByParams(const char* payload_name,
                                                     int sampling_freq_hz,
-                                                    int channels);
+                                                    size_t channels);
   static bool IsCodecValid(const CodecInst& codec_inst);
 
   static inline bool IsPayloadTypeValid(int payload_type) {
@@ -177,10 +177,11 @@
   static rtc::ArrayView<const CodecInst> Database();
 
   static rtc::Optional<bool> IsSupportedNumChannels(CodecId codec_id,
-                                                    int num_channels);
+                                                    size_t num_channels);
 
-  static rtc::Optional<NetEqDecoder> NetEqDecoderFromCodecId(CodecId codec_id,
-                                                             int num_channels);
+  static rtc::Optional<NetEqDecoder> NetEqDecoderFromCodecId(
+      CodecId codec_id,
+      size_t num_channels);
 
   // Parse codec_inst and extract payload types. If the given CodecInst was for
   // the wrong sort of codec, return kSkip; otherwise, if the rate was illegal,
diff --git a/webrtc/modules/audio_coding/codecs/audio_encoder.h b/webrtc/modules/audio_coding/codecs/audio_encoder.h
index ebb5d36..a46b0e8 100644
--- a/webrtc/modules/audio_coding/codecs/audio_encoder.h
+++ b/webrtc/modules/audio_coding/codecs/audio_encoder.h
@@ -61,7 +61,7 @@
   // Returns the input sample rate in Hz and the number of input channels.
   // These are constants set at instantiation time.
   virtual int SampleRateHz() const = 0;
-  virtual int NumChannels() const = 0;
+  virtual size_t NumChannels() const = 0;
 
   // Returns the rate at which the RTP timestamps are updated. The default
   // implementation returns SampleRateHz().
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
index 3901c30..180166c 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
@@ -75,7 +75,7 @@
   return speech_encoder_->SampleRateHz();
 }
 
-int AudioEncoderCng::NumChannels() const {
+size_t AudioEncoderCng::NumChannels() const {
   return 1;
 }
 
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h
index d5d00ac..87383e2 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h
@@ -32,7 +32,7 @@
   struct Config {
     bool IsOk() const;
 
-    int num_channels = 1;
+    size_t num_channels = 1;
     int payload_type = 13;
     // Caller keeps ownership of the AudioEncoder object.
     AudioEncoder* speech_encoder = nullptr;
@@ -51,7 +51,7 @@
 
   size_t MaxEncodedBytes() const override;
   int SampleRateHz() const override;
-  int NumChannels() const override;
+  size_t NumChannels() const override;
   int RtpTimestampRateHz() const override;
   size_t Num10MsFramesInNextPacket() const override;
   size_t Max10MsFramesInAPacket() const override;
diff --git a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
index 26c7838..ff61db8 100644
--- a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
@@ -20,15 +20,6 @@
 
 namespace {
 
-int16_t NumSamplesPerFrame(int num_channels,
-                           int frame_size_ms,
-                           int sample_rate_hz) {
-  int samples_per_frame = num_channels * frame_size_ms * sample_rate_hz / 1000;
-  RTC_CHECK_LE(samples_per_frame, std::numeric_limits<int16_t>::max())
-      << "Frame size too large.";
-  return static_cast<int16_t>(samples_per_frame);
-}
-
 template <typename T>
 typename T::Config CreateConfig(const CodecInst& codec_inst) {
   typename T::Config config;
@@ -50,9 +41,8 @@
       payload_type_(config.payload_type),
       num_10ms_frames_per_packet_(
           static_cast<size_t>(config.frame_size_ms / 10)),
-      full_frame_samples_(NumSamplesPerFrame(config.num_channels,
-                                             config.frame_size_ms,
-                                             sample_rate_hz_)),
+      full_frame_samples_(
+          config.num_channels * config.frame_size_ms * sample_rate_hz / 1000),
       first_timestamp_in_buffer_(0) {
   RTC_CHECK_GT(sample_rate_hz, 0) << "Sample rate must be larger than 0 Hz";
   RTC_CHECK_EQ(config.frame_size_ms % 10, 0)
@@ -70,7 +60,7 @@
   return sample_rate_hz_;
 }
 
-int AudioEncoderPcm::NumChannels() const {
+size_t AudioEncoderPcm::NumChannels() const {
   return num_channels_;
 }
 
diff --git a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h
index 6891cbd..b839488 100644
--- a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h
@@ -25,7 +25,7 @@
     bool IsOk() const;
 
     int frame_size_ms;
-    int num_channels;
+    size_t num_channels;
     int payload_type;
 
    protected:
@@ -37,7 +37,7 @@
 
   size_t MaxEncodedBytes() const override;
   int SampleRateHz() const override;
-  int NumChannels() const override;
+  size_t NumChannels() const override;
   size_t Num10MsFramesInNextPacket() const override;
   size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
@@ -58,7 +58,7 @@
 
  private:
   const int sample_rate_hz_;
-  const int num_channels_;
+  const size_t num_channels_;
   const int payload_type_;
   const size_t num_10ms_frames_per_packet_;
   const size_t full_frame_samples_;
diff --git a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
index fa476e8..d7203b9 100644
--- a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -48,7 +48,7 @@
   RTC_CHECK(config.IsOk());
   const size_t samples_per_channel =
       kSampleRateHz / 100 * num_10ms_frames_per_packet_;
-  for (int i = 0; i < num_channels_; ++i) {
+  for (size_t i = 0; i < num_channels_; ++i) {
     encoders_[i].speech_buffer.reset(new int16_t[samples_per_channel]);
     encoders_[i].encoded_buffer.SetSize(samples_per_channel / 2);
   }
@@ -68,7 +68,7 @@
   return kSampleRateHz;
 }
 
-int AudioEncoderG722::NumChannels() const {
+size_t AudioEncoderG722::NumChannels() const {
   return num_channels_;
 }
 
@@ -88,7 +88,7 @@
 
 int AudioEncoderG722::GetTargetBitrate() const {
   // 4 bits/sample, 16000 samples/s/channel.
-  return 64000 * NumChannels();
+  return static_cast<int>(64000 * NumChannels());
 }
 
 AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
@@ -104,7 +104,7 @@
   // Deinterleave samples and save them in each channel's buffer.
   const size_t start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
   for (size_t i = 0; i < kSampleRateHz / 100; ++i)
-    for (int j = 0; j < num_channels_; ++j)
+    for (size_t j = 0; j < num_channels_; ++j)
       encoders_[j].speech_buffer[start + i] = audio[i * num_channels_ + j];
 
   // If we don't yet have enough samples for a packet, we're done for now.
@@ -116,7 +116,7 @@
   RTC_CHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
   num_10ms_frames_buffered_ = 0;
   const size_t samples_per_channel = SamplesPerChannel();
-  for (int i = 0; i < num_channels_; ++i) {
+  for (size_t i = 0; i < num_channels_; ++i) {
     const size_t encoded = WebRtcG722_Encode(
         encoders_[i].encoder, encoders_[i].speech_buffer.get(),
         samples_per_channel, encoders_[i].encoded_buffer.data());
@@ -127,12 +127,12 @@
   // channel and the interleaved stream encodes two samples per byte, most
   // significant half first.
   for (size_t i = 0; i < samples_per_channel / 2; ++i) {
-    for (int j = 0; j < num_channels_; ++j) {
+    for (size_t j = 0; j < num_channels_; ++j) {
       uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
       interleave_buffer_.data()[j] = two_samples >> 4;
       interleave_buffer_.data()[num_channels_ + j] = two_samples & 0xf;
     }
-    for (int j = 0; j < num_channels_; ++j)
+    for (size_t j = 0; j < num_channels_; ++j)
       encoded[i * num_channels_ + j] = interleave_buffer_.data()[2 * j] << 4 |
                                        interleave_buffer_.data()[2 * j + 1];
   }
@@ -145,7 +145,7 @@
 
 void AudioEncoderG722::Reset() {
   num_10ms_frames_buffered_ = 0;
-  for (int i = 0; i < num_channels_; ++i)
+  for (size_t i = 0; i < num_channels_; ++i)
     RTC_CHECK_EQ(0, WebRtcG722_EncoderInit(encoders_[i].encoder));
 }
 
diff --git a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h
index 5f1b762..07d767e 100644
--- a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h
@@ -27,7 +27,7 @@
 
     int payload_type = 9;
     int frame_size_ms = 20;
-    int num_channels = 1;
+    size_t num_channels = 1;
   };
 
   explicit AudioEncoderG722(const Config& config);
@@ -36,7 +36,7 @@
 
   size_t MaxEncodedBytes() const override;
   int SampleRateHz() const override;
-  int NumChannels() const override;
+  size_t NumChannels() const override;
   int RtpTimestampRateHz() const override;
   size_t Num10MsFramesInNextPacket() const override;
   size_t Max10MsFramesInAPacket() const override;
@@ -59,7 +59,7 @@
 
   size_t SamplesPerChannel() const;
 
-  const int num_channels_;
+  const size_t num_channels_;
   const int payload_type_;
   const size_t num_10ms_frames_per_packet_;
   size_t num_10ms_frames_buffered_;
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
index 15c4149..ddd6dde 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
+++ b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
@@ -64,7 +64,7 @@
   return kSampleRateHz;
 }
 
-int AudioEncoderIlbc::NumChannels() const {
+size_t AudioEncoderIlbc::NumChannels() const {
   return 1;
 }
 
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h
index 0800c0f..102a274 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h
@@ -36,7 +36,7 @@
 
   size_t MaxEncodedBytes() const override;
   int SampleRateHz() const override;
-  int NumChannels() const override;
+  size_t NumChannels() const override;
   size_t Num10MsFramesInNextPacket() const override;
   size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
index 3226877..321dac3 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
@@ -56,7 +56,7 @@
 
   size_t MaxEncodedBytes() const override;
   int SampleRateHz() const override;
-  int NumChannels() const override;
+  size_t NumChannels() const override;
   size_t Num10MsFramesInNextPacket() const override;
   size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
index 4cfd782..d4438cc 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
@@ -88,7 +88,7 @@
 }
 
 template <typename T>
-int AudioEncoderIsacT<T>::NumChannels() const {
+size_t AudioEncoderIsacT<T>::NumChannels() const {
   return 1;
 }
 
diff --git a/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h b/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
index 29cba8f..66adde4 100644
--- a/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
+++ b/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
@@ -24,7 +24,7 @@
   MOCK_METHOD1(Mark, void(std::string desc));
   MOCK_CONST_METHOD0(MaxEncodedBytes, size_t());
   MOCK_CONST_METHOD0(SampleRateHz, int());
-  MOCK_CONST_METHOD0(NumChannels, int());
+  MOCK_CONST_METHOD0(NumChannels, size_t());
   MOCK_CONST_METHOD0(RtpTimestampRateHz, int());
   MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, size_t());
   MOCK_CONST_METHOD0(Max10MsFramesInAPacket, size_t());
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
index df0e79b..f64e811 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
@@ -17,7 +17,7 @@
 AudioDecoderOpus::AudioDecoderOpus(size_t num_channels)
     : channels_(num_channels) {
   RTC_DCHECK(num_channels == 1 || num_channels == 2);
-  WebRtcOpus_DecoderCreate(&dec_state_, static_cast<int>(channels_));
+  WebRtcOpus_DecoderCreate(&dec_state_, channels_);
   WebRtcOpus_DecoderInit(dec_state_);
 }
 
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index 0806bb8..707d6c2 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -114,7 +114,7 @@
   return kSampleRateHz;
 }
 
-int AudioEncoderOpus::NumChannels() const {
+size_t AudioEncoderOpus::NumChannels() const {
   return config_.num_channels;
 }
 
@@ -147,8 +147,7 @@
                Num10msFramesPerPacket() * SamplesPer10msFrame());
   int status = WebRtcOpus_Encode(
       inst_, &input_buffer_[0],
-      rtc::CheckedDivExact(input_buffer_.size(),
-                           static_cast<size_t>(config_.num_channels)),
+      rtc::CheckedDivExact(input_buffer_.size(), config_.num_channels),
       rtc::saturated_cast<int16_t>(max_encoded_bytes), encoded);
   RTC_CHECK_GE(status, 0);  // Fails only if fed invalid data.
   input_buffer_.clear();
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h
index f37e344..59c8f79 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h
@@ -31,7 +31,7 @@
   struct Config {
     bool IsOk() const;
     int frame_size_ms = 20;
-    int num_channels = 1;
+    size_t num_channels = 1;
     int payload_type = 120;
     ApplicationMode application = kVoip;
     int bitrate_bps = 64000;
@@ -56,7 +56,7 @@
 
   size_t MaxEncodedBytes() const override;
   int SampleRateHz() const override;
-  int NumChannels() const override;
+  size_t NumChannels() const override;
   size_t Num10MsFramesInNextPacket() const override;
   size_t Max10MsFramesInAPacket() const override;
   int GetTargetBitrate() const override;
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc b/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
index cb895a5..4f9f7ff 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
@@ -9,6 +9,7 @@
  */
 
 #include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
 #include "webrtc/test/testsupport/fileutils.h"
@@ -21,7 +22,7 @@
 namespace webrtc {
 
 // Define coding parameter as <channels, bit_rate, filename, extension>.
-typedef tuple<int, int, string, string> coding_param;
+typedef tuple<size_t, int, string, string> coding_param;
 typedef struct mode mode;
 
 struct mode {
@@ -47,7 +48,7 @@
   int sampling_khz_;
   size_t block_length_sample_;
 
-  int channels_;
+  size_t channels_;
   int bit_rate_;
 
   size_t data_pointer_;
@@ -68,7 +69,7 @@
 void OpusFecTest::SetUp() {
   channels_ = get<0>(GetParam());
   bit_rate_ = get<1>(GetParam());
-  printf("Coding %d channel signal at %d bps.\n", channels_, bit_rate_);
+  printf("Coding %" PRIuS " channel signal at %d bps.\n", channels_, bit_rate_);
 
   in_filename_ = test::ResourcePath(get<2>(GetParam()), get<3>(GetParam()));
 
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_inst.h b/webrtc/modules/audio_coding/codecs/opus/opus_inst.h
index 662c6fa..8d032ba 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_inst.h
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_inst.h
@@ -17,7 +17,7 @@
 
 struct WebRtcOpusEncInst {
   OpusEncoder* encoder;
-  int channels;
+  size_t channels;
   int in_dtx_mode;
   // When Opus is in DTX mode, we use |zero_counts| to count consecutive zeros
   // to break long zero segment so as to prevent DTX from going wrong. We use
@@ -30,7 +30,7 @@
 struct WebRtcOpusDecInst {
   OpusDecoder* decoder;
   int prev_decoded_samples;
-  int channels;
+  size_t channels;
   int in_dtx_mode;
 };
 
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_interface.c b/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
index e8cdd64..9dc7ef9 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
@@ -42,7 +42,7 @@
 };
 
 int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst,
-                                 int32_t channels,
+                                 size_t channels,
                                  int32_t application) {
   int opus_app;
   if (!inst)
@@ -67,7 +67,7 @@
   assert(state->zero_counts);
 
   int error;
-  state->encoder = opus_encoder_create(48000, channels, opus_app,
+  state->encoder = opus_encoder_create(48000, (int)channels, opus_app,
                                        &error);
   if (error != OPUS_OK || !state->encoder) {
     WebRtcOpus_EncoderFree(state);
@@ -99,7 +99,7 @@
                       uint8_t* encoded) {
   int res;
   size_t i;
-  int c;
+  size_t c;
 
   int16_t buffer[2 * 48 * kWebRtcOpusMaxEncodeFrameSizeMs];
 
@@ -107,7 +107,7 @@
     return -1;
   }
 
-  const int channels = inst->channels;
+  const size_t channels = inst->channels;
   int use_buffer = 0;
 
   // Break long consecutive zeros by forcing a "1" every |kZeroBreakCount|
@@ -248,7 +248,7 @@
   }
 }
 
-int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels) {
+int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, size_t channels) {
   int error;
   OpusDecInst* state;
 
@@ -260,7 +260,7 @@
     }
 
     /* Create new memory, always at 48000 Hz. */
-    state->decoder = opus_decoder_create(48000, channels, &error);
+    state->decoder = opus_decoder_create(48000, (int)channels, &error);
     if (error == OPUS_OK && state->decoder != NULL) {
       /* Creation of memory all ok. */
       state->channels = channels;
@@ -289,7 +289,7 @@
   }
 }
 
-int WebRtcOpus_DecoderChannels(OpusDecInst* inst) {
+size_t WebRtcOpus_DecoderChannels(OpusDecInst* inst) {
   return inst->channels;
 }
 
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_interface.h b/webrtc/modules/audio_coding/codecs/opus/opus_interface.h
index b73e1fb..754b49c 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_interface.h
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_interface.h
@@ -43,7 +43,7 @@
  *                            -1 - Error
  */
 int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst,
-                                 int32_t channels,
+                                 size_t channels,
                                  int32_t application);
 
 int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst);
@@ -195,7 +195,7 @@
  */
 int16_t WebRtcOpus_SetComplexity(OpusEncInst* inst, int32_t complexity);
 
-int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels);
+int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, size_t channels);
 int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst);
 
 /****************************************************************************
@@ -203,7 +203,7 @@
  *
  * This function returns the number of channels created for Opus decoder.
  */
-int WebRtcOpus_DecoderChannels(OpusDecInst* inst);
+size_t WebRtcOpus_DecoderChannels(OpusDecInst* inst);
 
 /****************************************************************************
  * WebRtcOpus_DecoderInit(...)
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc b/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
index b29501e..c82b184 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -42,7 +42,9 @@
   // After preparation, |speech_data_.GetNextBlock()| returns a pointer to a
   // block of |block_length_ms| milliseconds. The data is looped every
   // |loop_length_ms| milliseconds.
-  void PrepareSpeechData(int channel, int block_length_ms, int loop_length_ms);
+  void PrepareSpeechData(size_t channel,
+                         int block_length_ms,
+                         int loop_length_ms);
 
   int EncodeDecode(WebRtcOpusEncInst* encoder,
                    rtc::ArrayView<const int16_t> input_audio,
@@ -53,7 +55,7 @@
   void SetMaxPlaybackRate(WebRtcOpusEncInst* encoder,
                           opus_int32 expect, int32_t set);
 
-  void CheckAudioBounded(const int16_t* audio, size_t samples, int channels,
+  void CheckAudioBounded(const int16_t* audio, size_t samples, size_t channels,
                          uint16_t bound) const;
 
   WebRtcOpusEncInst* opus_encoder_;
@@ -62,7 +64,7 @@
   AudioLoop speech_data_;
   uint8_t bitstream_[kMaxBytes];
   size_t encoded_bytes_;
-  int channels_;
+  size_t channels_;
   int application_;
 };
 
@@ -70,11 +72,11 @@
     : opus_encoder_(NULL),
       opus_decoder_(NULL),
       encoded_bytes_(0),
-      channels_(::testing::get<0>(GetParam())),
+      channels_(static_cast<size_t>(::testing::get<0>(GetParam()))),
       application_(::testing::get<1>(GetParam())) {
 }
 
-void OpusTest::PrepareSpeechData(int channel, int block_length_ms,
+void OpusTest::PrepareSpeechData(size_t channel, int block_length_ms,
                                  int loop_length_ms) {
   const std::string file_name =
         webrtc::test::ResourcePath((channel == 1) ?
@@ -99,9 +101,9 @@
 }
 
 void OpusTest::CheckAudioBounded(const int16_t* audio, size_t samples,
-                                 int channels, uint16_t bound) const {
+                                 size_t channels, uint16_t bound) const {
   for (size_t i = 0; i < samples; ++i) {
-    for (int c = 0; c < channels; ++c) {
+    for (size_t c = 0; c < channels; ++c) {
       ASSERT_GE(audio[i * channels + c], -bound);
       ASSERT_LE(audio[i * channels + c], bound);
     }
@@ -115,7 +117,7 @@
                            int16_t* audio_type) {
   int encoded_bytes_int = WebRtcOpus_Encode(
       encoder, input_audio.data(),
-      rtc::CheckedDivExact(input_audio.size(), static_cast<size_t>(channels_)),
+      rtc::CheckedDivExact(input_audio.size(), channels_),
       kMaxBytes, bitstream_);
   EXPECT_GE(encoded_bytes_int, 0);
   encoded_bytes_ = static_cast<size_t>(encoded_bytes_int);
@@ -588,8 +590,7 @@
   auto speech_block = speech_data_.GetNextBlock();
   int encoded_bytes_int = WebRtcOpus_Encode(
       opus_encoder_, speech_block.data(),
-      rtc::CheckedDivExact(speech_block.size(),
-                           2 * static_cast<size_t>(channels_)),
+      rtc::CheckedDivExact(speech_block.size(), 2 * channels_),
       kMaxBytes, bitstream_);
   EXPECT_GE(encoded_bytes_int, 0);
   EXPECT_EQ(kOpus10msFrameSamples,
@@ -601,7 +602,7 @@
   speech_block = speech_data_.GetNextBlock();
   encoded_bytes_int = WebRtcOpus_Encode(
       opus_encoder_, speech_block.data(),
-      rtc::CheckedDivExact(speech_block.size(), static_cast<size_t>(channels_)),
+      rtc::CheckedDivExact(speech_block.size(), channels_),
       kMaxBytes, bitstream_);
   EXPECT_GE(encoded_bytes_int, 0);
   EXPECT_EQ(kOpus20msFrameSamples,
@@ -643,8 +644,7 @@
     auto speech_block = speech_data_.GetNextBlock();
     encoded_bytes_ =
         WebRtcOpus_Encode(opus_encoder_, speech_block.data(),
-                          rtc::CheckedDivExact(speech_block.size(),
-                                               static_cast<size_t>(channels_)),
+                          rtc::CheckedDivExact(speech_block.size(), channels_),
                           kMaxBytes, bitstream_);
     EXPECT_EQ(OPUS_OK, opus_repacketizer_cat(rp, bitstream_, encoded_bytes_));
   }
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
index 177c19a..7ef1ce0 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
@@ -32,7 +32,7 @@
   return speech_encoder_->SampleRateHz();
 }
 
-int AudioEncoderCopyRed::NumChannels() const {
+size_t AudioEncoderCopyRed::NumChannels() const {
   return speech_encoder_->NumChannels();
 }
 
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
index d7d3a66..2f53765 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
@@ -38,7 +38,7 @@
 
   size_t MaxEncodedBytes() const override;
   int SampleRateHz() const override;
-  int NumChannels() const override;
+  size_t NumChannels() const override;
   int RtpTimestampRateHz() const override;
   size_t Num10MsFramesInNextPacket() const override;
   size_t Max10MsFramesInAPacket() const override;
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
index c4c3910..22601b6 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
@@ -42,7 +42,7 @@
     config.speech_encoder = &mock_encoder_;
     red_.reset(new AudioEncoderCopyRed(config));
     memset(audio_, 0, sizeof(audio_));
-    EXPECT_CALL(mock_encoder_, NumChannels()).WillRepeatedly(Return(1));
+    EXPECT_CALL(mock_encoder_, NumChannels()).WillRepeatedly(Return(1U));
     EXPECT_CALL(mock_encoder_, SampleRateHz())
         .WillRepeatedly(Return(sample_rate_hz_));
     EXPECT_CALL(mock_encoder_, MaxEncodedBytes())
@@ -110,8 +110,8 @@
 }
 
 TEST_F(AudioEncoderCopyRedTest, CheckNumChannelsPropagation) {
-  EXPECT_CALL(mock_encoder_, NumChannels()).WillOnce(Return(17));
-  EXPECT_EQ(17, red_->NumChannels());
+  EXPECT_CALL(mock_encoder_, NumChannels()).WillOnce(Return(17U));
+  EXPECT_EQ(17U, red_->NumChannels());
 }
 
 TEST_F(AudioEncoderCopyRedTest, CheckFrameSizePropagation) {
diff --git a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
index 07a15ff..3dc6654 100644
--- a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
@@ -11,6 +11,7 @@
 #include "webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h"
 
 #include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/test/testsupport/fileutils.h"
 
 using ::std::tr1::get;
@@ -99,7 +100,7 @@
   size_t time_now_ms = 0;
   float time_ms;
 
-  printf("Coding %d kHz-sampled %d-channel audio at %d bps ...\n",
+  printf("Coding %d kHz-sampled %" PRIuS "-channel audio at %d bps ...\n",
          input_sampling_khz_, channels_, bit_rate_);
 
   while (time_now_ms < audio_duration_sec * 1000) {
diff --git a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
index b5aef75..fb7b3e5 100644
--- a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
+++ b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
@@ -20,7 +20,8 @@
 
 // Define coding parameter as
 // <channels, bit_rate, file_name, extension, if_save_output>.
-typedef std::tr1::tuple<int, int, std::string, std::string, bool> coding_param;
+typedef std::tr1::tuple<size_t, int, std::string, std::string, bool>
+    coding_param;
 
 class AudioCodecSpeedTest : public testing::TestWithParam<coding_param> {
  protected:
@@ -74,7 +75,7 @@
   float decoding_time_ms_;
   FILE* out_file_;
 
-  int channels_;
+  size_t channels_;
 
   // Bit rate is in bit-per-second.
   int bit_rate_;
diff --git a/webrtc/modules/audio_coding/include/audio_coding_module.h b/webrtc/modules/audio_coding/include/audio_coding_module.h
index 52fe383..9e7991f 100644
--- a/webrtc/modules/audio_coding/include/audio_coding_module.h
+++ b/webrtc/modules/audio_coding/include/audio_coding_module.h
@@ -134,7 +134,7 @@
   //    0 if succeeded.
   //
   static int Codec(const char* payload_name, CodecInst* codec,
-                   int sampling_freq_hz, int channels);
+                   int sampling_freq_hz, size_t channels);
 
   ///////////////////////////////////////////////////////////////////////////
   // int32_t Codec()
@@ -153,7 +153,7 @@
   //   -1 if the codec is not found.
   //
   static int Codec(const char* payload_name, int sampling_freq_hz,
-                   int channels);
+                   size_t channels);
 
   ///////////////////////////////////////////////////////////////////////////
   // bool IsCodecValid()
diff --git a/webrtc/modules/audio_coding/neteq/include/neteq.h b/webrtc/modules/audio_coding/neteq/include/neteq.h
index abe09a3..1322223 100644
--- a/webrtc/modules/audio_coding/neteq/include/neteq.h
+++ b/webrtc/modules/audio_coding/neteq/include/neteq.h
@@ -171,7 +171,7 @@
   // The speech type is written to |type|, if |type| is not NULL.
   // Returns kOK on success, or kFail in case of an error.
   virtual int GetAudio(size_t max_length, int16_t* output_audio,
-                       size_t* samples_per_channel, int* num_channels,
+                       size_t* samples_per_channel, size_t* num_channels,
                        NetEqOutputType* type) = 0;
 
   // Associates |rtp_payload_type| with |codec| and |codec_name|, and stores the
diff --git a/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
index 7bf9499..c03fbb7 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
@@ -188,7 +188,7 @@
   void GetAndVerifyOutput() override {
     NetEqOutputType output_type;
     size_t samples_per_channel;
-    int num_channels;
+    size_t num_channels;
     // Get audio from internal decoder instance.
     EXPECT_EQ(NetEq::kOK,
               neteq_internal_->GetAudio(kMaxBlockSize,
@@ -196,7 +196,7 @@
                                         &samples_per_channel,
                                         &num_channels,
                                         &output_type));
-    EXPECT_EQ(1, num_channels);
+    EXPECT_EQ(1u, num_channels);
     EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
               samples_per_channel);
 
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.cc b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
index ac84ce9..6c07da4 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -151,7 +151,7 @@
 }
 
 int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio,
-                        size_t* samples_per_channel, int* num_channels,
+                        size_t* samples_per_channel, size_t* num_channels,
                         NetEqOutputType* type) {
   TRACE_EVENT0("webrtc", "NetEqImpl::GetAudio");
   CriticalSectionScoped lock(crit_sect_.get());
@@ -744,7 +744,7 @@
 int NetEqImpl::GetAudioInternal(size_t max_length,
                                 int16_t* output,
                                 size_t* samples_per_channel,
-                                int* num_channels) {
+                                size_t* num_channels) {
   PacketList packet_list;
   DtmfEvent dtmf_event;
   Operations operation;
@@ -868,7 +868,7 @@
   const size_t samples_from_sync =
       sync_buffer_->GetNextAudioInterleaved(num_output_samples_per_channel,
                                             output);
-  *num_channels = static_cast<int>(sync_buffer_->Channels());
+  *num_channels = sync_buffer_->Channels();
   if (sync_buffer_->FutureLength() < expand_->overlap_length()) {
     // The sync buffer should always contain |overlap_length| samples, but now
     // too many samples have been extracted. Reinstall the |overlap_length|
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.h b/webrtc/modules/audio_coding/neteq/neteq_impl.h
index 093c8d5..940dead 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.h
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.h
@@ -107,7 +107,7 @@
   int GetAudio(size_t max_length,
                int16_t* output_audio,
                size_t* samples_per_channel,
-               int* num_channels,
+               size_t* num_channels,
                NetEqOutputType* type) override;
 
   int RegisterPayloadType(NetEqDecoder codec,
@@ -220,7 +220,8 @@
   int GetAudioInternal(size_t max_length,
                        int16_t* output,
                        size_t* samples_per_channel,
-                       int* num_channels) EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+                       size_t* num_channels)
+      EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
 
   // Provides a decision to the GetAudioInternal method. The decision what to
   // do is written to |operation|. Packets to decode are written to
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
index d3b304a..f734883 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -466,14 +466,14 @@
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   int16_t output[kMaxOutputSize];
   size_t samples_per_channel;
-  int num_channels;
+  size_t num_channels;
   NetEqOutputType type;
   EXPECT_EQ(
       NetEq::kOK,
       neteq_->GetAudio(
           kMaxOutputSize, output, &samples_per_channel, &num_channels, &type));
   ASSERT_EQ(kMaxOutputSize, samples_per_channel);
-  EXPECT_EQ(1, num_channels);
+  EXPECT_EQ(1u, num_channels);
   EXPECT_EQ(kOutputNormal, type);
 
   // Start with a simple check that the fake decoder is behaving as expected.
@@ -545,14 +545,14 @@
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   int16_t output[kMaxOutputSize];
   size_t samples_per_channel;
-  int num_channels;
+  size_t num_channels;
   NetEqOutputType type;
   EXPECT_EQ(
       NetEq::kOK,
       neteq_->GetAudio(
           kMaxOutputSize, output, &samples_per_channel, &num_channels, &type));
   ASSERT_EQ(kMaxOutputSize, samples_per_channel);
-  EXPECT_EQ(1, num_channels);
+  EXPECT_EQ(1u, num_channels);
   EXPECT_EQ(kOutputNormal, type);
 
   // Insert two more packets. The first one is out of order, and is already too
@@ -583,7 +583,7 @@
       neteq_->GetAudio(
           kMaxOutputSize, output, &samples_per_channel, &num_channels, &type));
   ASSERT_EQ(kMaxOutputSize, samples_per_channel);
-  EXPECT_EQ(1, num_channels);
+  EXPECT_EQ(1u, num_channels);
   EXPECT_EQ(kOutputNormal, type);
 
   // Now check the packet buffer, and make sure it is empty, since the
@@ -622,14 +622,14 @@
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   int16_t output[kMaxOutputSize];
   size_t samples_per_channel;
-  int num_channels;
+  size_t num_channels;
   NetEqOutputType type;
   EXPECT_EQ(NetEq::kOK,
             neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
                              &num_channels, &type));
   ASSERT_LE(samples_per_channel, kMaxOutputSize);
   EXPECT_EQ(kMaxOutputSize, samples_per_channel);
-  EXPECT_EQ(1, num_channels);
+  EXPECT_EQ(1u, num_channels);
   EXPECT_EQ(kOutputPLC, type);
 
   // Register the payload type.
@@ -652,7 +652,7 @@
                                &num_channels, &type));
     ASSERT_LE(samples_per_channel, kMaxOutputSize);
     EXPECT_EQ(kMaxOutputSize, samples_per_channel);
-    EXPECT_EQ(1, num_channels);
+    EXPECT_EQ(1u, num_channels);
     EXPECT_EQ(kOutputNormal, type)
         << "NetEq did not decode the packets as expected.";
   }
@@ -734,7 +734,7 @@
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateKhz);
   int16_t output[kMaxOutputSize];
   size_t samples_per_channel;
-  int num_channels;
+  size_t num_channels;
   uint32_t timestamp;
   uint32_t last_timestamp;
   NetEqOutputType type;
@@ -759,7 +759,7 @@
 
   for (size_t i = 1; i < 6; ++i) {
     ASSERT_EQ(kMaxOutputSize, samples_per_channel);
-    EXPECT_EQ(1, num_channels);
+    EXPECT_EQ(1u, num_channels);
     EXPECT_EQ(expected_type[i - 1], type);
     EXPECT_TRUE(neteq_->GetPlayoutTimestamp(&timestamp));
     EXPECT_EQ(NetEq::kOK,
@@ -779,7 +779,7 @@
 
   for (size_t i = 6; i < 8; ++i) {
     ASSERT_EQ(kMaxOutputSize, samples_per_channel);
-    EXPECT_EQ(1, num_channels);
+    EXPECT_EQ(1u, num_channels);
     EXPECT_EQ(expected_type[i - 1], type);
     EXPECT_EQ(NetEq::kOK,
               neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
@@ -799,7 +799,7 @@
   UseNoMocks();
   CreateInstance();
   static const size_t kNetEqMaxFrameSize = 2880;  // 60 ms @ 48 kHz.
-  static const int kChannels = 2;
+  static const size_t kChannels = 2;
 
   const uint8_t kPayloadType = 17;   // Just an arbitrary number.
   const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
@@ -871,11 +871,10 @@
   EXPECT_EQ(NetEq::kOK,
             neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
 
-  const size_t kMaxOutputSize =
-      static_cast<size_t>(10 * kSampleRateHz / 1000 * kChannels);
+  const size_t kMaxOutputSize = 10 * kSampleRateHz / 1000 * kChannels;
   int16_t output[kMaxOutputSize];
   size_t samples_per_channel;
-  int num_channels;
+  size_t num_channels;
   NetEqOutputType type;
 
   EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(kMaxOutputSize, output,
@@ -981,13 +980,13 @@
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   int16_t output[kMaxOutputSize];
   size_t samples_per_channel;
-  int num_channels;
+  size_t num_channels;
   NetEqOutputType type;
   EXPECT_EQ(NetEq::kOK,
             neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
                              &num_channels, &type));
   ASSERT_EQ(kMaxOutputSize, samples_per_channel);
-  EXPECT_EQ(1, num_channels);
+  EXPECT_EQ(1u, num_channels);
   EXPECT_EQ(kOutputNormal, type);
 
   EXPECT_CALL(mock_decoder, Die());
@@ -1078,13 +1077,13 @@
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   int16_t output[kMaxOutputSize];
   size_t samples_per_channel;
-  int num_channels;
+  size_t num_channels;
   NetEqOutputType type;
   EXPECT_EQ(NetEq::kOK,
             neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
                              &num_channels, &type));
   EXPECT_EQ(kMaxOutputSize, samples_per_channel);
-  EXPECT_EQ(1, num_channels);
+  EXPECT_EQ(1u, num_channels);
   EXPECT_EQ(kOutputNormal, type);
 
   // Pull audio again. Decoder fails.
@@ -1094,7 +1093,7 @@
   EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
   EXPECT_EQ(kDecoderErrorCode, neteq_->LastDecoderError());
   EXPECT_EQ(kMaxOutputSize, samples_per_channel);
-  EXPECT_EQ(1, num_channels);
+  EXPECT_EQ(1u, num_channels);
   // TODO(minyue): should NetEq better give kOutputPLC, since it is actually an
   // expansion.
   EXPECT_EQ(kOutputNormal, type);
@@ -1104,7 +1103,7 @@
             neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
                              &num_channels, &type));
   EXPECT_EQ(kMaxOutputSize, samples_per_channel);
-  EXPECT_EQ(1, num_channels);
+  EXPECT_EQ(1u, num_channels);
   EXPECT_EQ(kOutputPLC, type);
 
   // Pull audio again, should behave normal.
@@ -1112,7 +1111,7 @@
             neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
                              &num_channels, &type));
   EXPECT_EQ(kMaxOutputSize, samples_per_channel);
-  EXPECT_EQ(1, num_channels);
+  EXPECT_EQ(1u, num_channels);
   EXPECT_EQ(kOutputNormal, type);
 
   EXPECT_CALL(mock_decoder, Die());
@@ -1199,13 +1198,13 @@
   const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
   int16_t output[kMaxOutputSize];
   size_t samples_per_channel;
-  int num_channels;
+  size_t num_channels;
   NetEqOutputType type;
   EXPECT_EQ(NetEq::kOK,
             neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
                              &num_channels, &type));
   EXPECT_EQ(kMaxOutputSize, samples_per_channel);
-  EXPECT_EQ(1, num_channels);
+  EXPECT_EQ(1u, num_channels);
   EXPECT_EQ(kOutputCNG, type);
 
   // Pull audio again. Decoder fails.
@@ -1215,7 +1214,7 @@
   EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
   EXPECT_EQ(kDecoderErrorCode, neteq_->LastDecoderError());
   EXPECT_EQ(kMaxOutputSize, samples_per_channel);
-  EXPECT_EQ(1, num_channels);
+  EXPECT_EQ(1u, num_channels);
   // TODO(minyue): should NetEq better give kOutputPLC, since it is actually an
   // expansion.
   EXPECT_EQ(kOutputCNG, type);
@@ -1225,7 +1224,7 @@
             neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
                              &num_channels, &type));
   EXPECT_EQ(kMaxOutputSize, samples_per_channel);
-  EXPECT_EQ(1, num_channels);
+  EXPECT_EQ(1u, num_channels);
   EXPECT_EQ(kOutputCNG, type);
 
   EXPECT_CALL(mock_decoder, Die());
diff --git a/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
index 1ddc7f2..d3f59ec 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
@@ -27,7 +27,7 @@
 struct TestParameters {
   int frame_size;
   int sample_rate;
-  int num_channels;
+  size_t num_channels;
 };
 
 // This is a parameterized test. The test parameters are supplied through a
@@ -163,7 +163,7 @@
 
   void VerifyOutput(size_t num_samples) {
     for (size_t i = 0; i < num_samples; ++i) {
-      for (int j = 0; j < num_channels_; ++j) {
+      for (size_t j = 0; j < num_channels_; ++j) {
         ASSERT_EQ(output_[i], output_multi_channel_[i * num_channels_ + j]) <<
             "Diff in sample " << i << ", channel " << j << ".";
       }
@@ -214,12 +214,12 @@
       NetEqOutputType output_type;
       // Get audio from mono instance.
       size_t samples_per_channel;
-      int num_channels;
+      size_t num_channels;
       EXPECT_EQ(NetEq::kOK,
                 neteq_mono_->GetAudio(kMaxBlockSize, output_,
                                       &samples_per_channel, &num_channels,
                                       &output_type));
-      EXPECT_EQ(1, num_channels);
+      EXPECT_EQ(1u, num_channels);
       EXPECT_EQ(output_size_samples_, samples_per_channel);
       // Get audio from multi-channel instance.
       ASSERT_EQ(NetEq::kOK,
@@ -239,7 +239,7 @@
     }
   }
 
-  const int num_channels_;
+  const size_t num_channels_;
   const int sample_rate_hz_;
   const int samples_per_ms_;
   const int frame_size_ms_;
diff --git a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
index a20d712..8d52c61 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
@@ -425,7 +425,7 @@
 
   // Get audio from NetEq.
   NetEqOutputType type;
-  int num_channels;
+  size_t num_channels;
   ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, out_len,
                                 &num_channels, &type));
   ASSERT_TRUE((*out_len == kBlockSize8kHz) ||
@@ -608,7 +608,7 @@
   // Pull out all data.
   for (size_t i = 0; i < num_frames; ++i) {
     size_t out_len;
-    int num_channels;
+    size_t num_channels;
     NetEqOutputType type;
     ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                   &num_channels, &type));
@@ -653,7 +653,7 @@
 
     // Pull out data once.
     size_t out_len;
-    int num_channels;
+    size_t num_channels;
     NetEqOutputType type;
     ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                   &num_channels, &type));
@@ -684,7 +684,7 @@
 
     // Pull out data once.
     size_t out_len;
-    int num_channels;
+    size_t num_channels;
     NetEqOutputType type;
     ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                   &num_channels, &type));
@@ -709,7 +709,7 @@
   double next_input_time_ms = 0.0;
   double t_ms;
   size_t out_len;
-  int num_channels;
+  size_t num_channels;
   NetEqOutputType type;
 
   // Insert speech for 5 seconds.
@@ -948,7 +948,7 @@
   for (size_t i = 0; i < kMaxBlockSize; ++i) {
     out_data_[i] = 1;
   }
-  int num_channels;
+  size_t num_channels;
   size_t samples_per_channel;
   EXPECT_EQ(NetEq::kFail,
             neteq_->GetAudio(kMaxBlockSize, out_data_,
@@ -982,7 +982,7 @@
   for (size_t i = 0; i < kMaxBlockSize; ++i) {
     out_data_[i] = 1;
   }
-  int num_channels;
+  size_t num_channels;
   size_t samples_per_channel;
   EXPECT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_,
                                 &samples_per_channel,
@@ -1038,7 +1038,7 @@
     PopulateRtpInfo(0, 0, &rtp_info);
     rtp_info.header.payloadType = payload_type;
 
-    int number_channels = 0;
+    size_t number_channels = 0;
     size_t samples_per_channel = 0;
 
     uint32_t receive_timestamp = 0;
@@ -1060,7 +1060,7 @@
                                  &samples_per_channel,
                                  &number_channels,
                                  &type));
-      ASSERT_EQ(1, number_channels);
+      ASSERT_EQ(1u, number_channels);
       ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
       ASSERT_EQ(kOutputNormal, type);
 
@@ -1082,7 +1082,7 @@
                                &samples_per_channel,
                                &number_channels,
                                &type));
-    ASSERT_EQ(1, number_channels);
+    ASSERT_EQ(1u, number_channels);
     ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
 
     // To be able to test the fading of background noise we need at lease to
@@ -1103,7 +1103,7 @@
                                  &samples_per_channel,
                                  &number_channels,
                                  &type));
-      ASSERT_EQ(1, number_channels);
+      ASSERT_EQ(1u, number_channels);
       ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
       if (type == kOutputPLCtoCNG) {
         plc_to_cng = true;
@@ -1272,7 +1272,7 @@
   // Insert some packets which decode to noise. We are not interested in
   // actual decoded values.
   NetEqOutputType output_type;
-  int num_channels;
+  size_t num_channels;
   size_t samples_per_channel;
   uint32_t receive_timestamp = 0;
   for (int n = 0; n < 100; ++n) {
@@ -1281,7 +1281,7 @@
                                   &samples_per_channel, &num_channels,
                                   &output_type));
     ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
-    ASSERT_EQ(1, num_channels);
+    ASSERT_EQ(1u, num_channels);
 
     rtp_info.header.sequenceNumber++;
     rtp_info.header.timestamp += kBlockSize16kHz;
@@ -1299,7 +1299,7 @@
                                   &samples_per_channel, &num_channels,
                                   &output_type));
     ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
-    ASSERT_EQ(1, num_channels);
+    ASSERT_EQ(1u, num_channels);
     if (n > algorithmic_frame_delay) {
       EXPECT_TRUE(IsAllZero(decoded, samples_per_channel * num_channels));
     }
@@ -1348,7 +1348,7 @@
   // Insert some packets which decode to noise. We are not interested in
   // actual decoded values.
   NetEqOutputType output_type;
-  int num_channels;
+  size_t num_channels;
   size_t samples_per_channel;
   uint32_t receive_timestamp = 0;
   int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
@@ -1358,7 +1358,7 @@
                                   &samples_per_channel, &num_channels,
                                   &output_type));
     ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
-    ASSERT_EQ(1, num_channels);
+    ASSERT_EQ(1u, num_channels);
     rtp_info.header.sequenceNumber++;
     rtp_info.header.timestamp += kBlockSize16kHz;
     receive_timestamp += kBlockSize16kHz;
@@ -1397,7 +1397,7 @@
                                   &samples_per_channel, &num_channels,
                                   &output_type));
     ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
-    ASSERT_EQ(1, num_channels);
+    ASSERT_EQ(1u, num_channels);
     EXPECT_TRUE(IsAllNonZero(decoded, samples_per_channel * num_channels));
   }
 }
@@ -1415,7 +1415,7 @@
   const size_t kPayloadBytes = kSamples * sizeof(int16_t);
   double next_input_time_ms = 0.0;
   int16_t decoded[kBlockSize16kHz];
-  int num_channels;
+  size_t num_channels;
   size_t samples_per_channel;
   NetEqOutputType output_type;
   uint32_t receive_timestamp = 0;
@@ -1468,7 +1468,7 @@
                                   &samples_per_channel, &num_channels,
                                   &output_type));
     ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
-    ASSERT_EQ(1, num_channels);
+    ASSERT_EQ(1u, num_channels);
 
     // Expect delay (in samples) to be less than 2 packets.
     EXPECT_LE(timestamp - PlayoutTimestamp(),
@@ -1519,7 +1519,7 @@
   // Insert three speech packets. Three are needed to get the frame length
   // correct.
   size_t out_len;
-  int num_channels;
+  size_t num_channels;
   NetEqOutputType type;
   uint8_t payload[kPayloadBytes] = {0};
   WebRtcRTPHeader rtp_info;
@@ -1622,7 +1622,7 @@
 
   // Pull audio once and make sure CNG is played.
   size_t out_len;
-  int num_channels;
+  size_t num_channels;
   NetEqOutputType type;
   ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
                                 &num_channels, &type));
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
index 05d559d..0c09e92 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
@@ -50,7 +50,7 @@
                          NetEqDecoder::kDecoderILBC) {}
 
   void SetUp() override {
-    ASSERT_EQ(1, channels_) << "iLBC supports only mono audio.";
+    ASSERT_EQ(1u, channels_) << "iLBC supports only mono audio.";
     AudioEncoderIlbc::Config config;
     config.frame_size_ms = FLAGS_frame_size_ms;
     encoder_.reset(new AudioEncoderIlbc(config));
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
index 66b0903..4ccebb3 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
@@ -59,7 +59,7 @@
       bit_rate_kbps_(FLAGS_bit_rate_kbps) {}
 
 void NetEqIsacQualityTest::SetUp() {
-  ASSERT_EQ(1, channels_) << "iSAC supports only mono audio.";
+  ASSERT_EQ(1u, channels_) << "iSAC supports only mono audio.";
   // Create encoder memory.
   WebRtcIsacfix_Create(&isac_encoder_);
   ASSERT_TRUE(isac_encoder_ != NULL);
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
index 6e1d86c..ac478ab 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
@@ -50,7 +50,7 @@
                          NetEqDecoder::kDecoderPCMu) {}
 
   void SetUp() override {
-    ASSERT_EQ(1, channels_) << "PCMu supports only mono audio.";
+    ASSERT_EQ(1u, channels_) << "PCMu supports only mono audio.";
     AudioEncoderPcmU::Config config;
     config.frame_size_ms = FLAGS_frame_size_ms;
     encoder_.reset(new AudioEncoderPcmU(config));
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
index 6becb86..694b9ed 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
@@ -12,6 +12,7 @@
 #include "webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h"
 
 #include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/format_macros.h"
 
 namespace webrtc {
 namespace test {
@@ -21,11 +22,11 @@
     : codec_(codec),
       decoder_(decoder),
       sample_rate_hz_(CodecSampleRateHz(codec_)),
-      channels_(static_cast<int>(decoder_->Channels())) {
+      channels_(decoder_->Channels()) {
   NetEq::Config config;
   config.sample_rate_hz = sample_rate_hz_;
   neteq_.reset(NetEq::Create(config));
-  printf("%d\n", channels_);
+  printf("%" PRIuS "\n", channels_);
 }
 
 void NetEqExternalDecoderTest::Init() {
@@ -47,7 +48,7 @@
                                                 NetEqOutputType* output_type) {
   // Get audio from regular instance.
   size_t samples_per_channel;
-  int num_channels;
+  size_t num_channels;
   EXPECT_EQ(NetEq::kOK,
             neteq_->GetAudio(max_length,
                              output,
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
index 383b43a..d7b01fe 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
@@ -54,7 +54,7 @@
   std::string name_ = "dummy name";
   AudioDecoder* decoder_;
   int sample_rate_hz_;
-  int channels_;
+  size_t channels_;
   rtc::scoped_ptr<NetEq> neteq_;
 };
 
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
index a979934..7d1f9f9 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
@@ -109,7 +109,7 @@
     static const size_t kOutDataLen =
         kOutputBlockSizeMs * kMaxSamplesPerMs * kMaxChannels;
     int16_t out_data[kOutDataLen];
-    int num_channels;
+    size_t num_channels;
     size_t samples_per_channel;
     int error = neteq->GetAudio(kOutDataLen, out_data, &samples_per_channel,
                                 &num_channels, NULL);
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
index f9b0db3..9c64e0f 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -210,7 +210,7 @@
                                    int out_sampling_khz,
                                    NetEqDecoder decoder_type)
     : decoder_type_(decoder_type),
-      channels_(FLAGS_channels),
+      channels_(static_cast<size_t>(FLAGS_channels)),
       decoded_time_ms_(0),
       decodable_time_ms_(0),
       drift_factor_(FLAGS_drift_factor),
@@ -394,7 +394,7 @@
 }
 
 int NetEqQualityTest::DecodeBlock() {
-  int channels;
+  size_t channels;
   size_t samples;
   int ret = neteq_->GetAudio(out_size_samples_ * channels_, &out_data_[0],
                              &samples, &channels, NULL);
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
index e20be57..c2b2eff 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
@@ -99,7 +99,7 @@
   std::ofstream& Log();
 
   NetEqDecoder decoder_type_;
-  const int channels_;
+  const size_t channels_;
 
  private:
   int decoded_time_ms_;
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
index f1748d5..3d79e5b 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
@@ -609,7 +609,7 @@
       static const size_t kOutDataLen =
           kOutputBlockSizeMs * kMaxSamplesPerMs * kMaxChannels;
       int16_t out_data[kOutDataLen];
-      int num_channels;
+      size_t num_channels;
       size_t samples_per_channel;
       int error = neteq->GetAudio(kOutDataLen, out_data, &samples_per_channel,
                                    &num_channels, NULL);
diff --git a/webrtc/modules/audio_coding/test/EncodeDecodeTest.cc b/webrtc/modules/audio_coding/test/EncodeDecodeTest.cc
index e0222af..ba3c8d9 100644
--- a/webrtc/modules/audio_coding/test/EncodeDecodeTest.cc
+++ b/webrtc/modules/audio_coding/test/EncodeDecodeTest.cc
@@ -52,7 +52,7 @@
 }
 
 void Sender::Setup(AudioCodingModule *acm, RTPStream *rtpStream,
-                   std::string in_file_name, int sample_rate, int channels) {
+                   std::string in_file_name, int sample_rate, size_t channels) {
   struct CodecInst sendCodec;
   int noOfCodecs = acm->NumberOfCodecs();
   int codecNo;
@@ -123,7 +123,7 @@
 }
 
 void Receiver::Setup(AudioCodingModule *acm, RTPStream *rtpStream,
-                     std::string out_file_name, int channels) {
+                     std::string out_file_name, size_t channels) {
   struct CodecInst recvCodec = CodecInst();
   int noOfCodecs;
   EXPECT_EQ(0, acm->InitializeReceiver());
diff --git a/webrtc/modules/audio_coding/test/EncodeDecodeTest.h b/webrtc/modules/audio_coding/test/EncodeDecodeTest.h
index 3881062..f9a9a5b 100644
--- a/webrtc/modules/audio_coding/test/EncodeDecodeTest.h
+++ b/webrtc/modules/audio_coding/test/EncodeDecodeTest.h
@@ -48,7 +48,7 @@
  public:
   Sender();
   void Setup(AudioCodingModule *acm, RTPStream *rtpStream,
-             std::string in_file_name, int sample_rate, int channels);
+             std::string in_file_name, int sample_rate, size_t channels);
   void Teardown();
   void Run();
   bool Add10MsData();
@@ -71,7 +71,7 @@
   Receiver();
   virtual ~Receiver() {};
   void Setup(AudioCodingModule *acm, RTPStream *rtpStream,
-             std::string out_file_name, int channels);
+             std::string out_file_name, size_t channels);
   void Teardown();
   void Run();
   virtual bool IncomingPacket();
diff --git a/webrtc/modules/audio_coding/test/opus_test.cc b/webrtc/modules/audio_coding/test/opus_test.cc
index 466db9f..104b5e5 100644
--- a/webrtc/modules/audio_coding/test/opus_test.cc
+++ b/webrtc/modules/audio_coding/test/opus_test.cc
@@ -62,7 +62,7 @@
   return;
 #else
   uint16_t frequency_hz;
-  int audio_channels;
+  size_t audio_channels;
   int16_t test_cntr = 0;
 
   // Open both mono and stereo test files in 32 kHz.
@@ -205,7 +205,7 @@
 #endif
 }
 
-void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
+void OpusTest::Run(TestPackStereo* channel, size_t channels, int bitrate,
                    size_t frame_length, int percent_loss) {
   AudioFrame audio_frame;
   int32_t out_freq_hz_b = out_file_.SamplingFrequency();
diff --git a/webrtc/modules/audio_coding/test/opus_test.h b/webrtc/modules/audio_coding/test/opus_test.h
index 88ef0ec..93c9ffb 100644
--- a/webrtc/modules/audio_coding/test/opus_test.h
+++ b/webrtc/modules/audio_coding/test/opus_test.h
@@ -32,7 +32,7 @@
 
  private:
   void Run(TestPackStereo* channel,
-           int channels,
+           size_t channels,
            int bitrate,
            size_t frame_length,
            int percent_loss = 0);
diff --git a/webrtc/modules/audio_coding/test/target_delay_unittest.cc b/webrtc/modules/audio_coding/test/target_delay_unittest.cc
index 97471bb..195e9d8 100644
--- a/webrtc/modules/audio_coding/test/target_delay_unittest.cc
+++ b/webrtc/modules/audio_coding/test/target_delay_unittest.cc
@@ -153,7 +153,7 @@
       ASSERT_EQ(0, acm_->PlayoutData10Ms(-1, &frame));
       // Had to use ASSERT_TRUE, ASSERT_EQ generated error.
       ASSERT_TRUE(kSampleRateHz == frame.sample_rate_hz_);
-      ASSERT_EQ(1, frame.num_channels_);
+      ASSERT_EQ(1u, frame.num_channels_);
       ASSERT_TRUE(kSampleRateHz / 100 == frame.samples_per_channel_);
     }
   }
diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index 0ac9eae..afb060f 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -50,8 +50,8 @@
 }
 
 // Return the max number of channels from a |list| composed of AudioFrames.
-int MaxNumChannels(const AudioFrameList* list) {
-  int max_num_channels = 1;
+size_t MaxNumChannels(const AudioFrameList* list) {
+  size_t max_num_channels = 1;
   for (AudioFrameList::const_iterator iter = list->begin();
        iter != list->end();
        ++iter) {
@@ -278,7 +278,7 @@
         //                with an API instead of dynamically.
 
         // Find the max channels over all mixing lists.
-        const int num_mixed_channels = std::max(MaxNumChannels(&mixList),
+        const size_t num_mixed_channels = std::max(MaxNumChannels(&mixList),
             std::max(MaxNumChannels(&additionalFramesList),
                      MaxNumChannels(&rampOutList)));
 
diff --git a/webrtc/modules/audio_device/android/audio_device_unittest.cc b/webrtc/modules/audio_device/android/audio_device_unittest.cc
index 7b2d635..768047d 100644
--- a/webrtc/modules/audio_device/android/audio_device_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_device_unittest.cc
@@ -383,7 +383,7 @@
                 int32_t(const void* audioSamples,
                         const size_t nSamples,
                         const size_t nBytesPerSample,
-                        const uint8_t nChannels,
+                        const size_t nChannels,
                         const uint32_t samplesPerSec,
                         const uint32_t totalDelayMS,
                         const int32_t clockDrift,
@@ -393,7 +393,7 @@
   MOCK_METHOD8(NeedMorePlayData,
                int32_t(const size_t nSamples,
                        const size_t nBytesPerSample,
-                       const uint8_t nChannels,
+                       const size_t nChannels,
                        const uint32_t samplesPerSec,
                        void* audioSamples,
                        size_t& nSamplesOut,
@@ -423,7 +423,7 @@
   int32_t RealRecordedDataIsAvailable(const void* audioSamples,
                                       const size_t nSamples,
                                       const size_t nBytesPerSample,
-                                      const uint8_t nChannels,
+                                      const size_t nChannels,
                                       const uint32_t samplesPerSec,
                                       const uint32_t totalDelayMS,
                                       const int32_t clockDrift,
@@ -445,7 +445,7 @@
 
   int32_t RealNeedMorePlayData(const size_t nSamples,
                                const size_t nBytesPerSample,
-                               const uint8_t nChannels,
+                               const size_t nChannels,
                                const uint32_t samplesPerSec,
                                void* audioSamples,
                                size_t& nSamplesOut,
@@ -521,10 +521,10 @@
   int record_sample_rate() const {
     return record_parameters_.sample_rate();
   }
-  int playout_channels() const {
+  size_t playout_channels() const {
     return playout_parameters_.channels();
   }
-  int record_channels() const {
+  size_t record_channels() const {
     return record_parameters_.channels();
   }
   size_t playout_frames_per_10ms_buffer() const {
@@ -931,7 +931,7 @@
 // not contain any explicit verification that the audio quality is perfect.
 TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
   // TODO(henrika): extend test when mono output is supported.
-  EXPECT_EQ(1, playout_channels());
+  EXPECT_EQ(1u, playout_channels());
   NiceMock<MockAudioTransport> mock(kPlayout);
   const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
   std::string file_name = GetFileName(playout_sample_rate());
diff --git a/webrtc/modules/audio_device/android/audio_manager.cc b/webrtc/modules/audio_device/android/audio_manager.cc
index 5cca52d..1d08a6a 100644
--- a/webrtc/modules/audio_device/android/audio_manager.cc
+++ b/webrtc/modules/audio_device/android/audio_manager.cc
@@ -214,9 +214,9 @@
   hardware_ns_ = hardware_ns;
   low_latency_playout_ = low_latency_output;
   // TODO(henrika): add support for stereo output.
-  playout_parameters_.reset(sample_rate, channels,
+  playout_parameters_.reset(sample_rate, static_cast<size_t>(channels),
                             static_cast<size_t>(output_buffer_size));
-  record_parameters_.reset(sample_rate, channels,
+  record_parameters_.reset(sample_rate, static_cast<size_t>(channels),
                            static_cast<size_t>(input_buffer_size));
 }
 
diff --git a/webrtc/modules/audio_device/android/audio_manager_unittest.cc b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
index a5bc840..ddae730 100644
--- a/webrtc/modules/audio_device/android/audio_manager_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
@@ -82,14 +82,14 @@
   PRINT("%saudio layer: %s\n", kTag,
         low_latency_out ? "Low latency OpenSL" : "Java/JNI based AudioTrack");
   PRINT("%ssample rate: %d Hz\n", kTag, playout_parameters_.sample_rate());
-  PRINT("%schannels: %d\n", kTag, playout_parameters_.channels());
+  PRINT("%schannels: %" PRIuS "\n", kTag, playout_parameters_.channels());
   PRINT("%sframes per buffer: %" PRIuS " <=> %.2f ms\n", kTag,
         playout_parameters_.frames_per_buffer(),
         playout_parameters_.GetBufferSizeInMilliseconds());
   PRINT("RECORD: \n");
   PRINT("%saudio layer: %s\n", kTag, "Java/JNI based AudioRecord");
   PRINT("%ssample rate: %d Hz\n", kTag, record_parameters_.sample_rate());
-  PRINT("%schannels: %d\n", kTag, record_parameters_.channels());
+  PRINT("%schannels: %" PRIuS "\n", kTag, record_parameters_.channels());
   PRINT("%sframes per buffer: %" PRIuS " <=> %.2f ms\n", kTag,
         record_parameters_.frames_per_buffer(),
         record_parameters_.GetBufferSizeInMilliseconds());
@@ -119,7 +119,7 @@
   AudioParameters params;
   EXPECT_FALSE(params.is_valid());
   EXPECT_EQ(0, params.sample_rate());
-  EXPECT_EQ(0, params.channels());
+  EXPECT_EQ(0U, params.channels());
   EXPECT_EQ(0U, params.frames_per_buffer());
   EXPECT_EQ(0U, params.frames_per_10ms_buffer());
   EXPECT_EQ(0U, params.GetBytesPerFrame());
@@ -131,7 +131,7 @@
 // Basic test of the AudioParameters class using non default construction.
 TEST_F(AudioManagerTest, AudioParametersWithNonDefaultConstruction) {
   const int kSampleRate = 48000;
-  const int kChannels = 1;
+  const size_t kChannels = 1;
   const size_t kFramesPerBuffer = 480;
   const size_t kFramesPer10msBuffer = 480;
   const size_t kBytesPerFrame = 2;
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.cc b/webrtc/modules/audio_device/android/audio_record_jni.cc
index 4a63197..5dda724 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_record_jni.cc
@@ -43,7 +43,7 @@
 AudioRecordJni::JavaAudioRecord::~JavaAudioRecord() {}
 
 int AudioRecordJni::JavaAudioRecord::InitRecording(
-    int sample_rate, int channels) {
+    int sample_rate, size_t channels) {
   return audio_record_->CallIntMethod(init_recording_,
                                       static_cast<jint>(sample_rate),
                                       static_cast<jint>(channels));
@@ -185,8 +185,8 @@
   const int sample_rate_hz = audio_parameters_.sample_rate();
   ALOGD("SetRecordingSampleRate(%d)", sample_rate_hz);
   audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
-  const int channels = audio_parameters_.channels();
-  ALOGD("SetRecordingChannels(%d)", channels);
+  const size_t channels = audio_parameters_.channels();
+  ALOGD("SetRecordingChannels(%" PRIuS ")", channels);
   audio_device_buffer_->SetRecordingChannels(channels);
   total_delay_in_milliseconds_ =
       audio_manager_->GetDelayEstimateInMilliseconds();
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.h b/webrtc/modules/audio_device/android/audio_record_jni.h
index a847180..766316a 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.h
+++ b/webrtc/modules/audio_device/android/audio_record_jni.h
@@ -49,7 +49,7 @@
                    rtc::scoped_ptr<GlobalRef> audio_track);
     ~JavaAudioRecord();
 
-    int InitRecording(int sample_rate, int channels);
+    int InitRecording(int sample_rate, size_t channels);
     bool StartRecording();
     bool StopRecording();
     bool EnableBuiltInAEC(bool enable);
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.cc b/webrtc/modules/audio_device/android/audio_track_jni.cc
index c660868..057e016 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_track_jni.cc
@@ -202,8 +202,8 @@
   const int sample_rate_hz = audio_parameters_.sample_rate();
   ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
   audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
-  const int channels = audio_parameters_.channels();
-  ALOGD("SetPlayoutChannels(%d)", channels);
+  const size_t channels = audio_parameters_.channels();
+  ALOGD("SetPlayoutChannels(%" PRIuS ")", channels);
   audio_device_buffer_->SetPlayoutChannels(channels);
 }
 
diff --git a/webrtc/modules/audio_device/android/opensles_player.cc b/webrtc/modules/audio_device/android/opensles_player.cc
index 9dc001c..d2bff49 100644
--- a/webrtc/modules/audio_device/android/opensles_player.cc
+++ b/webrtc/modules/audio_device/android/opensles_player.cc
@@ -179,15 +179,15 @@
   const int sample_rate_hz = audio_parameters_.sample_rate();
   ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
   audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
-  const int channels = audio_parameters_.channels();
-  ALOGD("SetPlayoutChannels(%d)", channels);
+  const size_t channels = audio_parameters_.channels();
+  ALOGD("SetPlayoutChannels(%" PRIuS ")", channels);
   audio_device_buffer_->SetPlayoutChannels(channels);
   RTC_CHECK(audio_device_buffer_);
   AllocateDataBuffers();
 }
 
 SLDataFormat_PCM OpenSLESPlayer::CreatePCMConfiguration(
-    int channels,
+    size_t channels,
     int sample_rate,
     size_t bits_per_sample) {
   ALOGD("CreatePCMConfiguration");
diff --git a/webrtc/modules/audio_device/android/opensles_player.h b/webrtc/modules/audio_device/android/opensles_player.h
index c9aa086..fa9e931 100644
--- a/webrtc/modules/audio_device/android/opensles_player.h
+++ b/webrtc/modules/audio_device/android/opensles_player.h
@@ -94,7 +94,7 @@
   void EnqueuePlayoutData();
 
   // Configures the SL_DATAFORMAT_PCM structure.
-  SLDataFormat_PCM CreatePCMConfiguration(int channels,
+  SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
                                           int sample_rate,
                                           size_t bits_per_sample);
 
diff --git a/webrtc/modules/audio_device/audio_device_buffer.cc b/webrtc/modules/audio_device/audio_device_buffer.cc
index e7b487d..48ae88e 100644
--- a/webrtc/modules/audio_device/audio_device_buffer.cc
+++ b/webrtc/modules/audio_device/audio_device_buffer.cc
@@ -169,7 +169,7 @@
 //  SetRecordingChannels
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceBuffer::SetRecordingChannels(uint8_t channels)
+int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels)
 {
     CriticalSectionScoped lock(&_critSect);
     _recChannels = channels;
@@ -181,7 +181,7 @@
 //  SetPlayoutChannels
 // ----------------------------------------------------------------------------
 
-int32_t AudioDeviceBuffer::SetPlayoutChannels(uint8_t channels)
+int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels)
 {
     CriticalSectionScoped lock(&_critSect);
     _playChannels = channels;
@@ -239,7 +239,7 @@
 //  RecordingChannels
 // ----------------------------------------------------------------------------
 
-uint8_t AudioDeviceBuffer::RecordingChannels() const
+size_t AudioDeviceBuffer::RecordingChannels() const
 {
     return _recChannels;
 }
@@ -248,7 +248,7 @@
 //  PlayoutChannels
 // ----------------------------------------------------------------------------
 
-uint8_t AudioDeviceBuffer::PlayoutChannels() const
+size_t AudioDeviceBuffer::PlayoutChannels() const
 {
     return _playChannels;
 }
@@ -487,7 +487,7 @@
 {
     uint32_t playSampleRate = 0;
     size_t playBytesPerSample = 0;
-    uint8_t playChannels = 0;
+    size_t playChannels = 0;
     {
         CriticalSectionScoped lock(&_critSect);
 
diff --git a/webrtc/modules/audio_device/audio_device_buffer.h b/webrtc/modules/audio_device/audio_device_buffer.h
index 2ab7ff5..1095971 100644
--- a/webrtc/modules/audio_device/audio_device_buffer.h
+++ b/webrtc/modules/audio_device/audio_device_buffer.h
@@ -40,10 +40,10 @@
     int32_t RecordingSampleRate() const;
     int32_t PlayoutSampleRate() const;
 
-    virtual int32_t SetRecordingChannels(uint8_t channels);
-    virtual int32_t SetPlayoutChannels(uint8_t channels);
-    uint8_t RecordingChannels() const;
-    uint8_t PlayoutChannels() const;
+    virtual int32_t SetRecordingChannels(size_t channels);
+    virtual int32_t SetPlayoutChannels(size_t channels);
+    size_t RecordingChannels() const;
+    size_t PlayoutChannels() const;
     int32_t SetRecordingChannel(
         const AudioDeviceModule::ChannelType channel);
     int32_t RecordingChannel(
@@ -80,8 +80,8 @@
     uint32_t                  _recSampleRate;
     uint32_t                  _playSampleRate;
 
-    uint8_t                   _recChannels;
-    uint8_t                   _playChannels;
+    size_t                    _recChannels;
+    size_t                    _playChannels;
 
     // selected recording channel (left/right/both)
     AudioDeviceModule::ChannelType _recChannel;
diff --git a/webrtc/modules/audio_device/dummy/file_audio_device.cc b/webrtc/modules/audio_device/dummy/file_audio_device.cc
index ea61ce6..aac0962 100644
--- a/webrtc/modules/audio_device/dummy/file_audio_device.cc
+++ b/webrtc/modules/audio_device/dummy/file_audio_device.cc
@@ -14,9 +14,9 @@
 namespace webrtc {
 
 const int kRecordingFixedSampleRate = 48000;
-const int kRecordingNumChannels = 2;
+const size_t kRecordingNumChannels = 2;
 const int kPlayoutFixedSampleRate = 48000;
-const int kPlayoutNumChannels = 2;
+const size_t kPlayoutNumChannels = 2;
 const size_t kPlayoutBufferSize =
     kPlayoutFixedSampleRate / 100 * kPlayoutNumChannels * 2;
 const size_t kRecordingBufferSize =
diff --git a/webrtc/modules/audio_device/include/audio_device_defines.h b/webrtc/modules/audio_device/include/audio_device_defines.h
index 3ebbd23..b847729 100644
--- a/webrtc/modules/audio_device/include/audio_device_defines.h
+++ b/webrtc/modules/audio_device/include/audio_device_defines.h
@@ -49,7 +49,7 @@
   virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
                                           const size_t nSamples,
                                           const size_t nBytesPerSample,
-                                          const uint8_t nChannels,
+                                          const size_t nChannels,
                                           const uint32_t samplesPerSec,
                                           const uint32_t totalDelayMS,
                                           const int32_t clockDrift,
@@ -59,7 +59,7 @@
 
   virtual int32_t NeedMorePlayData(const size_t nSamples,
                                    const size_t nBytesPerSample,
-                                   const uint8_t nChannels,
+                                   const size_t nChannels,
                                    const uint32_t samplesPerSec,
                                    void* audioSamples,
                                    size_t& nSamplesOut,
@@ -82,10 +82,10 @@
   // TODO(xians): Remove this interface after Chrome and Libjingle switches
   // to OnData().
   virtual int OnDataAvailable(const int voe_channels[],
-                              int number_of_voe_channels,
+                              size_t number_of_voe_channels,
                               const int16_t* audio_data,
                               int sample_rate,
-                              int number_of_channels,
+                              size_t number_of_channels,
                               size_t number_of_frames,
                               int audio_delay_milliseconds,
                               int current_volume,
@@ -103,7 +103,7 @@
                       const void* audio_data,
                       int bits_per_sample,
                       int sample_rate,
-                      int number_of_channels,
+                      size_t number_of_channels,
                       size_t number_of_frames) {}
 
   // Method to push the captured audio data to the specific VoE channel.
@@ -116,7 +116,7 @@
                                const void* audio_data,
                                int bits_per_sample,
                                int sample_rate,
-                               int number_of_channels,
+                               size_t number_of_channels,
                                size_t number_of_frames) {}
 
   // Method to pull mixed render audio data from all active VoE channels.
@@ -125,7 +125,7 @@
   // channel.
   virtual void PullRenderData(int bits_per_sample,
                               int sample_rate,
-                              int number_of_channels,
+                              size_t number_of_channels,
                               size_t number_of_frames,
                               void* audio_data,
                               int64_t* elapsed_time_ms,
@@ -149,27 +149,27 @@
         channels_(0),
         frames_per_buffer_(0),
         frames_per_10ms_buffer_(0) {}
-  AudioParameters(int sample_rate, int channels, size_t frames_per_buffer)
+  AudioParameters(int sample_rate, size_t channels, size_t frames_per_buffer)
       : sample_rate_(sample_rate),
         channels_(channels),
         frames_per_buffer_(frames_per_buffer),
         frames_per_10ms_buffer_(static_cast<size_t>(sample_rate / 100)) {}
-  void reset(int sample_rate, int channels, size_t frames_per_buffer) {
+  void reset(int sample_rate, size_t channels, size_t frames_per_buffer) {
     sample_rate_ = sample_rate;
     channels_ = channels;
     frames_per_buffer_ = frames_per_buffer;
     frames_per_10ms_buffer_ = static_cast<size_t>(sample_rate / 100);
   }
   size_t bits_per_sample() const { return kBitsPerSample; }
-  void reset(int sample_rate, int channels, double ms_per_buffer) {
+  void reset(int sample_rate, size_t channels, double ms_per_buffer) {
     reset(sample_rate, channels,
           static_cast<size_t>(sample_rate * ms_per_buffer + 0.5));
   }
-  void reset(int sample_rate, int channels) {
+  void reset(int sample_rate, size_t channels) {
     reset(sample_rate, channels, static_cast<size_t>(0));
   }
   int sample_rate() const { return sample_rate_; }
-  int channels() const { return channels_; }
+  size_t channels() const { return channels_; }
   size_t frames_per_buffer() const { return frames_per_buffer_; }
   size_t frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
   size_t GetBytesPerFrame() const { return channels_ * kBitsPerSample / 8; }
@@ -200,7 +200,7 @@
 
  private:
   int sample_rate_;
-  int channels_;
+  size_t channels_;
   size_t frames_per_buffer_;
   size_t frames_per_10ms_buffer_;
 };
diff --git a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
index 1c9746f..076a674 100644
--- a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
+++ b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
@@ -373,7 +373,7 @@
                 int32_t(const void* audioSamples,
                         const size_t nSamples,
                         const size_t nBytesPerSample,
-                        const uint8_t nChannels,
+                        const size_t nChannels,
                         const uint32_t samplesPerSec,
                         const uint32_t totalDelayMS,
                         const int32_t clockDrift,
@@ -383,7 +383,7 @@
   MOCK_METHOD8(NeedMorePlayData,
                int32_t(const size_t nSamples,
                        const size_t nBytesPerSample,
-                       const uint8_t nChannels,
+                       const size_t nChannels,
                        const uint32_t samplesPerSec,
                        void* audioSamples,
                        size_t& nSamplesOut,
@@ -413,7 +413,7 @@
   int32_t RealRecordedDataIsAvailable(const void* audioSamples,
                                       const size_t nSamples,
                                       const size_t nBytesPerSample,
-                                      const uint8_t nChannels,
+                                      const size_t nChannels,
                                       const uint32_t samplesPerSec,
                                       const uint32_t totalDelayMS,
                                       const int32_t clockDrift,
@@ -437,7 +437,7 @@
 
   int32_t RealNeedMorePlayData(const size_t nSamples,
                                const size_t nBytesPerSample,
-                               const uint8_t nChannels,
+                               const size_t nChannels,
                                const uint32_t samplesPerSec,
                                void* audioSamples,
                                size_t& nSamplesOut,
diff --git a/webrtc/modules/audio_device/test/audio_device_test_api.cc b/webrtc/modules/audio_device/test/audio_device_test_api.cc
index 2bbdfed..26a2dcd 100644
--- a/webrtc/modules/audio_device/test/audio_device_test_api.cc
+++ b/webrtc/modules/audio_device/test/audio_device_test_api.cc
@@ -85,7 +85,7 @@
   int32_t RecordedDataIsAvailable(const void* audioSamples,
                                   const size_t nSamples,
                                   const size_t nBytesPerSample,
-                                  const uint8_t nChannels,
+                                  const size_t nChannels,
                                   const uint32_t sampleRate,
                                   const uint32_t totalDelay,
                                   const int32_t clockSkew,
@@ -110,7 +110,7 @@
 
   int32_t NeedMorePlayData(const size_t nSamples,
                            const size_t nBytesPerSample,
-                           const uint8_t nChannels,
+                           const size_t nChannels,
                            const uint32_t sampleRate,
                            void* audioSamples,
                            size_t& nSamplesOut,
diff --git a/webrtc/modules/audio_device/test/func_test_manager.cc b/webrtc/modules/audio_device/test/func_test_manager.cc
index b358e50..0a2963e 100644
--- a/webrtc/modules/audio_device/test/func_test_manager.cc
+++ b/webrtc/modules/audio_device/test/func_test_manager.cc
@@ -194,7 +194,7 @@
     const void* audioSamples,
     const size_t nSamples,
     const size_t nBytesPerSample,
-    const uint8_t nChannels,
+    const size_t nChannels,
     const uint32_t samplesPerSec,
     const uint32_t totalDelayMS,
     const int32_t clockDrift,
@@ -339,7 +339,7 @@
 int32_t AudioTransportImpl::NeedMorePlayData(
     const size_t nSamples,
     const size_t nBytesPerSample,
-    const uint8_t nChannels,
+    const size_t nChannels,
     const uint32_t samplesPerSec,
     void* audioSamples,
     size_t& nSamplesOut,
@@ -365,7 +365,7 @@
                 int16_t* ptr16Out = NULL;
 
                 const size_t nSamplesIn = packet->nSamples;
-                const uint8_t nChannelsIn = packet->nChannels;
+                const size_t nChannelsIn = packet->nChannels;
                 const uint32_t samplesPerSecIn = packet->samplesPerSec;
                 const size_t nBytesPerSampleIn = packet->nBytesPerSample;
 
diff --git a/webrtc/modules/audio_device/test/func_test_manager.h b/webrtc/modules/audio_device/test/func_test_manager.h
index 1870709..b7cc81c 100644
--- a/webrtc/modules/audio_device/test/func_test_manager.h
+++ b/webrtc/modules/audio_device/test/func_test_manager.h
@@ -49,7 +49,7 @@
     uint8_t dataBuffer[4 * 960];
     size_t nSamples;
     size_t nBytesPerSample;
-    uint8_t nChannels;
+    size_t nChannels;
     uint32_t samplesPerSec;
 };
 
@@ -88,7 +88,7 @@
     int32_t RecordedDataIsAvailable(const void* audioSamples,
                                     const size_t nSamples,
                                     const size_t nBytesPerSample,
-                                    const uint8_t nChannels,
+                                    const size_t nChannels,
                                     const uint32_t samplesPerSec,
                                     const uint32_t totalDelayMS,
                                     const int32_t clockDrift,
@@ -98,7 +98,7 @@
 
     int32_t NeedMorePlayData(const size_t nSamples,
                              const size_t nBytesPerSample,
-                             const uint8_t nChannels,
+                             const size_t nChannels,
                              const uint32_t samplesPerSec,
                              void* audioSamples,
                              size_t& nSamplesOut,
diff --git a/webrtc/modules/audio_processing/audio_buffer.cc b/webrtc/modules/audio_processing/audio_buffer.cc
index 77bda79..ff64267 100644
--- a/webrtc/modules/audio_processing/audio_buffer.cc
+++ b/webrtc/modules/audio_processing/audio_buffer.cc
@@ -44,9 +44,9 @@
 }  // namespace
 
 AudioBuffer::AudioBuffer(size_t input_num_frames,
-                         int num_input_channels,
+                         size_t num_input_channels,
                          size_t process_num_frames,
-                         int num_process_channels,
+                         size_t num_process_channels,
                          size_t output_num_frames)
   : input_num_frames_(input_num_frames),
     num_input_channels_(num_input_channels),
@@ -74,7 +74,7 @@
                                                    num_proc_channels_));
 
     if (input_num_frames_ != proc_num_frames_) {
-      for (int i = 0; i < num_proc_channels_; ++i) {
+      for (size_t i = 0; i < num_proc_channels_; ++i) {
         input_resamplers_.push_back(
             new PushSincResampler(input_num_frames_,
                                   proc_num_frames_));
@@ -82,7 +82,7 @@
     }
 
     if (output_num_frames_ != proc_num_frames_) {
-      for (int i = 0; i < num_proc_channels_; ++i) {
+      for (size_t i = 0; i < num_proc_channels_; ++i) {
         output_resamplers_.push_back(
             new PushSincResampler(proc_num_frames_,
                                   output_num_frames_));
@@ -130,7 +130,7 @@
 
   // Resample.
   if (input_num_frames_ != proc_num_frames_) {
-    for (int i = 0; i < num_proc_channels_; ++i) {
+    for (size_t i = 0; i < num_proc_channels_; ++i) {
       input_resamplers_[i]->Resample(data_ptr[i],
                                      input_num_frames_,
                                      process_buffer_->channels()[i],
@@ -140,7 +140,7 @@
   }
 
   // Convert to the S16 range.
-  for (int i = 0; i < num_proc_channels_; ++i) {
+  for (size_t i = 0; i < num_proc_channels_; ++i) {
     FloatToFloatS16(data_ptr[i],
                     proc_num_frames_,
                     data_->fbuf()->channels()[i]);
@@ -158,7 +158,7 @@
     // Convert to an intermediate buffer for subsequent resampling.
     data_ptr = process_buffer_->channels();
   }
-  for (int i = 0; i < num_channels_; ++i) {
+  for (size_t i = 0; i < num_channels_; ++i) {
     FloatS16ToFloat(data_->fbuf()->channels()[i],
                     proc_num_frames_,
                     data_ptr[i]);
@@ -166,7 +166,7 @@
 
   // Resample.
   if (output_num_frames_ != proc_num_frames_) {
-    for (int i = 0; i < num_channels_; ++i) {
+    for (size_t i = 0; i < num_channels_; ++i) {
       output_resamplers_[i]->Resample(data_ptr[i],
                                       proc_num_frames_,
                                       data[i],
@@ -175,7 +175,7 @@
   }
 
   // Upmix.
-  for (int i = num_channels_; i < stream_config.num_channels(); ++i) {
+  for (size_t i = num_channels_; i < stream_config.num_channels(); ++i) {
     memcpy(data[i], data[0], output_num_frames_ * sizeof(**data));
   }
 }
@@ -197,13 +197,13 @@
   return data_->ibuf()->channels();
 }
 
-const int16_t* const* AudioBuffer::split_bands_const(int channel) const {
+const int16_t* const* AudioBuffer::split_bands_const(size_t channel) const {
   return split_data_.get() ?
          split_data_->ibuf_const()->bands(channel) :
          data_->ibuf_const()->bands(channel);
 }
 
-int16_t* const* AudioBuffer::split_bands(int channel) {
+int16_t* const* AudioBuffer::split_bands(size_t channel) {
   mixed_low_pass_valid_ = false;
   return split_data_.get() ?
          split_data_->ibuf()->bands(channel) :
@@ -254,13 +254,13 @@
   return data_->fbuf()->channels();
 }
 
-const float* const* AudioBuffer::split_bands_const_f(int channel) const {
+const float* const* AudioBuffer::split_bands_const_f(size_t channel) const {
   return split_data_.get() ?
          split_data_->fbuf_const()->bands(channel) :
          data_->fbuf_const()->bands(channel);
 }
 
-float* const* AudioBuffer::split_bands_f(int channel) {
+float* const* AudioBuffer::split_bands_f(size_t channel) {
   mixed_low_pass_valid_ = false;
   return split_data_.get() ?
          split_data_->fbuf()->bands(channel) :
@@ -341,11 +341,11 @@
   return activity_;
 }
 
-int AudioBuffer::num_channels() const {
+size_t AudioBuffer::num_channels() const {
   return num_channels_;
 }
 
-void AudioBuffer::set_num_channels(int num_channels) {
+void AudioBuffer::set_num_channels(size_t num_channels) {
   num_channels_ = num_channels;
 }
 
@@ -398,7 +398,7 @@
 
   // Resample.
   if (input_num_frames_ != proc_num_frames_) {
-    for (int i = 0; i < num_proc_channels_; ++i) {
+    for (size_t i = 0; i < num_proc_channels_; ++i) {
       input_resamplers_[i]->Resample(input_buffer_->fbuf_const()->channels()[i],
                                      input_num_frames_,
                                      data_->fbuf()->channels()[i],
@@ -423,7 +423,7 @@
       output_buffer_.reset(
           new IFChannelBuffer(output_num_frames_, num_channels_));
     }
-    for (int i = 0; i < num_channels_; ++i) {
+    for (size_t i = 0; i < num_channels_; ++i) {
       output_resamplers_[i]->Resample(
           data_->fbuf()->channels()[i], proc_num_frames_,
           output_buffer_->fbuf()->channels()[i], output_num_frames_);
@@ -448,7 +448,7 @@
         new ChannelBuffer<int16_t>(num_split_frames_,
                                    num_proc_channels_));
   }
-  for (int i = 0; i < num_proc_channels_; i++) {
+  for (size_t i = 0; i < num_proc_channels_; i++) {
     memcpy(low_pass_reference_channels_->channels()[i],
            split_bands_const(i)[kBand0To8kHz],
            low_pass_reference_channels_->num_frames_per_band() *
diff --git a/webrtc/modules/audio_processing/audio_buffer.h b/webrtc/modules/audio_processing/audio_buffer.h
index 48c9488..ff12ca2 100644
--- a/webrtc/modules/audio_processing/audio_buffer.h
+++ b/webrtc/modules/audio_processing/audio_buffer.h
@@ -34,14 +34,14 @@
  public:
   // TODO(ajm): Switch to take ChannelLayouts.
   AudioBuffer(size_t input_num_frames,
-              int num_input_channels,
+              size_t num_input_channels,
               size_t process_num_frames,
-              int num_process_channels,
+              size_t num_process_channels,
               size_t output_num_frames);
   virtual ~AudioBuffer();
 
-  int num_channels() const;
-  void set_num_channels(int num_channels);
+  size_t num_channels() const;
+  void set_num_channels(size_t num_channels);
   size_t num_frames() const;
   size_t num_frames_per_band() const;
   size_t num_keyboard_frames() const;
@@ -65,10 +65,10 @@
   // 0 <= channel < |num_proc_channels_|
   // 0 <= band < |num_bands_|
   // 0 <= sample < |num_split_frames_|
-  int16_t* const* split_bands(int channel);
-  const int16_t* const* split_bands_const(int channel) const;
-  float* const* split_bands_f(int channel);
-  const float* const* split_bands_const_f(int channel) const;
+  int16_t* const* split_bands(size_t channel);
+  const int16_t* const* split_bands_const(size_t channel) const;
+  float* const* split_bands_f(size_t channel);
+  const float* const* split_bands_const_f(size_t channel) const;
 
   // Returns a pointer array to the channels for a specific band.
   // Usage:
@@ -128,16 +128,16 @@
   // The audio is passed into DeinterleaveFrom() or CopyFrom() with input
   // format (samples per channel and number of channels).
   const size_t input_num_frames_;
-  const int num_input_channels_;
+  const size_t num_input_channels_;
   // The audio is stored by DeinterleaveFrom() or CopyFrom() with processing
   // format.
   const size_t proc_num_frames_;
-  const int num_proc_channels_;
+  const size_t num_proc_channels_;
   // The audio is returned by InterleaveTo() and CopyTo() with output samples
   // per channels and the current number of channels. This last one can be
   // changed at any time using set_num_channels().
   const size_t output_num_frames_;
-  int num_channels_;
+  size_t num_channels_;
 
   size_t num_bands_;
   size_t num_split_frames_;
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc
index fea5785..744309c 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -410,16 +410,13 @@
 
 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
   for (const auto& stream : config.streams) {
-    if (stream.num_channels() < 0) {
-      return kBadNumberChannelsError;
-    }
     if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) {
       return kBadSampleRateError;
     }
   }
 
-  const int num_in_channels = config.input_stream().num_channels();
-  const int num_out_channels = config.output_stream().num_channels();
+  const size_t num_in_channels = config.input_stream().num_channels();
+  const size_t num_out_channels = config.output_stream().num_channels();
 
   // Need at least one input channel.
   // Need either one output channel or as many outputs as there are inputs.
@@ -429,7 +426,7 @@
   }
 
   if (capture_nonlocked_.beamformer_enabled &&
-      static_cast<size_t>(num_in_channels) != capture_.array_geometry.size()) {
+      num_in_channels != capture_.array_geometry.size()) {
     return kBadNumberChannelsError;
   }
 
@@ -527,22 +524,22 @@
   return capture_nonlocked_.split_rate;
 }
 
-int AudioProcessingImpl::num_reverse_channels() const {
+size_t AudioProcessingImpl::num_reverse_channels() const {
   // Used as callback from submodules, hence locking is not allowed.
   return formats_.rev_proc_format.num_channels();
 }
 
-int AudioProcessingImpl::num_input_channels() const {
+size_t AudioProcessingImpl::num_input_channels() const {
   // Used as callback from submodules, hence locking is not allowed.
   return formats_.api_format.input_stream().num_channels();
 }
 
-int AudioProcessingImpl::num_proc_channels() const {
+size_t AudioProcessingImpl::num_proc_channels() const {
   // Used as callback from submodules, hence locking is not allowed.
   return capture_nonlocked_.beamformer_enabled ? 1 : num_output_channels();
 }
 
-int AudioProcessingImpl::num_output_channels() const {
+size_t AudioProcessingImpl::num_output_channels() const {
   // Used as callback from submodules, hence locking is not allowed.
   return formats_.api_format.output_stream().num_channels();
 }
@@ -631,7 +628,8 @@
     audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
     const size_t channel_size =
         sizeof(float) * formats_.api_format.input_stream().num_frames();
-    for (int i = 0; i < formats_.api_format.input_stream().num_channels(); ++i)
+    for (size_t i = 0; i < formats_.api_format.input_stream().num_channels();
+         ++i)
       msg->add_input_channel(src[i], channel_size);
   }
 #endif
@@ -645,7 +643,8 @@
     audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
     const size_t channel_size =
         sizeof(float) * formats_.api_format.output_stream().num_frames();
-    for (int i = 0; i < formats_.api_format.output_stream().num_channels(); ++i)
+    for (size_t i = 0; i < formats_.api_format.output_stream().num_channels();
+         ++i)
       msg->add_output_channel(dest[i], channel_size);
     RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
                                           &crit_debug_, &debug_dump_.capture));
@@ -879,7 +878,7 @@
     return kNullPointerError;
   }
 
-  if (reverse_input_config.num_channels() <= 0) {
+  if (reverse_input_config.num_channels() == 0) {
     return kBadNumberChannelsError;
   }
 
@@ -898,7 +897,7 @@
         debug_dump_.render.event_msg->mutable_reverse_stream();
     const size_t channel_size =
         sizeof(float) * formats_.api_format.reverse_input_stream().num_frames();
-    for (int i = 0;
+    for (size_t i = 0;
          i < formats_.api_format.reverse_input_stream().num_channels(); ++i)
       msg->add_channel(src[i], channel_size);
     RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
@@ -1455,12 +1454,12 @@
   audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init();
   msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz());
 
-  msg->set_num_input_channels(
-      formats_.api_format.input_stream().num_channels());
-  msg->set_num_output_channels(
-      formats_.api_format.output_stream().num_channels());
-  msg->set_num_reverse_channels(
-      formats_.api_format.reverse_input_stream().num_channels());
+  msg->set_num_input_channels(static_cast<google::protobuf::int32>(
+      formats_.api_format.input_stream().num_channels()));
+  msg->set_num_output_channels(static_cast<google::protobuf::int32>(
+      formats_.api_format.output_stream().num_channels()));
+  msg->set_num_reverse_channels(static_cast<google::protobuf::int32>(
+      formats_.api_format.reverse_input_stream().num_channels()));
   msg->set_reverse_sample_rate(
       formats_.api_format.reverse_input_stream().sample_rate_hz());
   msg->set_output_sample_rate(
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.h b/webrtc/modules/audio_processing/audio_processing_impl.h
index 6cb9e8c..b310896 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.h
+++ b/webrtc/modules/audio_processing/audio_processing_impl.h
@@ -101,10 +101,10 @@
   // Hence there is no need for locks in these.
   int proc_sample_rate_hz() const override;
   int proc_split_sample_rate_hz() const override;
-  int num_input_channels() const override;
-  int num_proc_channels() const override;
-  int num_output_channels() const override;
-  int num_reverse_channels() const override;
+  size_t num_input_channels() const override;
+  size_t num_proc_channels() const override;
+  size_t num_output_channels() const override;
+  size_t num_reverse_channels() const override;
   int stream_delay_ms() const override;
   bool was_stream_delay_set() const override
       EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
diff --git a/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc b/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
index 7e96774..e1e6a31 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
@@ -477,7 +477,7 @@
                         RandomGenerator* rand_gen) {
   ASSERT_GT(amplitude, 0);
   ASSERT_LE(amplitude, 32767);
-  for (int ch = 0; ch < frame->num_channels_; ch++) {
+  for (size_t ch = 0; ch < frame->num_channels_; ch++) {
     for (size_t k = 0; k < frame->samples_per_channel_; k++) {
       // Store random 16 bit number between -(amplitude+1) and
       // amplitude.
diff --git a/webrtc/modules/audio_processing/beamformer/complex_matrix.h b/webrtc/modules/audio_processing/beamformer/complex_matrix.h
index bfa3563..707c515 100644
--- a/webrtc/modules/audio_processing/beamformer/complex_matrix.h
+++ b/webrtc/modules/audio_processing/beamformer/complex_matrix.h
@@ -27,10 +27,10 @@
  public:
   ComplexMatrix() : Matrix<complex<T> >() {}
 
-  ComplexMatrix(int num_rows, int num_columns)
+  ComplexMatrix(size_t num_rows, size_t num_columns)
       : Matrix<complex<T> >(num_rows, num_columns) {}
 
-  ComplexMatrix(const complex<T>* data, int num_rows, int num_columns)
+  ComplexMatrix(const complex<T>* data, size_t num_rows, size_t num_columns)
       : Matrix<complex<T> >(data, num_rows, num_columns) {}
 
   // Complex Matrix operations.
@@ -51,7 +51,7 @@
 
   ComplexMatrix& ConjugateTranspose() {
     this->CopyDataToScratch();
-    int num_rows = this->num_rows();
+    size_t num_rows = this->num_rows();
     this->SetNumRows(this->num_columns());
     this->SetNumColumns(num_rows);
     this->Resize();
@@ -82,8 +82,8 @@
  private:
   ComplexMatrix& ConjugateTranspose(const complex<T>* const* src) {
     complex<T>* const* elements = this->elements();
-    for (int i = 0; i < this->num_rows(); ++i) {
-      for (int j = 0; j < this->num_columns(); ++j) {
+    for (size_t i = 0; i < this->num_rows(); ++i) {
+      for (size_t j = 0; j < this->num_columns(); ++j) {
         elements[i][j] = conj(src[j][i]);
       }
     }
diff --git a/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc b/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
index d072832..78f4df5 100644
--- a/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
+++ b/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
@@ -27,7 +27,7 @@
 
 // Calculates the Euclidean norm for a row vector.
 float Norm(const ComplexMatrix<float>& x) {
-  RTC_CHECK_EQ(1, x.num_rows());
+  RTC_CHECK_EQ(1u, x.num_rows());
   const size_t length = x.num_columns();
   const complex<float>* elems = x.elements()[0];
   float result = 0.f;
@@ -43,8 +43,8 @@
     float wave_number,
     const std::vector<Point>& geometry,
     ComplexMatrix<float>* mat) {
-  RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_rows());
-  RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
+  RTC_CHECK_EQ(geometry.size(), mat->num_rows());
+  RTC_CHECK_EQ(geometry.size(), mat->num_columns());
 
   complex<float>* const* mat_els = mat->elements();
   for (size_t i = 0; i < geometry.size(); ++i) {
@@ -68,8 +68,8 @@
     int sample_rate,
     const std::vector<Point>& geometry,
     ComplexMatrix<float>* mat) {
-  RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_rows());
-  RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
+  RTC_CHECK_EQ(geometry.size(), mat->num_rows());
+  RTC_CHECK_EQ(geometry.size(), mat->num_columns());
 
   ComplexMatrix<float> interf_cov_vector(1, geometry.size());
   ComplexMatrix<float> interf_cov_vector_transposed(geometry.size(), 1);
@@ -94,8 +94,8 @@
     const std::vector<Point>& geometry,
     float angle,
     ComplexMatrix<float>* mat) {
-  RTC_CHECK_EQ(1, mat->num_rows());
-  RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
+  RTC_CHECK_EQ(1u, mat->num_rows());
+  RTC_CHECK_EQ(geometry.size(), mat->num_columns());
 
   float freq_in_hertz =
       (static_cast<float>(frequency_bin) / fft_size) * sample_rate;
diff --git a/webrtc/modules/audio_processing/beamformer/matrix.h b/webrtc/modules/audio_processing/beamformer/matrix.h
index 162aef1..51c1cece9 100644
--- a/webrtc/modules/audio_processing/beamformer/matrix.h
+++ b/webrtc/modules/audio_processing/beamformer/matrix.h
@@ -67,7 +67,7 @@
   Matrix() : num_rows_(0), num_columns_(0) {}
 
   // Allocates space for the elements and initializes all values to zero.
-  Matrix(int num_rows, int num_columns)
+  Matrix(size_t num_rows, size_t num_columns)
       : num_rows_(num_rows), num_columns_(num_columns) {
     Resize();
     scratch_data_.resize(num_rows_ * num_columns_);
@@ -75,7 +75,7 @@
   }
 
   // Copies |data| into the new Matrix.
-  Matrix(const T* data, int num_rows, int num_columns)
+  Matrix(const T* data, size_t num_rows, size_t num_columns)
       : num_rows_(0), num_columns_(0) {
     CopyFrom(data, num_rows, num_columns);
     scratch_data_.resize(num_rows_ * num_columns_);
@@ -90,23 +90,23 @@
   }
 
   // Copy |data| into the Matrix. The current data is lost.
-  void CopyFrom(const T* const data, int num_rows, int num_columns) {
+  void CopyFrom(const T* const data, size_t num_rows, size_t num_columns) {
     Resize(num_rows, num_columns);
     memcpy(&data_[0], data, num_rows_ * num_columns_ * sizeof(data_[0]));
   }
 
   Matrix& CopyFromColumn(const T* const* src,
                          size_t column_index,
-                         int num_rows) {
+                         size_t num_rows) {
     Resize(1, num_rows);
-    for (int i = 0; i < num_columns_; ++i) {
+    for (size_t i = 0; i < num_columns_; ++i) {
       data_[i] = src[i][column_index];
     }
 
     return *this;
   }
 
-  void Resize(int num_rows, int num_columns) {
+  void Resize(size_t num_rows, size_t num_columns) {
     if (num_rows != num_rows_ || num_columns != num_columns_) {
       num_rows_ = num_rows;
       num_columns_ = num_columns;
@@ -115,8 +115,8 @@
   }
 
   // Accessors and mutators.
-  int num_rows() const { return num_rows_; }
-  int num_columns() const { return num_columns_; }
+  size_t num_rows() const { return num_rows_; }
+  size_t num_columns() const { return num_columns_; }
   T* const* elements() { return &elements_[0]; }
   const T* const* elements() const { return &elements_[0]; }
 
@@ -124,7 +124,7 @@
     RTC_CHECK_EQ(num_rows_, num_columns_);
 
     T trace = 0;
-    for (int i = 0; i < num_rows_; ++i) {
+    for (size_t i = 0; i < num_rows_; ++i) {
       trace += elements_[i][i];
     }
     return trace;
@@ -282,8 +282,8 @@
     std::ostringstream ss;
     ss << std::endl << "Matrix" << std::endl;
 
-    for (int i = 0; i < num_rows_; ++i) {
-      for (int j = 0; j < num_columns_; ++j) {
+    for (size_t i = 0; i < num_rows_; ++i) {
+      for (size_t j = 0; j < num_columns_; ++j) {
         ss << elements_[i][j] << " ";
       }
       ss << std::endl;
@@ -294,8 +294,8 @@
   }
 
  protected:
-  void SetNumRows(const int num_rows) { num_rows_ = num_rows; }
-  void SetNumColumns(const int num_columns) { num_columns_ = num_columns; }
+  void SetNumRows(const size_t num_rows) { num_rows_ = num_rows; }
+  void SetNumColumns(const size_t num_columns) { num_columns_ = num_columns; }
   T* data() { return &data_[0]; }
   const T* data() const { return &data_[0]; }
   const T* const* scratch_elements() const { return &scratch_elements_[0]; }
@@ -307,7 +307,7 @@
     data_.resize(size);
     elements_.resize(num_rows_);
 
-    for (int i = 0; i < num_rows_; ++i) {
+    for (size_t i = 0; i < num_rows_; ++i) {
       elements_[i] = &data_[i * num_columns_];
     }
   }
@@ -317,14 +317,14 @@
     scratch_data_ = data_;
     scratch_elements_.resize(num_rows_);
 
-    for (int i = 0; i < num_rows_; ++i) {
+    for (size_t i = 0; i < num_rows_; ++i) {
       scratch_elements_[i] = &scratch_data_[i * num_columns_];
     }
   }
 
  private:
-  int num_rows_;
-  int num_columns_;
+  size_t num_rows_;
+  size_t num_columns_;
   std::vector<T> data_;
   std::vector<T*> elements_;
 
@@ -336,8 +336,8 @@
   // Helpers for Transpose and Multiply operations that unify in-place and
   // out-of-place solutions.
   Matrix& Transpose(const T* const* src) {
-    for (int i = 0; i < num_rows_; ++i) {
-      for (int j = 0; j < num_columns_; ++j) {
+    for (size_t i = 0; i < num_rows_; ++i) {
+      for (size_t j = 0; j < num_columns_; ++j) {
         elements_[i][j] = src[j][i];
       }
     }
@@ -345,11 +345,13 @@
     return *this;
   }
 
-  Matrix& Multiply(const T* const* lhs, int num_rows_rhs, const T* const* rhs) {
-    for (int row = 0; row < num_rows_; ++row) {
-      for (int col = 0; col < num_columns_; ++col) {
+  Matrix& Multiply(const T* const* lhs,
+                   size_t num_rows_rhs,
+                   const T* const* rhs) {
+    for (size_t row = 0; row < num_rows_; ++row) {
+      for (size_t col = 0; col < num_columns_; ++col) {
         T cur_element = 0;
-        for (int i = 0; i < num_rows_rhs; ++i) {
+        for (size_t i = 0; i < num_rows_rhs; ++i) {
           cur_element += lhs[row][i] * rhs[i][col];
         }
 
diff --git a/webrtc/modules/audio_processing/beamformer/matrix_test_helpers.h b/webrtc/modules/audio_processing/beamformer/matrix_test_helpers.h
index 7c58670..9891a82 100644
--- a/webrtc/modules/audio_processing/beamformer/matrix_test_helpers.h
+++ b/webrtc/modules/audio_processing/beamformer/matrix_test_helpers.h
@@ -34,8 +34,8 @@
 
     const T* const* expected_elements = expected.elements();
     const T* const* actual_elements = actual.elements();
-    for (int i = 0; i < expected.num_rows(); ++i) {
-      for (int j = 0; j < expected.num_columns(); ++j) {
+    for (size_t i = 0; i < expected.num_rows(); ++i) {
+      for (size_t j = 0; j < expected.num_columns(); ++j) {
         EXPECT_EQ(expected_elements[i][j], actual_elements[i][j]);
       }
     }
@@ -48,8 +48,8 @@
 
     const float* const* expected_elements = expected.elements();
     const float* const* actual_elements = actual.elements();
-    for (int i = 0; i < expected.num_rows(); ++i) {
-      for (int j = 0; j < expected.num_columns(); ++j) {
+    for (size_t i = 0; i < expected.num_rows(); ++i) {
+      for (size_t j = 0; j < expected.num_columns(); ++j) {
         EXPECT_NEAR(expected_elements[i][j], actual_elements[i][j], kTolerance);
       }
     }
@@ -63,8 +63,8 @@
 
     const complex<float>* const* expected_elements = expected.elements();
     const complex<float>* const* actual_elements = actual.elements();
-    for (int i = 0; i < expected.num_rows(); ++i) {
-      for (int j = 0; j < expected.num_columns(); ++j) {
+    for (size_t i = 0; i < expected.num_rows(); ++i) {
+      for (size_t j = 0; j < expected.num_columns(); ++j) {
         EXPECT_NEAR(expected_elements[i][j].real(),
                     actual_elements[i][j].real(),
                     kTolerance);
@@ -84,8 +84,8 @@
 
     const complex<float>* const* expected_elements = expected.elements();
     const complex<float>* const* actual_elements = actual.elements();
-    for (int i = 0; i < expected.num_rows(); ++i) {
-      for (int j = 0; j < expected.num_columns(); ++j) {
+    for (size_t i = 0; i < expected.num_rows(); ++i) {
+      for (size_t j = 0; j < expected.num_columns(); ++j) {
         EXPECT_NEAR(expected_elements[i][j].real(),
                     actual_elements[i][j].real(),
                     tolerance);
diff --git a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
index 0544104..6ea7234 100644
--- a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
+++ b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
@@ -79,7 +79,7 @@
 // The returned norm is clamped to be non-negative.
 float Norm(const ComplexMatrix<float>& mat,
            const ComplexMatrix<float>& norm_mat) {
-  RTC_CHECK_EQ(norm_mat.num_rows(), 1);
+  RTC_CHECK_EQ(1u, norm_mat.num_rows());
   RTC_CHECK_EQ(norm_mat.num_columns(), mat.num_rows());
   RTC_CHECK_EQ(norm_mat.num_columns(), mat.num_columns());
 
@@ -89,8 +89,8 @@
   const complex<float>* const* mat_els = mat.elements();
   const complex<float>* const* norm_mat_els = norm_mat.elements();
 
-  for (int i = 0; i < norm_mat.num_columns(); ++i) {
-    for (int j = 0; j < norm_mat.num_columns(); ++j) {
+  for (size_t i = 0; i < norm_mat.num_columns(); ++i) {
+    for (size_t j = 0; j < norm_mat.num_columns(); ++j) {
       first_product += conj(norm_mat_els[0][j]) * mat_els[j][i];
     }
     second_product += first_product * norm_mat_els[0][i];
@@ -102,15 +102,15 @@
 // Does conjugate(|lhs|) * |rhs| for row vectors |lhs| and |rhs|.
 complex<float> ConjugateDotProduct(const ComplexMatrix<float>& lhs,
                                    const ComplexMatrix<float>& rhs) {
-  RTC_CHECK_EQ(lhs.num_rows(), 1);
-  RTC_CHECK_EQ(rhs.num_rows(), 1);
+  RTC_CHECK_EQ(1u, lhs.num_rows());
+  RTC_CHECK_EQ(1u, rhs.num_rows());
   RTC_CHECK_EQ(lhs.num_columns(), rhs.num_columns());
 
   const complex<float>* const* lhs_elements = lhs.elements();
   const complex<float>* const* rhs_elements = rhs.elements();
 
   complex<float> result = complex<float>(0.f, 0.f);
-  for (int i = 0; i < lhs.num_columns(); ++i) {
+  for (size_t i = 0; i < lhs.num_columns(); ++i) {
     result += conj(lhs_elements[0][i]) * rhs_elements[0][i];
   }
 
@@ -126,8 +126,8 @@
 float SumAbs(const ComplexMatrix<float>& mat) {
   float sum_abs = 0.f;
   const complex<float>* const* mat_els = mat.elements();
-  for (int i = 0; i < mat.num_rows(); ++i) {
-    for (int j = 0; j < mat.num_columns(); ++j) {
+  for (size_t i = 0; i < mat.num_rows(); ++i) {
+    for (size_t j = 0; j < mat.num_columns(); ++j) {
       sum_abs += std::abs(mat_els[i][j]);
     }
   }
@@ -138,8 +138,8 @@
 float SumSquares(const ComplexMatrix<float>& mat) {
   float sum_squares = 0.f;
   const complex<float>* const* mat_els = mat.elements();
-  for (int i = 0; i < mat.num_rows(); ++i) {
-    for (int j = 0; j < mat.num_columns(); ++j) {
+  for (size_t i = 0; i < mat.num_rows(); ++i) {
+    for (size_t j = 0; j < mat.num_columns(); ++j) {
       float abs_value = std::abs(mat_els[i][j]);
       sum_squares += abs_value * abs_value;
     }
@@ -150,13 +150,13 @@
 // Does |out| = |in|.' * conj(|in|) for row vector |in|.
 void TransposedConjugatedProduct(const ComplexMatrix<float>& in,
                                  ComplexMatrix<float>* out) {
-  RTC_CHECK_EQ(in.num_rows(), 1);
+  RTC_CHECK_EQ(1u, in.num_rows());
   RTC_CHECK_EQ(out->num_rows(), in.num_columns());
   RTC_CHECK_EQ(out->num_columns(), in.num_columns());
   const complex<float>* in_elements = in.elements()[0];
   complex<float>* const* out_elements = out->elements();
-  for (int i = 0; i < out->num_rows(); ++i) {
-    for (int j = 0; j < out->num_columns(); ++j) {
+  for (size_t i = 0; i < out->num_rows(); ++i) {
+    for (size_t j = 0; j < out->num_columns(); ++j) {
       out_elements[i][j] = in_elements[i] * conj(in_elements[j]);
     }
   }
@@ -408,13 +408,13 @@
 }
 
 void NonlinearBeamformer::ProcessAudioBlock(const complex_f* const* input,
-                                            int num_input_channels,
+                                            size_t num_input_channels,
                                             size_t num_freq_bins,
-                                            int num_output_channels,
+                                            size_t num_output_channels,
                                             complex_f* const* output) {
   RTC_CHECK_EQ(kNumFreqBins, num_freq_bins);
   RTC_CHECK_EQ(num_input_channels_, num_input_channels);
-  RTC_CHECK_EQ(1, num_output_channels);
+  RTC_CHECK_EQ(1u, num_output_channels);
 
   // Calculating the post-filter masks. Note that we need two for each
   // frequency bin to account for the positive and negative interferer
@@ -483,7 +483,7 @@
 
     const complex_f* delay_sum_mask_els =
         normalized_delay_sum_masks_[f_ix].elements()[0];
-    for (int c_ix = 0; c_ix < num_input_channels_; ++c_ix) {
+    for (size_t c_ix = 0; c_ix < num_input_channels_; ++c_ix) {
       output_channel[f_ix] += input[c_ix][f_ix] * delay_sum_mask_els[c_ix];
     }
 
diff --git a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h
index b20d938..29c416c 100644
--- a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h
+++ b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h
@@ -67,9 +67,9 @@
   // Process one frequency-domain block of audio. This is where the fun
   // happens. Implements LappedTransform::Callback.
   void ProcessAudioBlock(const complex<float>* const* input,
-                         int num_input_channels,
+                         size_t num_input_channels,
                          size_t num_freq_bins,
-                         int num_output_channels,
+                         size_t num_output_channels,
                          complex<float>* const* output) override;
 
  private:
@@ -129,7 +129,7 @@
   float window_[kFftSize];
 
   // Parameters exposed to the user.
-  const int num_input_channels_;
+  const size_t num_input_channels_;
   int sample_rate_hz_;
 
   const std::vector<Point> array_geometry_;
diff --git a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
index cc75248..d187552 100644
--- a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
+++ b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
@@ -12,6 +12,7 @@
 
 #include "gflags/gflags.h"
 #include "webrtc/base/checks.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/common_audio/channel_buffer.h"
 #include "webrtc/common_audio/wav_file.h"
 #include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h"
@@ -52,9 +53,9 @@
   NonlinearBeamformer bf(array_geometry);
   bf.Initialize(kChunkSizeMs, in_file.sample_rate());
 
-  printf("Input file: %s\nChannels: %d, Sample rate: %d Hz\n\n",
+  printf("Input file: %s\nChannels: %" PRIuS ", Sample rate: %d Hz\n\n",
          FLAGS_i.c_str(), in_file.num_channels(), in_file.sample_rate());
-  printf("Output file: %s\nChannels: %d, Sample rate: %d Hz\n\n",
+  printf("Output file: %s\nChannels: %" PRIuS ", Sample rate: %d Hz\n\n",
          FLAGS_o.c_str(), out_file.num_channels(), out_file.sample_rate());
 
   ChannelBuffer<float> in_buf(
diff --git a/webrtc/modules/audio_processing/common.h b/webrtc/modules/audio_processing/common.h
index cf0d8b7..d4ddb92 100644
--- a/webrtc/modules/audio_processing/common.h
+++ b/webrtc/modules/audio_processing/common.h
@@ -17,7 +17,7 @@
 
 namespace webrtc {
 
-static inline int ChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
+static inline size_t ChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
   switch (layout) {
     case AudioProcessing::kMono:
     case AudioProcessing::kMonoAndKeyboard:
diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl.cc b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
index 13e71bc..debc597 100644
--- a/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -99,8 +99,8 @@
   // The ordering convention must be followed to pass to the correct AEC.
   size_t handle_index = 0;
   render_queue_buffer_.clear();
-  for (int i = 0; i < apm_->num_output_channels(); i++) {
-    for (int j = 0; j < audio->num_channels(); j++) {
+  for (size_t i = 0; i < apm_->num_output_channels(); i++) {
+    for (size_t j = 0; j < audio->num_channels(); j++) {
       Handle* my_handle = static_cast<Handle*>(handle(handle_index));
       // Retrieve any error code produced by the buffering of the farend
       // signal
@@ -146,8 +146,8 @@
     const size_t num_frames_per_band =
         capture_queue_buffer_.size() /
         (apm_->num_output_channels() * apm_->num_reverse_channels());
-    for (int i = 0; i < apm_->num_output_channels(); i++) {
-      for (int j = 0; j < apm_->num_reverse_channels(); j++) {
+    for (size_t i = 0; i < apm_->num_output_channels(); i++) {
+      for (size_t j = 0; j < apm_->num_reverse_channels(); j++) {
         Handle* my_handle = static_cast<Handle*>(handle(handle_index));
         WebRtcAec_BufferFarend(my_handle, &capture_queue_buffer_[buffer_index],
                                num_frames_per_band);
@@ -181,8 +181,8 @@
   // The ordering convention must be followed to pass to the correct AEC.
   size_t handle_index = 0;
   stream_has_echo_ = false;
-  for (int i = 0; i < audio->num_channels(); i++) {
-    for (int j = 0; j < apm_->num_reverse_channels(); j++) {
+  for (size_t i = 0; i < audio->num_channels(); i++) {
+    for (size_t j = 0; j < apm_->num_reverse_channels(); j++) {
       Handle* my_handle = handle(handle_index);
       err = WebRtcAec_Process(my_handle, audio->split_bands_const_f(i),
                               audio->num_bands(), audio->split_bands_f(i),
@@ -489,7 +489,7 @@
   return WebRtcAec_set_config(static_cast<Handle*>(handle), config);
 }
 
-int EchoCancellationImpl::num_handles_required() const {
+size_t EchoCancellationImpl::num_handles_required() const {
   // Not locked as it only relies on APM public API which is threadsafe.
   return apm_->num_output_channels() * apm_->num_reverse_channels();
 }
diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl.h b/webrtc/modules/audio_processing/echo_cancellation_impl.h
index 9418fbf..a40a267 100644
--- a/webrtc/modules/audio_processing/echo_cancellation_impl.h
+++ b/webrtc/modules/audio_processing/echo_cancellation_impl.h
@@ -72,7 +72,7 @@
   int InitializeHandle(void* handle) const override;
   int ConfigureHandle(void* handle) const override;
   void DestroyHandle(void* handle) const override;
-  int num_handles_required() const override;
+  size_t num_handles_required() const override;
   int GetHandleError(void* handle) const override;
 
   void AllocateRenderQueue();
diff --git a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
index a39528e..f2df5f7 100644
--- a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
+++ b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
@@ -104,8 +104,8 @@
   // The ordering convention must be followed to pass to the correct AECM.
   size_t handle_index = 0;
   render_queue_buffer_.clear();
-  for (int i = 0; i < apm_->num_output_channels(); i++) {
-    for (int j = 0; j < audio->num_channels(); j++) {
+  for (size_t i = 0; i < apm_->num_output_channels(); i++) {
+    for (size_t j = 0; j < audio->num_channels(); j++) {
       Handle* my_handle = static_cast<Handle*>(handle(handle_index));
       err = WebRtcAecm_GetBufferFarendError(
           my_handle, audio->split_bands_const(j)[kBand0To8kHz],
@@ -151,8 +151,8 @@
     const size_t num_frames_per_band =
         capture_queue_buffer_.size() /
         (apm_->num_output_channels() * apm_->num_reverse_channels());
-    for (int i = 0; i < apm_->num_output_channels(); i++) {
-      for (int j = 0; j < apm_->num_reverse_channels(); j++) {
+    for (size_t i = 0; i < apm_->num_output_channels(); i++) {
+      for (size_t j = 0; j < apm_->num_reverse_channels(); j++) {
         Handle* my_handle = static_cast<Handle*>(handle(handle_index));
         WebRtcAecm_BufferFarend(my_handle, &capture_queue_buffer_[buffer_index],
                                 num_frames_per_band);
@@ -182,7 +182,7 @@
 
   // The ordering convention must be followed to pass to the correct AECM.
   size_t handle_index = 0;
-  for (int i = 0; i < audio->num_channels(); i++) {
+  for (size_t i = 0; i < audio->num_channels(); i++) {
     // TODO(ajm): improve how this works, possibly inside AECM.
     //            This is kind of hacked up.
     const int16_t* noisy = audio->low_pass_reference(i);
@@ -191,7 +191,7 @@
       noisy = clean;
       clean = NULL;
     }
-    for (int j = 0; j < apm_->num_reverse_channels(); j++) {
+    for (size_t j = 0; j < apm_->num_reverse_channels(); j++) {
       Handle* my_handle = static_cast<Handle*>(handle(handle_index));
       err = WebRtcAecm_Process(
           my_handle,
@@ -394,7 +394,7 @@
   return WebRtcAecm_set_config(static_cast<Handle*>(handle), config);
 }
 
-int EchoControlMobileImpl::num_handles_required() const {
+size_t EchoControlMobileImpl::num_handles_required() const {
   // Not locked as it only relies on APM public API which is threadsafe.
   return apm_->num_output_channels() * apm_->num_reverse_channels();
 }
diff --git a/webrtc/modules/audio_processing/echo_control_mobile_impl.h b/webrtc/modules/audio_processing/echo_control_mobile_impl.h
index 3b5dbf3..4d6529d 100644
--- a/webrtc/modules/audio_processing/echo_control_mobile_impl.h
+++ b/webrtc/modules/audio_processing/echo_control_mobile_impl.h
@@ -58,7 +58,7 @@
   int InitializeHandle(void* handle) const override;
   int ConfigureHandle(void* handle) const override;
   void DestroyHandle(void* handle) const override;
-  int num_handles_required() const override;
+  size_t num_handles_required() const override;
   int GetHandleError(void* handle) const override;
 
   void AllocateRenderQueue();
diff --git a/webrtc/modules/audio_processing/gain_control_impl.cc b/webrtc/modules/audio_processing/gain_control_impl.cc
index 7b284e8..04a6c7b 100644
--- a/webrtc/modules/audio_processing/gain_control_impl.cc
+++ b/webrtc/modules/audio_processing/gain_control_impl.cc
@@ -75,7 +75,7 @@
   assert(audio->num_frames_per_band() <= 160);
 
   render_queue_buffer_.resize(0);
-  for (int i = 0; i < num_handles(); i++) {
+  for (size_t i = 0; i < num_handles(); i++) {
     Handle* my_handle = static_cast<Handle*>(handle(i));
     int err =
         WebRtcAgc_GetAddFarendError(my_handle, audio->num_frames_per_band());
@@ -114,7 +114,7 @@
     size_t buffer_index = 0;
     const size_t num_frames_per_band =
         capture_queue_buffer_.size() / num_handles();
-    for (int i = 0; i < num_handles(); i++) {
+    for (size_t i = 0; i < num_handles(); i++) {
       Handle* my_handle = static_cast<Handle*>(handle(i));
       WebRtcAgc_AddFarend(my_handle, &capture_queue_buffer_[buffer_index],
                           num_frames_per_band);
@@ -138,7 +138,7 @@
 
   if (mode_ == kAdaptiveAnalog) {
     capture_levels_.assign(num_handles(), analog_capture_level_);
-    for (int i = 0; i < num_handles(); i++) {
+    for (size_t i = 0; i < num_handles(); i++) {
       Handle* my_handle = static_cast<Handle*>(handle(i));
       err = WebRtcAgc_AddMic(
           my_handle,
@@ -152,7 +152,7 @@
     }
   } else if (mode_ == kAdaptiveDigital) {
 
-    for (int i = 0; i < num_handles(); i++) {
+    for (size_t i = 0; i < num_handles(); i++) {
       Handle* my_handle = static_cast<Handle*>(handle(i));
       int32_t capture_level_out = 0;
 
@@ -191,7 +191,7 @@
   assert(audio->num_channels() == num_handles());
 
   stream_is_saturated_ = false;
-  for (int i = 0; i < num_handles(); i++) {
+  for (size_t i = 0; i < num_handles(); i++) {
     Handle* my_handle = static_cast<Handle*>(handle(i));
     int32_t capture_level_out = 0;
     uint8_t saturation_warning = 0;
@@ -222,7 +222,7 @@
   if (mode_ == kAdaptiveAnalog) {
     // Take the analog level to be the average across the handles.
     analog_capture_level_ = 0;
-    for (int i = 0; i < num_handles(); i++) {
+    for (size_t i = 0; i < num_handles(); i++) {
       analog_capture_level_ += capture_levels_[i];
     }
 
@@ -433,7 +433,7 @@
   return WebRtcAgc_set_config(static_cast<Handle*>(handle), config);
 }
 
-int GainControlImpl::num_handles_required() const {
+size_t GainControlImpl::num_handles_required() const {
   // Not locked as it only relies on APM public API which is threadsafe.
   return apm_->num_proc_channels();
 }
diff --git a/webrtc/modules/audio_processing/gain_control_impl.h b/webrtc/modules/audio_processing/gain_control_impl.h
index b531de9..72789ba 100644
--- a/webrtc/modules/audio_processing/gain_control_impl.h
+++ b/webrtc/modules/audio_processing/gain_control_impl.h
@@ -68,7 +68,7 @@
   int InitializeHandle(void* handle) const override;
   int ConfigureHandle(void* handle) const override;
   void DestroyHandle(void* handle) const override;
-  int num_handles_required() const override;
+  size_t num_handles_required() const override;
   int GetHandleError(void* handle) const override;
 
   void AllocateRenderQueue();
diff --git a/webrtc/modules/audio_processing/high_pass_filter_impl.cc b/webrtc/modules/audio_processing/high_pass_filter_impl.cc
index facf1e4..375d58f 100644
--- a/webrtc/modules/audio_processing/high_pass_filter_impl.cc
+++ b/webrtc/modules/audio_processing/high_pass_filter_impl.cc
@@ -92,10 +92,9 @@
 
 HighPassFilterImpl::~HighPassFilterImpl() {}
 
-void HighPassFilterImpl::Initialize(int channels, int sample_rate_hz) {
-  RTC_DCHECK_LE(0, channels);
+void HighPassFilterImpl::Initialize(size_t channels, int sample_rate_hz) {
   std::vector<rtc::scoped_ptr<BiquadFilter>> new_filters(channels);
-  for (int i = 0; i < channels; i++) {
+  for (size_t i = 0; i < channels; i++) {
     new_filters[i].reset(new BiquadFilter(sample_rate_hz));
   }
   rtc::CritScope cs(crit_);
@@ -110,7 +109,7 @@
   }
 
   RTC_DCHECK_GE(160u, audio->num_frames_per_band());
-  RTC_DCHECK_EQ(filters_.size(), static_cast<size_t>(audio->num_channels()));
+  RTC_DCHECK_EQ(filters_.size(), audio->num_channels());
   for (size_t i = 0; i < filters_.size(); i++) {
     filters_[i]->Process(audio->split_bands(i)[kBand0To8kHz],
                          audio->num_frames_per_band());
diff --git a/webrtc/modules/audio_processing/high_pass_filter_impl.h b/webrtc/modules/audio_processing/high_pass_filter_impl.h
index d6e84fd..0e985ba 100644
--- a/webrtc/modules/audio_processing/high_pass_filter_impl.h
+++ b/webrtc/modules/audio_processing/high_pass_filter_impl.h
@@ -26,7 +26,7 @@
   ~HighPassFilterImpl() override;
 
   // TODO(peah): Fold into ctor, once public API is removed.
-  void Initialize(int channels, int sample_rate_hz);
+  void Initialize(size_t channels, int sample_rate_hz);
   void ProcessCaptureAudio(AudioBuffer* audio);
 
   // HighPassFilter implementation.
diff --git a/webrtc/modules/audio_processing/include/audio_processing.h b/webrtc/modules/audio_processing/include/audio_processing.h
index d39d27e..5dd63eb 100644
--- a/webrtc/modules/audio_processing/include/audio_processing.h
+++ b/webrtc/modules/audio_processing/include/audio_processing.h
@@ -287,10 +287,10 @@
   // necessary classes?
   virtual int proc_sample_rate_hz() const = 0;
   virtual int proc_split_sample_rate_hz() const = 0;
-  virtual int num_input_channels() const = 0;
-  virtual int num_proc_channels() const = 0;
-  virtual int num_output_channels() const = 0;
-  virtual int num_reverse_channels() const = 0;
+  virtual size_t num_input_channels() const = 0;
+  virtual size_t num_proc_channels() const = 0;
+  virtual size_t num_output_channels() const = 0;
+  virtual size_t num_reverse_channels() const = 0;
 
   // Set to true when the output of AudioProcessing will be muted or in some
   // other way not used. Ideally, the captured audio would still be processed,
@@ -502,7 +502,7 @@
   //               is true, the last channel in any corresponding list of
   //               channels is the keyboard channel.
   StreamConfig(int sample_rate_hz = 0,
-               int num_channels = 0,
+               size_t num_channels = 0,
                bool has_keyboard = false)
       : sample_rate_hz_(sample_rate_hz),
         num_channels_(num_channels),
@@ -513,14 +513,14 @@
     sample_rate_hz_ = value;
     num_frames_ = calculate_frames(value);
   }
-  void set_num_channels(int value) { num_channels_ = value; }
+  void set_num_channels(size_t value) { num_channels_ = value; }
   void set_has_keyboard(bool value) { has_keyboard_ = value; }
 
   int sample_rate_hz() const { return sample_rate_hz_; }
 
   // The number of channels in the stream, not including the keyboard channel if
   // present.
-  int num_channels() const { return num_channels_; }
+  size_t num_channels() const { return num_channels_; }
 
   bool has_keyboard() const { return has_keyboard_; }
   size_t num_frames() const { return num_frames_; }
@@ -541,7 +541,7 @@
   }
 
   int sample_rate_hz_;
-  int num_channels_;
+  size_t num_channels_;
   bool has_keyboard_;
   size_t num_frames_;
 };
diff --git a/webrtc/modules/audio_processing/include/mock_audio_processing.h b/webrtc/modules/audio_processing/include/mock_audio_processing.h
index 4ff52ba..9e1f2d5 100644
--- a/webrtc/modules/audio_processing/include/mock_audio_processing.h
+++ b/webrtc/modules/audio_processing/include/mock_audio_processing.h
@@ -201,11 +201,11 @@
   MOCK_CONST_METHOD0(proc_split_sample_rate_hz,
       int());
   MOCK_CONST_METHOD0(num_input_channels,
-      int());
+      size_t());
   MOCK_CONST_METHOD0(num_output_channels,
-      int());
+      size_t());
   MOCK_CONST_METHOD0(num_reverse_channels,
-      int());
+      size_t());
   MOCK_METHOD1(set_output_will_be_muted,
       void(bool muted));
   MOCK_CONST_METHOD0(output_will_be_muted,
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
index d014ce0..fe964ab 100644
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
@@ -54,12 +54,12 @@
 
 void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock(
     const complex<float>* const* in_block,
-    int in_channels,
+    size_t in_channels,
     size_t frames,
-    int /* out_channels */,
+    size_t /* out_channels */,
     complex<float>* const* out_block) {
   RTC_DCHECK_EQ(parent_->freqs_, frames);
-  for (int i = 0; i < in_channels; ++i) {
+  for (size_t i = 0; i < in_channels; ++i) {
     parent_->DispatchAudio(source_, in_block[i], out_block[i]);
   }
 }
@@ -129,7 +129,7 @@
 
 void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio,
                                                  int sample_rate_hz,
-                                                 int num_channels) {
+                                                 size_t num_channels) {
   RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz);
   RTC_CHECK_EQ(num_render_channels_, num_channels);
 
@@ -138,7 +138,7 @@
   }
 
   if (active_) {
-    for (int i = 0; i < num_render_channels_; ++i) {
+    for (size_t i = 0; i < num_render_channels_; ++i) {
       memcpy(audio[i], temp_render_out_buffer_.channels()[i],
              chunk_length_ * sizeof(**audio));
     }
@@ -147,7 +147,7 @@
 
 void IntelligibilityEnhancer::AnalyzeCaptureAudio(float* const* audio,
                                                   int sample_rate_hz,
-                                                  int num_channels) {
+                                                  size_t num_channels) {
   RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz);
   RTC_CHECK_EQ(num_capture_channels_, num_channels);
 
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h
index 1e9e35a..1eb2234 100644
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h
@@ -47,8 +47,8 @@
           gain_change_limit(0.1f),
           rho(0.02f) {}
     int sample_rate_hz;
-    int num_capture_channels;
-    int num_render_channels;
+    size_t num_capture_channels;
+    size_t num_render_channels;
     intelligibility::VarianceArray::StepType var_type;
     float var_decay_rate;
     size_t var_window_size;
@@ -63,12 +63,12 @@
   // Reads and processes chunk of noise stream in time domain.
   void AnalyzeCaptureAudio(float* const* audio,
                            int sample_rate_hz,
-                           int num_channels);
+                           size_t num_channels);
 
   // Reads chunk of speech in time domain and updates with modified signal.
   void ProcessRenderAudio(float* const* audio,
                           int sample_rate_hz,
-                          int num_channels);
+                          size_t num_channels);
   bool active() const;
 
  private:
@@ -85,9 +85,9 @@
     // All in frequency domain, receives input |in_block|, applies
     // intelligibility enhancement, and writes result to |out_block|.
     void ProcessAudioBlock(const std::complex<float>* const* in_block,
-                           int in_channels,
+                           size_t in_channels,
                            size_t frames,
-                           int out_channels,
+                           size_t out_channels,
                            std::complex<float>* const* out_block) override;
 
    private:
@@ -144,8 +144,8 @@
   const size_t bank_size_;     // Num ERB filters.
   const int sample_rate_hz_;
   const int erb_resolution_;
-  const int num_capture_channels_;
-  const int num_render_channels_;
+  const size_t num_capture_channels_;
+  const size_t num_render_channels_;
   const int analysis_rate_;    // Num blocks before gains recalculated.
 
   const bool active_;          // Whether render gains are being updated.
diff --git a/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc b/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc
index 27d0ab4..4d2f5f4 100644
--- a/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc
+++ b/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc
@@ -68,7 +68,7 @@
               "Enhanced output. Use '-' to "
               "play through aplay immediately.");
 
-const int kNumChannels = 1;
+const size_t kNumChannels = 1;
 
 // void function for gtest
 void void_main(int argc, char* argv[]) {
diff --git a/webrtc/modules/audio_processing/level_estimator_impl.cc b/webrtc/modules/audio_processing/level_estimator_impl.cc
index aa676a8..187873e 100644
--- a/webrtc/modules/audio_processing/level_estimator_impl.cc
+++ b/webrtc/modules/audio_processing/level_estimator_impl.cc
@@ -35,7 +35,7 @@
     return;
   }
 
-  for (int i = 0; i < audio->num_channels(); i++) {
+  for (size_t i = 0; i < audio->num_channels(); i++) {
     rms_->Process(audio->channels_const()[i], audio->num_frames());
   }
 }
diff --git a/webrtc/modules/audio_processing/noise_suppression_impl.cc b/webrtc/modules/audio_processing/noise_suppression_impl.cc
index a40cd63..de7e856 100644
--- a/webrtc/modules/audio_processing/noise_suppression_impl.cc
+++ b/webrtc/modules/audio_processing/noise_suppression_impl.cc
@@ -52,15 +52,14 @@
 
 NoiseSuppressionImpl::~NoiseSuppressionImpl() {}
 
-void NoiseSuppressionImpl::Initialize(int channels, int sample_rate_hz) {
-  RTC_DCHECK_LE(0, channels);
+void NoiseSuppressionImpl::Initialize(size_t channels, int sample_rate_hz) {
   rtc::CritScope cs(crit_);
   channels_ = channels;
   sample_rate_hz_ = sample_rate_hz;
   std::vector<rtc::scoped_ptr<Suppressor>> new_suppressors;
   if (enabled_) {
     new_suppressors.resize(channels);
-    for (int i = 0; i < channels; i++) {
+    for (size_t i = 0; i < channels; i++) {
       new_suppressors[i].reset(new Suppressor(sample_rate_hz));
     }
   }
@@ -77,8 +76,7 @@
   }
 
   RTC_DCHECK_GE(160u, audio->num_frames_per_band());
-  RTC_DCHECK_EQ(suppressors_.size(),
-                static_cast<size_t>(audio->num_channels()));
+  RTC_DCHECK_EQ(suppressors_.size(), audio->num_channels());
   for (size_t i = 0; i < suppressors_.size(); i++) {
     WebRtcNs_Analyze(suppressors_[i]->state(),
                      audio->split_bands_const_f(i)[kBand0To8kHz]);
@@ -94,8 +92,7 @@
   }
 
   RTC_DCHECK_GE(160u, audio->num_frames_per_band());
-  RTC_DCHECK_EQ(suppressors_.size(),
-                static_cast<size_t>(audio->num_channels()));
+  RTC_DCHECK_EQ(suppressors_.size(), audio->num_channels());
   for (size_t i = 0; i < suppressors_.size(); i++) {
 #if defined(WEBRTC_NS_FLOAT)
     WebRtcNs_Process(suppressors_[i]->state(),
diff --git a/webrtc/modules/audio_processing/noise_suppression_impl.h b/webrtc/modules/audio_processing/noise_suppression_impl.h
index 491c4d2..debbc61 100644
--- a/webrtc/modules/audio_processing/noise_suppression_impl.h
+++ b/webrtc/modules/audio_processing/noise_suppression_impl.h
@@ -26,7 +26,7 @@
   ~NoiseSuppressionImpl() override;
 
   // TODO(peah): Fold into ctor, once public API is removed.
-  void Initialize(int channels, int sample_rate_hz);
+  void Initialize(size_t channels, int sample_rate_hz);
   void AnalyzeCaptureAudio(AudioBuffer* audio);
   void ProcessCaptureAudio(AudioBuffer* audio);
 
@@ -42,7 +42,7 @@
   rtc::CriticalSection* const crit_;
   bool enabled_ GUARDED_BY(crit_) = false;
   Level level_ GUARDED_BY(crit_) = kModerate;
-  int channels_ GUARDED_BY(crit_) = 0;
+  size_t channels_ GUARDED_BY(crit_) = 0;
   int sample_rate_hz_ GUARDED_BY(crit_) = 0;
   std::vector<rtc::scoped_ptr<Suppressor>> suppressors_ GUARDED_BY(crit_);
   RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(NoiseSuppressionImpl);
diff --git a/webrtc/modules/audio_processing/processing_component.cc b/webrtc/modules/audio_processing/processing_component.cc
index 9e16d7c..7abd8e2 100644
--- a/webrtc/modules/audio_processing/processing_component.cc
+++ b/webrtc/modules/audio_processing/processing_component.cc
@@ -55,12 +55,12 @@
   return enabled_;
 }
 
-void* ProcessingComponent::handle(int index) const {
+void* ProcessingComponent::handle(size_t index) const {
   assert(index < num_handles_);
   return handles_[index];
 }
 
-int ProcessingComponent::num_handles() const {
+size_t ProcessingComponent::num_handles() const {
   return num_handles_;
 }
 
@@ -70,12 +70,12 @@
   }
 
   num_handles_ = num_handles_required();
-  if (num_handles_ > static_cast<int>(handles_.size())) {
+  if (num_handles_ > handles_.size()) {
     handles_.resize(num_handles_, NULL);
   }
 
-  assert(static_cast<int>(handles_.size()) >= num_handles_);
-  for (int i = 0; i < num_handles_; i++) {
+  assert(handles_.size() >= num_handles_);
+  for (size_t i = 0; i < num_handles_; i++) {
     if (handles_[i] == NULL) {
       handles_[i] = CreateHandle();
       if (handles_[i] == NULL) {
@@ -98,8 +98,8 @@
     return AudioProcessing::kNoError;
   }
 
-  assert(static_cast<int>(handles_.size()) >= num_handles_);
-  for (int i = 0; i < num_handles_; i++) {
+  assert(handles_.size() >= num_handles_);
+  for (size_t i = 0; i < num_handles_; i++) {
     int err = ConfigureHandle(handles_[i]);
     if (err != AudioProcessing::kNoError) {
       return GetHandleError(handles_[i]);
diff --git a/webrtc/modules/audio_processing/processing_component.h b/webrtc/modules/audio_processing/processing_component.h
index 291aea3..577f157 100644
--- a/webrtc/modules/audio_processing/processing_component.h
+++ b/webrtc/modules/audio_processing/processing_component.h
@@ -47,21 +47,21 @@
  protected:
   virtual int Configure();
   int EnableComponent(bool enable);
-  void* handle(int index) const;
-  int num_handles() const;
+  void* handle(size_t index) const;
+  size_t num_handles() const;
 
  private:
   virtual void* CreateHandle() const = 0;
   virtual int InitializeHandle(void* handle) const = 0;
   virtual int ConfigureHandle(void* handle) const = 0;
   virtual void DestroyHandle(void* handle) const = 0;
-  virtual int num_handles_required() const = 0;
+  virtual size_t num_handles_required() const = 0;
   virtual int GetHandleError(void* handle) const = 0;
 
   std::vector<void*> handles_;
   bool initialized_;
   bool enabled_;
-  int num_handles_;
+  size_t num_handles_;
 };
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_processing/splitting_filter.cc b/webrtc/modules/audio_processing/splitting_filter.cc
index 60427e2..46cc935 100644
--- a/webrtc/modules/audio_processing/splitting_filter.cc
+++ b/webrtc/modules/audio_processing/splitting_filter.cc
@@ -16,7 +16,7 @@
 
 namespace webrtc {
 
-SplittingFilter::SplittingFilter(int num_channels,
+SplittingFilter::SplittingFilter(size_t num_channels,
                                  size_t num_bands,
                                  size_t num_frames)
     : num_bands_(num_bands) {
@@ -24,7 +24,7 @@
   if (num_bands_ == 2) {
     two_bands_states_.resize(num_channels);
   } else if (num_bands_ == 3) {
-    for (int i = 0; i < num_channels; ++i) {
+    for (size_t i = 0; i < num_channels; ++i) {
       three_band_filter_banks_.push_back(new ThreeBandFilterBank(num_frames));
     }
   }
@@ -58,8 +58,7 @@
 
 void SplittingFilter::TwoBandsAnalysis(const IFChannelBuffer* data,
                                        IFChannelBuffer* bands) {
-  RTC_DCHECK_EQ(static_cast<int>(two_bands_states_.size()),
-                data->num_channels());
+  RTC_DCHECK_EQ(two_bands_states_.size(), data->num_channels());
   for (size_t i = 0; i < two_bands_states_.size(); ++i) {
     WebRtcSpl_AnalysisQMF(data->ibuf_const()->channels()[i],
                           data->num_frames(),
@@ -72,8 +71,7 @@
 
 void SplittingFilter::TwoBandsSynthesis(const IFChannelBuffer* bands,
                                         IFChannelBuffer* data) {
-  RTC_DCHECK_EQ(static_cast<int>(two_bands_states_.size()),
-                data->num_channels());
+  RTC_DCHECK_EQ(two_bands_states_.size(), data->num_channels());
   for (size_t i = 0; i < two_bands_states_.size(); ++i) {
     WebRtcSpl_SynthesisQMF(bands->ibuf_const()->channels(0)[i],
                            bands->ibuf_const()->channels(1)[i],
@@ -86,8 +84,7 @@
 
 void SplittingFilter::ThreeBandsAnalysis(const IFChannelBuffer* data,
                                          IFChannelBuffer* bands) {
-  RTC_DCHECK_EQ(static_cast<int>(three_band_filter_banks_.size()),
-                data->num_channels());
+  RTC_DCHECK_EQ(three_band_filter_banks_.size(), data->num_channels());
   for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) {
     three_band_filter_banks_[i]->Analysis(data->fbuf_const()->channels()[i],
                                           data->num_frames(),
@@ -97,8 +94,7 @@
 
 void SplittingFilter::ThreeBandsSynthesis(const IFChannelBuffer* bands,
                                           IFChannelBuffer* data) {
-  RTC_DCHECK_EQ(static_cast<int>(three_band_filter_banks_.size()),
-                data->num_channels());
+  RTC_DCHECK_EQ(three_band_filter_banks_.size(), data->num_channels());
   for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) {
     three_band_filter_banks_[i]->Synthesis(bands->fbuf_const()->bands(i),
                                            bands->num_frames_per_band(),
diff --git a/webrtc/modules/audio_processing/splitting_filter.h b/webrtc/modules/audio_processing/splitting_filter.h
index 4698d3f..6b81c2f 100644
--- a/webrtc/modules/audio_processing/splitting_filter.h
+++ b/webrtc/modules/audio_processing/splitting_filter.h
@@ -45,7 +45,7 @@
 // used.
 class SplittingFilter {
  public:
-  SplittingFilter(int num_channels, size_t num_bands, size_t num_frames);
+  SplittingFilter(size_t num_channels, size_t num_bands, size_t num_frames);
 
   void Analysis(const IFChannelBuffer* data, IFChannelBuffer* bands);
   void Synthesis(const IFChannelBuffer* bands, IFChannelBuffer* data);
diff --git a/webrtc/modules/audio_processing/test/audio_file_processor.cc b/webrtc/modules/audio_processing/test/audio_file_processor.cc
index 4c77356..56e9b4b 100644
--- a/webrtc/modules/audio_processing/test/audio_file_processor.cc
+++ b/webrtc/modules/audio_processing/test/audio_file_processor.cc
@@ -132,7 +132,8 @@
 
 void AecDumpFileProcessor::HandleMessage(const Stream& msg) {
   RTC_CHECK(!msg.has_input_data());
-  RTC_CHECK_EQ(in_buf_->num_channels(), msg.input_channel_size());
+  RTC_CHECK_EQ(in_buf_->num_channels(),
+               static_cast<size_t>(msg.input_channel_size()));
 
   for (int i = 0; i < msg.input_channel_size(); ++i) {
     RTC_CHECK_EQ(in_buf_->num_frames() * sizeof(*in_buf_->channels()[i]),
@@ -157,7 +158,8 @@
 
 void AecDumpFileProcessor::HandleMessage(const ReverseStream& msg) {
   RTC_CHECK(!msg.has_data());
-  RTC_CHECK_EQ(reverse_buf_->num_channels(), msg.channel_size());
+  RTC_CHECK_EQ(reverse_buf_->num_channels(),
+               static_cast<size_t>(msg.channel_size()));
 
   for (int i = 0; i < msg.channel_size(); ++i) {
     RTC_CHECK_EQ(reverse_buf_->num_frames() * sizeof(*in_buf_->channels()[i]),
diff --git a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
index 6eae1e5..94aea17 100644
--- a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
@@ -69,7 +69,7 @@
                cb->num_frames(),
                cb->num_channels(),
                cb_int.channels());
-  for (int i = 0; i < cb->num_channels(); ++i) {
+  for (size_t i = 0; i < cb->num_channels(); ++i) {
     S16ToFloat(cb_int.channels()[i],
                cb->num_frames(),
                cb->channels()[i]);
@@ -81,7 +81,7 @@
 }
 
 // Number of channels including the keyboard channel.
-int TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
+size_t TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
   switch (layout) {
     case AudioProcessing::kMono:
       return 1;
@@ -131,7 +131,7 @@
 }
 
 void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
-  ASSERT_EQ(2, frame->num_channels_);
+  ASSERT_EQ(2u, frame->num_channels_);
   for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
     frame->data_[i] = left;
     frame->data_[i + 1] = right;
@@ -256,10 +256,10 @@
                            int output_rate,
                            int reverse_input_rate,
                            int reverse_output_rate,
-                           int num_input_channels,
-                           int num_output_channels,
-                           int num_reverse_input_channels,
-                           int num_reverse_output_channels,
+                           size_t num_input_channels,
+                           size_t num_output_channels,
+                           size_t num_reverse_input_channels,
+                           size_t num_reverse_output_channels,
                            StreamDirection file_direction) {
   std::ostringstream ss;
   ss << name << "_i" << num_input_channels << "_" << input_rate / 1000 << "_ir"
@@ -354,9 +354,9 @@
   void Init(int sample_rate_hz,
             int output_sample_rate_hz,
             int reverse_sample_rate_hz,
-            int num_input_channels,
-            int num_output_channels,
-            int num_reverse_channels,
+            size_t num_input_channels,
+            size_t num_output_channels,
+            size_t num_reverse_channels,
             bool open_output_file);
   void Init(AudioProcessing* ap);
   void EnableAllComponents();
@@ -369,12 +369,12 @@
   void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
                                     int delay_min, int delay_max);
   void TestChangingChannelsInt16Interface(
-      int num_channels,
+      size_t num_channels,
       AudioProcessing::Error expected_return);
-  void TestChangingForwardChannels(int num_in_channels,
-                                   int num_out_channels,
+  void TestChangingForwardChannels(size_t num_in_channels,
+                                   size_t num_out_channels,
                                    AudioProcessing::Error expected_return);
-  void TestChangingReverseChannels(int num_rev_channels,
+  void TestChangingReverseChannels(size_t num_rev_channels,
                                    AudioProcessing::Error expected_return);
   void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate);
   void RunManualVolumeChangeIsPossibleTest(int sample_rate);
@@ -395,7 +395,7 @@
   rtc::scoped_ptr<ChannelBuffer<float> > float_cb_;
   rtc::scoped_ptr<ChannelBuffer<float> > revfloat_cb_;
   int output_sample_rate_hz_;
-  int num_output_channels_;
+  size_t num_output_channels_;
   FILE* far_file_;
   FILE* near_file_;
   FILE* out_file_;
@@ -479,9 +479,9 @@
 void ApmTest::Init(int sample_rate_hz,
                    int output_sample_rate_hz,
                    int reverse_sample_rate_hz,
-                   int num_input_channels,
-                   int num_output_channels,
-                   int num_reverse_channels,
+                   size_t num_input_channels,
+                   size_t num_output_channels,
+                   size_t num_reverse_channels,
                    bool open_output_file) {
   SetContainerFormat(sample_rate_hz, num_input_channels, frame_, &float_cb_);
   output_sample_rate_hz_ = output_sample_rate_hz;
@@ -813,7 +813,7 @@
 }
 
 void ApmTest::TestChangingChannelsInt16Interface(
-    int num_channels,
+    size_t num_channels,
     AudioProcessing::Error expected_return) {
   frame_->num_channels_ = num_channels;
   EXPECT_EQ(expected_return, apm_->ProcessStream(frame_));
@@ -821,8 +821,8 @@
 }
 
 void ApmTest::TestChangingForwardChannels(
-    int num_in_channels,
-    int num_out_channels,
+    size_t num_in_channels,
+    size_t num_out_channels,
     AudioProcessing::Error expected_return) {
   const StreamConfig input_stream = {frame_->sample_rate_hz_, num_in_channels};
   const StreamConfig output_stream = {output_sample_rate_hz_, num_out_channels};
@@ -833,7 +833,7 @@
 }
 
 void ApmTest::TestChangingReverseChannels(
-    int num_rev_channels,
+    size_t num_rev_channels,
     AudioProcessing::Error expected_return) {
   const ProcessingConfig processing_config = {
       {{frame_->sample_rate_hz_, apm_->num_input_channels()},
@@ -854,11 +854,11 @@
 
   TestChangingChannelsInt16Interface(0, apm_->kBadNumberChannelsError);
 
-  for (int i = 1; i < 4; i++) {
+  for (size_t i = 1; i < 4; i++) {
     TestChangingChannelsInt16Interface(i, kNoErr);
     EXPECT_EQ(i, apm_->num_input_channels());
     // We always force the number of reverse channels used for processing to 1.
-    EXPECT_EQ(1, apm_->num_reverse_channels());
+    EXPECT_EQ(1u, apm_->num_reverse_channels());
   }
 }
 
@@ -869,8 +869,8 @@
   TestChangingForwardChannels(0, 1, apm_->kBadNumberChannelsError);
   TestChangingReverseChannels(0, apm_->kBadNumberChannelsError);
 
-  for (int i = 1; i < 4; ++i) {
-    for (int j = 0; j < 1; ++j) {
+  for (size_t i = 1; i < 4; ++i) {
+    for (size_t j = 0; j < 1; ++j) {
       // Output channels much be one or match input channels.
       if (j == 1 || i == j) {
         TestChangingForwardChannels(i, j, kNoErr);
@@ -879,7 +879,7 @@
         EXPECT_EQ(i, apm_->num_input_channels());
         EXPECT_EQ(j, apm_->num_output_channels());
         // The number of reverse channels used for processing to is always 1.
-        EXPECT_EQ(1, apm_->num_reverse_channels());
+        EXPECT_EQ(1u, apm_->num_reverse_channels());
       } else {
         TestChangingForwardChannels(i, j,
                                     AudioProcessing::kBadNumberChannelsError);
@@ -1293,8 +1293,8 @@
   const int kSampleRateHz = 16000;
   const size_t kSamplesPerChannel =
       static_cast<size_t>(AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000);
-  const int kNumInputChannels = 2;
-  const int kNumOutputChannels = 1;
+  const size_t kNumInputChannels = 2;
+  const size_t kNumOutputChannels = 1;
   const size_t kNumChunks = 700;
   const float kScaleFactor = 0.25f;
   Config config;
@@ -1327,7 +1327,7 @@
                           int_data.get(),
                           float_data.get(),
                           &src_buf));
-    for (int j = 0; j < kNumInputChannels; ++j) {
+    for (size_t j = 0; j < kNumInputChannels; ++j) {
       for (size_t k = 0; k < kSamplesPerChannel; ++k) {
         src_buf.channels()[j][k] *= kScaleFactor;
       }
@@ -1352,7 +1352,7 @@
                           int_data.get(),
                           float_data.get(),
                           &src_buf));
-    for (int j = 0; j < kNumInputChannels; ++j) {
+    for (size_t j = 0; j < kNumInputChannels; ++j) {
       for (size_t k = 0; k < kSamplesPerChannel; ++k) {
         src_buf.channels()[j][k] *= kScaleFactor;
       }
@@ -1742,7 +1742,8 @@
       const audioproc::ReverseStream msg = event_msg.reverse_stream();
 
       if (msg.channel_size() > 0) {
-        ASSERT_EQ(revframe_->num_channels_, msg.channel_size());
+        ASSERT_EQ(revframe_->num_channels_,
+                  static_cast<size_t>(msg.channel_size()));
         for (int i = 0; i < msg.channel_size(); ++i) {
            memcpy(revfloat_cb_->channels()[i],
                   msg.channel(i).data(),
@@ -1772,7 +1773,8 @@
       }
 
       if (msg.input_channel_size() > 0) {
-        ASSERT_EQ(frame_->num_channels_, msg.input_channel_size());
+        ASSERT_EQ(frame_->num_channels_,
+                  static_cast<size_t>(msg.input_channel_size()));
         for (int i = 0; i < msg.input_channel_size(); ++i) {
            memcpy(float_cb_->channels()[i],
                   msg.input_channel(i).data(),
@@ -1930,9 +1932,12 @@
     if (test->num_input_channels() != test->num_output_channels())
       continue;
 
-    const int num_render_channels = test->num_reverse_channels();
-    const int num_input_channels = test->num_input_channels();
-    const int num_output_channels = test->num_output_channels();
+    const size_t num_render_channels =
+        static_cast<size_t>(test->num_reverse_channels());
+    const size_t num_input_channels =
+        static_cast<size_t>(test->num_input_channels());
+    const size_t num_output_channels =
+        static_cast<size_t>(test->num_output_channels());
     const size_t samples_per_channel = static_cast<size_t>(
         test->sample_rate() * AudioProcessing::kChunkSizeMs / 1000);
 
@@ -1975,7 +1980,7 @@
           test->sample_rate(),
           LayoutFromChannels(num_output_channels),
           float_cb_->channels()));
-      for (int j = 0; j < num_output_channels; ++j) {
+      for (size_t j = 0; j < num_output_channels; ++j) {
         FloatToS16(float_cb_->channels()[j],
                    samples_per_channel,
                    output_cb.channels()[j]);
@@ -2008,7 +2013,7 @@
                   0.01);
 
       // Reset in case of downmixing.
-      frame_->num_channels_ = test->num_input_channels();
+      frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
     }
     rewind(far_file_);
     rewind(near_file_);
@@ -2069,9 +2074,9 @@
     Init(test->sample_rate(),
          test->sample_rate(),
          test->sample_rate(),
-         test->num_input_channels(),
-         test->num_output_channels(),
-         test->num_reverse_channels(),
+         static_cast<size_t>(test->num_input_channels()),
+         static_cast<size_t>(test->num_output_channels()),
+         static_cast<size_t>(test->num_reverse_channels()),
          true);
 
     int frame_count = 0;
@@ -2096,7 +2101,8 @@
       EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
 
       // Ensure the frame was downmixed properly.
-      EXPECT_EQ(test->num_output_channels(), frame_->num_channels_);
+      EXPECT_EQ(static_cast<size_t>(test->num_output_channels()),
+                frame_->num_channels_);
 
       max_output_average += MaxAudioFrame(*frame_);
 
@@ -2126,7 +2132,7 @@
       ASSERT_EQ(frame_size, write_count);
 
       // Reset in case of downmixing.
-      frame_->num_channels_ = test->num_input_channels();
+      frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
       frame_count++;
     }
     max_output_average /= frame_count;
@@ -2350,7 +2356,7 @@
   static void SetUpTestCase() {
     // Create all needed output reference files.
     const int kNativeRates[] = {8000, 16000, 32000, 48000};
-    const int kNumChannels[] = {1, 2};
+    const size_t kNumChannels[] = {1, 2};
     for (size_t i = 0; i < arraysize(kNativeRates); ++i) {
       for (size_t j = 0; j < arraysize(kNumChannels); ++j) {
         for (size_t k = 0; k < arraysize(kNumChannels); ++k) {
@@ -2374,10 +2380,10 @@
                             int output_rate,
                             int reverse_input_rate,
                             int reverse_output_rate,
-                            int num_input_channels,
-                            int num_output_channels,
-                            int num_reverse_input_channels,
-                            int num_reverse_output_channels,
+                            size_t num_input_channels,
+                            size_t num_output_channels,
+                            size_t num_reverse_input_channels,
+                            size_t num_reverse_output_channels,
                             std::string output_file_prefix) {
     Config config;
     config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
diff --git a/webrtc/modules/audio_processing/test/audioproc_float.cc b/webrtc/modules/audio_processing/test/audioproc_float.cc
index d64b006..a489d25 100644
--- a/webrtc/modules/audio_processing/test/audioproc_float.cc
+++ b/webrtc/modules/audio_processing/test/audioproc_float.cc
@@ -16,6 +16,7 @@
 
 #include "gflags/gflags.h"
 #include "webrtc/base/checks.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/common_audio/channel_buffer.h"
 #include "webrtc/common_audio/wav_file.h"
@@ -26,6 +27,14 @@
 #include "webrtc/system_wrappers/include/tick_util.h"
 #include "webrtc/test/testsupport/trace_to_stderr.h"
 
+namespace {
+
+bool ValidateOutChannels(const char* flagname, int32_t value) {
+  return value >= 0;
+}
+
+}  // namespace
+
 DEFINE_string(dump, "", "Name of the aecdump debug file to read from.");
 DEFINE_string(i, "", "Name of the capture input stream file to read from.");
 DEFINE_string(
@@ -33,6 +42,8 @@
     "out.wav",
     "Name of the output file to write the processed capture stream to.");
 DEFINE_int32(out_channels, 1, "Number of output channels.");
+const bool out_channels_dummy =
+    google::RegisterFlagValidator(&FLAGS_out_channels, &ValidateOutChannels);
 DEFINE_int32(out_sample_rate, 48000, "Output sample rate in Hz.");
 DEFINE_string(mic_positions, "",
     "Space delimited cartesian coordinates of microphones in meters. "
@@ -117,8 +128,8 @@
   ap->set_stream_key_pressed(FLAGS_ts);
 
   rtc::scoped_ptr<AudioFileProcessor> processor;
-  auto out_file = rtc_make_scoped_ptr(
-      new WavWriter(FLAGS_o, FLAGS_out_sample_rate, FLAGS_out_channels));
+  auto out_file = rtc_make_scoped_ptr(new WavWriter(
+      FLAGS_o, FLAGS_out_sample_rate, static_cast<size_t>(FLAGS_out_channels)));
   std::cout << FLAGS_o << ": " << out_file->FormatAsString() << std::endl;
   if (FLAGS_dump.empty()) {
     auto in_file = rtc_make_scoped_ptr(new WavReader(FLAGS_i));
diff --git a/webrtc/modules/audio_processing/test/debug_dump_test.cc b/webrtc/modules/audio_processing/test/debug_dump_test.cc
index d2dd9c8..005faa0 100644
--- a/webrtc/modules/audio_processing/test/debug_dump_test.cc
+++ b/webrtc/modules/audio_processing/test/debug_dump_test.cc
@@ -327,7 +327,8 @@
   else
     apm_->set_stream_key_pressed(true);
 
-  ASSERT_EQ(input_config_.num_channels(), msg.input_channel_size());
+  ASSERT_EQ(input_config_.num_channels(),
+            static_cast<size_t>(msg.input_channel_size()));
   ASSERT_EQ(input_config_.num_frames() * sizeof(float),
             msg.input_channel(0).size());
 
@@ -341,7 +342,8 @@
                                 output_config_, output_->channels()));
 
   // Check that output of APM is bit-exact to the output in the dump.
-  ASSERT_EQ(output_config_.num_channels(), msg.output_channel_size());
+  ASSERT_EQ(output_config_.num_channels(),
+            static_cast<size_t>(msg.output_channel_size()));
   ASSERT_EQ(output_config_.num_frames() * sizeof(float),
             msg.output_channel(0).size());
   for (int i = 0; i < msg.output_channel_size(); ++i) {
@@ -355,7 +357,8 @@
   ASSERT_TRUE(apm_.get());
 
   ASSERT_GT(msg.channel_size(), 0);
-  ASSERT_EQ(reverse_config_.num_channels(), msg.channel_size());
+  ASSERT_EQ(reverse_config_.num_channels(),
+            static_cast<size_t>(msg.channel_size()));
   ASSERT_EQ(reverse_config_.num_frames() * sizeof(float),
             msg.channel(0).size());
 
diff --git a/webrtc/modules/audio_processing/test/process_test.cc b/webrtc/modules/audio_processing/test/process_test.cc
index ae6b4dc..6e20a78 100644
--- a/webrtc/modules/audio_processing/test/process_test.cc
+++ b/webrtc/modules/audio_processing/test/process_test.cc
@@ -17,6 +17,7 @@
 
 #include <algorithm>
 
+#include "webrtc/base/format_macros.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/common.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
@@ -159,9 +160,9 @@
 
   int32_t sample_rate_hz = 16000;
 
-  int num_capture_input_channels = 1;
-  int num_capture_output_channels = 1;
-  int num_render_channels = 1;
+  size_t num_capture_input_channels = 1;
+  size_t num_capture_output_channels = 1;
+  size_t num_render_channels = 1;
 
   int samples_per_channel = sample_rate_hz / 100;
 
@@ -207,14 +208,14 @@
     } else if (strcmp(argv[i], "-ch") == 0) {
       i++;
       ASSERT_LT(i + 1, argc) << "Specify number of channels after -ch";
-      ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_input_channels));
+      ASSERT_EQ(1, sscanf(argv[i], "%" PRIuS, &num_capture_input_channels));
       i++;
-      ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_output_channels));
+      ASSERT_EQ(1, sscanf(argv[i], "%" PRIuS, &num_capture_output_channels));
 
     } else if (strcmp(argv[i], "-rch") == 0) {
       i++;
       ASSERT_LT(i, argc) << "Specify number of channels after -rch";
-      ASSERT_EQ(1, sscanf(argv[i], "%d", &num_render_channels));
+      ASSERT_EQ(1, sscanf(argv[i], "%" PRIuS, &num_render_channels));
 
     } else if (strcmp(argv[i], "-aec") == 0) {
       ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
@@ -447,10 +448,10 @@
 
   if (verbose) {
     printf("Sample rate: %d Hz\n", sample_rate_hz);
-    printf("Primary channels: %d (in), %d (out)\n",
+    printf("Primary channels: %" PRIuS " (in), %" PRIuS " (out)\n",
            num_capture_input_channels,
            num_capture_output_channels);
-    printf("Reverse channels: %d \n", num_render_channels);
+    printf("Reverse channels: %" PRIuS "\n", num_render_channels);
   }
 
   const std::string out_path = webrtc::test::OutputPath();
@@ -601,14 +602,18 @@
         if (msg.has_output_sample_rate()) {
           output_sample_rate = msg.output_sample_rate();
         }
-        output_layout = LayoutFromChannels(msg.num_output_channels());
-        ASSERT_EQ(kNoErr, apm->Initialize(
-                              msg.sample_rate(),
-                              output_sample_rate,
-                              reverse_sample_rate,
-                              LayoutFromChannels(msg.num_input_channels()),
-                              output_layout,
-                              LayoutFromChannels(msg.num_reverse_channels())));
+        output_layout =
+            LayoutFromChannels(static_cast<size_t>(msg.num_output_channels()));
+        ASSERT_EQ(kNoErr,
+                  apm->Initialize(
+                      msg.sample_rate(),
+                      output_sample_rate,
+                      reverse_sample_rate,
+                      LayoutFromChannels(
+                          static_cast<size_t>(msg.num_input_channels())),
+                      output_layout,
+                      LayoutFromChannels(
+                          static_cast<size_t>(msg.num_reverse_channels()))));
 
         samples_per_channel = msg.sample_rate() / 100;
         far_frame.sample_rate_hz_ = reverse_sample_rate;
@@ -638,9 +643,9 @@
         if (!raw_output) {
           // The WAV file needs to be reset every time, because it can't change
           // its sample rate or number of channels.
-          output_wav_file.reset(new WavWriter(out_filename + ".wav",
-                                              output_sample_rate,
-                                              msg.num_output_channels()));
+          output_wav_file.reset(new WavWriter(
+              out_filename + ".wav", output_sample_rate,
+              static_cast<size_t>(msg.num_output_channels())));
         }
 
       } else if (event_msg.type() == Event::REVERSE_STREAM) {
diff --git a/webrtc/modules/audio_processing/test/test_utils.cc b/webrtc/modules/audio_processing/test/test_utils.cc
index 74f8b73..0bd7012 100644
--- a/webrtc/modules/audio_processing/test/test_utils.cc
+++ b/webrtc/modules/audio_processing/test/test_utils.cc
@@ -76,7 +76,7 @@
 
 void WriteFloatData(const float* const* data,
                     size_t samples_per_channel,
-                    int num_channels,
+                    size_t num_channels,
                     WavWriter* wav_file,
                     RawFile* raw_file) {
   size_t length = num_channels * samples_per_channel;
@@ -116,7 +116,7 @@
       sample_rate_hz / 1000;
 }
 
-AudioProcessing::ChannelLayout LayoutFromChannels(int num_channels) {
+AudioProcessing::ChannelLayout LayoutFromChannels(size_t num_channels) {
   switch (num_channels) {
     case 1:
       return AudioProcessing::kMono;
diff --git a/webrtc/modules/audio_processing/test/test_utils.h b/webrtc/modules/audio_processing/test/test_utils.h
index 291e03e..e23beb6 100644
--- a/webrtc/modules/audio_processing/test/test_utils.h
+++ b/webrtc/modules/audio_processing/test/test_utils.h
@@ -79,7 +79,7 @@
 
 void WriteFloatData(const float* const* data,
                     size_t samples_per_channel,
-                    int num_channels,
+                    size_t num_channels,
                     WavWriter* wav_file,
                     RawFile* raw_file);
 
@@ -93,7 +93,7 @@
 
 template <typename T>
 void SetContainerFormat(int sample_rate_hz,
-                        int num_channels,
+                        size_t num_channels,
                         AudioFrame* frame,
                         rtc::scoped_ptr<ChannelBuffer<T> >* cb) {
   SetFrameSampleRate(frame, sample_rate_hz);
@@ -101,7 +101,7 @@
   cb->reset(new ChannelBuffer<T>(frame->samples_per_channel_, num_channels));
 }
 
-AudioProcessing::ChannelLayout LayoutFromChannels(int num_channels);
+AudioProcessing::ChannelLayout LayoutFromChannels(size_t num_channels);
 
 template <typename T>
 float ComputeSNR(const T* ref, const T* test, size_t length, float* variance) {
diff --git a/webrtc/modules/audio_processing/test/unpack.cc b/webrtc/modules/audio_processing/test/unpack.cc
index cd9205e..8b2b082 100644
--- a/webrtc/modules/audio_processing/test/unpack.cc
+++ b/webrtc/modules/audio_processing/test/unpack.cc
@@ -17,6 +17,7 @@
 
 #include "gflags/gflags.h"
 #include "webrtc/audio_processing/debug.pb.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/modules/audio_processing/test/protobuf_utils.h"
 #include "webrtc/modules/audio_processing/test/test_utils.h"
@@ -79,9 +80,9 @@
   size_t reverse_samples_per_channel = 0;
   size_t input_samples_per_channel = 0;
   size_t output_samples_per_channel = 0;
-  int num_reverse_channels = 0;
-  int num_input_channels = 0;
-  int num_output_channels = 0;
+  size_t num_reverse_channels = 0;
+  size_t num_input_channels = 0;
+  size_t num_output_channels = 0;
   rtc::scoped_ptr<WavWriter> reverse_wav_file;
   rtc::scoped_ptr<WavWriter> input_wav_file;
   rtc::scoped_ptr<WavWriter> output_wav_file;
@@ -117,7 +118,7 @@
         }
         rtc::scoped_ptr<const float* []> data(
             new const float* [num_reverse_channels]);
-        for (int i = 0; i < num_reverse_channels; ++i) {
+        for (size_t i = 0; i < num_reverse_channels; ++i) {
           data[i] = reinterpret_cast<const float*>(msg.channel(i).data());
         }
         WriteFloatData(data.get(),
@@ -148,7 +149,7 @@
         }
         rtc::scoped_ptr<const float* []> data(
             new const float* [num_input_channels]);
-        for (int i = 0; i < num_input_channels; ++i) {
+        for (size_t i = 0; i < num_input_channels; ++i) {
           data[i] = reinterpret_cast<const float*>(msg.input_channel(i).data());
         }
         WriteFloatData(data.get(),
@@ -172,7 +173,7 @@
         }
         rtc::scoped_ptr<const float* []> data(
             new const float* [num_output_channels]);
-        for (int i = 0; i < num_output_channels; ++i) {
+        for (size_t i = 0; i < num_output_channels; ++i) {
           data[i] =
               reinterpret_cast<const float*>(msg.output_channel(i).data());
         }
@@ -268,11 +269,14 @@
               "  Reverse sample rate: %d\n",
               reverse_sample_rate);
       num_input_channels = msg.num_input_channels();
-      fprintf(settings_file, "  Input channels: %d\n", num_input_channels);
+      fprintf(settings_file, "  Input channels: %" PRIuS "\n",
+              num_input_channels);
       num_output_channels = msg.num_output_channels();
-      fprintf(settings_file, "  Output channels: %d\n", num_output_channels);
+      fprintf(settings_file, "  Output channels: %" PRIuS "\n",
+              num_output_channels);
       num_reverse_channels = msg.num_reverse_channels();
-      fprintf(settings_file, "  Reverse channels: %d\n", num_reverse_channels);
+      fprintf(settings_file, "  Reverse channels: %" PRIuS "\n",
+              num_reverse_channels);
 
       fprintf(settings_file, "\n");
 
diff --git a/webrtc/modules/audio_processing/vad/voice_activity_detector.cc b/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
index ef56a35..fc9d103 100644
--- a/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
+++ b/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
@@ -18,7 +18,7 @@
 namespace {
 
 const size_t kMaxLength = 320;
-const int kNumChannels = 1;
+const size_t kNumChannels = 1;
 
 const double kDefaultVoiceValue = 1.0;
 const double kNeutralProbability = 0.5;
diff --git a/webrtc/modules/include/module_common_types.h b/webrtc/modules/include/module_common_types.h
index 3a63af0..89c5f1b 100644
--- a/webrtc/modules/include/module_common_types.h
+++ b/webrtc/modules/include/module_common_types.h
@@ -28,7 +28,7 @@
   uint8_t numEnergy;                  // number of valid entries in arrOfEnergy
   uint8_t arrOfEnergy[kRtpCsrcSize];  // one energy byte (0-9) per channel
   bool isCNG;                         // is this CNG
-  uint8_t channel;                    // number of channels 2 = stereo
+  size_t channel;                     // number of channels 2 = stereo
 };
 
 const int16_t kNoPictureId = -1;
@@ -508,7 +508,7 @@
   void UpdateFrame(int id, uint32_t timestamp, const int16_t* data,
                    size_t samples_per_channel, int sample_rate_hz,
                    SpeechType speech_type, VADActivity vad_activity,
-                   int num_channels = 1, uint32_t energy = -1);
+                   size_t num_channels = 1, uint32_t energy = -1);
 
   AudioFrame& Append(const AudioFrame& rhs);
 
@@ -532,7 +532,7 @@
   int16_t data_[kMaxDataSizeSamples];
   size_t samples_per_channel_;
   int sample_rate_hz_;
-  int num_channels_;
+  size_t num_channels_;
   SpeechType speech_type_;
   VADActivity vad_activity_;
   // Note that there is no guarantee that |energy_| is correct. Any user of this
@@ -574,7 +574,7 @@
                                     int sample_rate_hz,
                                     SpeechType speech_type,
                                     VADActivity vad_activity,
-                                    int num_channels,
+                                    size_t num_channels,
                                     uint32_t energy) {
   id_ = id;
   timestamp_ = timestamp;
@@ -585,7 +585,6 @@
   num_channels_ = num_channels;
   energy_ = energy;
 
-  assert(num_channels >= 0);
   const size_t length = samples_per_channel * num_channels;
   assert(length <= kMaxDataSizeSamples);
   if (data != NULL) {
@@ -610,7 +609,6 @@
   energy_ = src.energy_;
   interleaved_ = src.interleaved_;
 
-  assert(num_channels_ >= 0);
   const size_t length = samples_per_channel_ * num_channels_;
   assert(length <= kMaxDataSizeSamples);
   memcpy(data_, src.data_, sizeof(int16_t) * length);
diff --git a/webrtc/modules/media_file/media_file_utility.cc b/webrtc/modules/media_file/media_file_utility.cc
index 1c2f7fd..8a815cc 100644
--- a/webrtc/modules/media_file/media_file_utility.cc
+++ b/webrtc/modules/media_file/media_file_utility.cc
@@ -253,7 +253,7 @@
 }
 
 int32_t ModuleFileUtility::InitWavCodec(uint32_t samplesPerSec,
-                                        uint32_t channels,
+                                        size_t channels,
                                         uint32_t bitsPerSample,
                                         uint32_t formatTag)
 {
@@ -663,8 +663,7 @@
         return -1;
     }
     _writing = false;
-    uint32_t channels = (codecInst.channels == 0) ?
-        1 : codecInst.channels;
+    size_t channels = (codecInst.channels == 0) ? 1 : codecInst.channels;
 
     if(STR_CASE_CMP(codecInst.plname, "PCMU") == 0)
     {
@@ -732,7 +731,7 @@
     OutStream& wav,
     uint32_t freq,
     size_t bytesPerSample,
-    uint32_t channels,
+    size_t channels,
     uint32_t format,
     size_t lengthInBytes)
 {
@@ -758,7 +757,7 @@
     {
         return -1;
     }
-    uint32_t channels = (codec_info_.channels == 0) ? 1 : codec_info_.channels;
+    size_t channels = (codec_info_.channels == 0) ? 1 : codec_info_.channels;
 
     if(STR_CASE_CMP(codec_info_.plname, "L16") == 0)
     {
@@ -861,13 +860,13 @@
     }
     _writing = true;
     _bytesWritten = 1;
-     out.Write(&_codecId, 1);
-     return 0;
+    out.Write(&_codecId, 1);
+    return 0;
 }
 
 int32_t ModuleFileUtility::WritePreEncodedData(
     OutStream& out,
-    const int8_t*  buffer,
+    const int8_t* buffer,
     const size_t dataLength)
 {
     WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
@@ -1010,7 +1009,7 @@
         (_codecId == kCodecIlbc30Ms))
     {
         size_t byteSize = 0;
-         if(_codecId == kCodecIlbc30Ms)
+        if(_codecId == kCodecIlbc30Ms)
         {
             byteSize = 50;
         }
diff --git a/webrtc/modules/media_file/media_file_utility.h b/webrtc/modules/media_file/media_file_utility.h
index 46ec340..bc2fa5a 100644
--- a/webrtc/modules/media_file/media_file_utility.h
+++ b/webrtc/modules/media_file/media_file_utility.h
@@ -180,7 +180,7 @@
 
 
     int32_t InitWavCodec(uint32_t samplesPerSec,
-                         uint32_t channels,
+                         size_t channels,
                          uint32_t bitsPerSample,
                          uint32_t formatTag);
 
@@ -196,7 +196,7 @@
     int32_t WriteWavHeader(OutStream& stream,
                            uint32_t freqInHz,
                            size_t bytesPerSample,
-                           uint32_t channels,
+                           size_t channels,
                            uint32_t format,
                            size_t lengthInBytes);
 
diff --git a/webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h b/webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h
index 8f58e02..fae8641 100644
--- a/webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h
+++ b/webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h
@@ -29,7 +29,7 @@
 
   virtual bool PayloadIsCompatible(const RtpUtility::Payload& payload,
                                    const uint32_t frequency,
-                                   const uint8_t channels,
+                                   const size_t channels,
                                    const uint32_t rate) const = 0;
 
   virtual void UpdatePayloadRate(RtpUtility::Payload* payload,
@@ -39,7 +39,7 @@
       const char payloadName[RTP_PAYLOAD_NAME_SIZE],
       const int8_t payloadType,
       const uint32_t frequency,
-      const uint8_t channels,
+      const size_t channels,
       const uint32_t rate) const = 0;
 
   virtual int GetPayloadTypeFrequency(
@@ -61,7 +61,7 @@
       const char payload_name[RTP_PAYLOAD_NAME_SIZE],
       const int8_t payload_type,
       const uint32_t frequency,
-      const uint8_t channels,
+      const size_t channels,
       const uint32_t rate,
       bool* created_new_payload_type);
 
@@ -71,7 +71,7 @@
   int32_t ReceivePayloadType(
       const char payload_name[RTP_PAYLOAD_NAME_SIZE],
       const uint32_t frequency,
-      const uint8_t channels,
+      const size_t channels,
       const uint32_t rate,
       int8_t* payload_type) const;
 
@@ -173,7 +173,7 @@
       const char payload_name[RTP_PAYLOAD_NAME_SIZE],
       const size_t payload_name_length,
       const uint32_t frequency,
-      const uint8_t channels,
+      const size_t channels,
       const uint32_t rate);
 
   bool IsRtxInternal(const RTPHeader& header) const;
diff --git a/webrtc/modules/rtp_rtcp/include/rtp_receiver.h b/webrtc/modules/rtp_rtcp/include/rtp_receiver.h
index d257a30..0640d5c 100644
--- a/webrtc/modules/rtp_rtcp/include/rtp_receiver.h
+++ b/webrtc/modules/rtp_rtcp/include/rtp_receiver.h
@@ -61,7 +61,7 @@
       const char payload_name[RTP_PAYLOAD_NAME_SIZE],
       const int8_t payload_type,
       const uint32_t frequency,
-      const uint8_t channels,
+      const size_t channels,
       const uint32_t rate) = 0;
 
   // De-registers |payload_type| from the payload registry.
diff --git a/webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h b/webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h
index d2ac62e..fad97f1 100644
--- a/webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h
+++ b/webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h
@@ -35,7 +35,7 @@
 
 struct AudioPayload {
     uint32_t    frequency;
-    uint8_t     channels;
+    size_t      channels;
     uint32_t    rate;
 };
 
@@ -210,7 +210,7 @@
       const int8_t payloadType,
       const char payloadName[RTP_PAYLOAD_NAME_SIZE],
       const int frequency,
-      const uint8_t channels,
+      const size_t channels,
       const uint32_t rate) = 0;
 
   virtual void OnIncomingSSRCChanged(const uint32_t ssrc) = 0;
@@ -333,7 +333,7 @@
   int32_t OnInitializeDecoder(const int8_t payloadType,
                               const char payloadName[RTP_PAYLOAD_NAME_SIZE],
                               const int frequency,
-                              const uint8_t channels,
+                              const size_t channels,
                               const uint32_t rate) override {
     return 0;
   }
diff --git a/webrtc/modules/rtp_rtcp/source/mock/mock_rtp_payload_strategy.h b/webrtc/modules/rtp_rtcp/source/mock/mock_rtp_payload_strategy.h
index e6a76d0..011829c 100644
--- a/webrtc/modules/rtp_rtcp/source/mock/mock_rtp_payload_strategy.h
+++ b/webrtc/modules/rtp_rtcp/source/mock/mock_rtp_payload_strategy.h
@@ -23,7 +23,7 @@
   MOCK_CONST_METHOD4(PayloadIsCompatible,
                      bool(const RtpUtility::Payload& payload,
                           const uint32_t frequency,
-                          const uint8_t channels,
+                          const size_t channels,
                           const uint32_t rate));
   MOCK_CONST_METHOD2(UpdatePayloadRate,
                      void(RtpUtility::Payload* payload, const uint32_t rate));
@@ -34,7 +34,7 @@
       RtpUtility::Payload*(const char payloadName[RTP_PAYLOAD_NAME_SIZE],
                            const int8_t payloadType,
                            const uint32_t frequency,
-                           const uint8_t channels,
+                           const size_t channels,
                            const uint32_t rate));
 };
 
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc b/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
index f7cdc4c..ce0bcd7 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
@@ -40,7 +40,7 @@
     const char payload_name[RTP_PAYLOAD_NAME_SIZE],
     const int8_t payload_type,
     const uint32_t frequency,
-    const uint8_t channels,
+    const size_t channels,
     const uint32_t rate,
     bool* created_new_payload) {
   assert(payload_type >= 0);
@@ -139,7 +139,7 @@
     const char payload_name[RTP_PAYLOAD_NAME_SIZE],
     const size_t payload_name_length,
     const uint32_t frequency,
-    const uint8_t channels,
+    const size_t channels,
     const uint32_t rate) {
   RtpUtility::PayloadTypeMap::iterator iterator = payload_type_map_.begin();
   for (; iterator != payload_type_map_.end(); ++iterator) {
@@ -171,7 +171,7 @@
 int32_t RTPPayloadRegistry::ReceivePayloadType(
     const char payload_name[RTP_PAYLOAD_NAME_SIZE],
     const uint32_t frequency,
-    const uint8_t channels,
+    const size_t channels,
     const uint32_t rate,
     int8_t* payload_type) const {
   assert(payload_type);
@@ -388,7 +388,7 @@
 
   bool PayloadIsCompatible(const RtpUtility::Payload& payload,
                            const uint32_t frequency,
-                           const uint8_t channels,
+                           const size_t channels,
                            const uint32_t rate) const override {
     return
         payload.audio &&
@@ -407,7 +407,7 @@
       const char payloadName[RTP_PAYLOAD_NAME_SIZE],
       const int8_t payloadType,
       const uint32_t frequency,
-      const uint8_t channels,
+      const size_t channels,
       const uint32_t rate) const override {
     RtpUtility::Payload* payload = new RtpUtility::Payload;
     payload->name[RTP_PAYLOAD_NAME_SIZE - 1] = 0;
@@ -431,7 +431,7 @@
 
   bool PayloadIsCompatible(const RtpUtility::Payload& payload,
                            const uint32_t frequency,
-                           const uint8_t channels,
+                           const size_t channels,
                            const uint32_t rate) const override {
     return !payload.audio;
   }
@@ -445,7 +445,7 @@
       const char payloadName[RTP_PAYLOAD_NAME_SIZE],
       const int8_t payloadType,
       const uint32_t frequency,
-      const uint8_t channels,
+      const size_t channels,
       const uint32_t rate) const override {
     RtpVideoCodecTypes videoType = kRtpVideoGeneric;
 
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_payload_registry_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_payload_registry_unittest.cc
index f19c3da..b73666d 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_payload_registry_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_payload_registry_unittest.cc
@@ -25,7 +25,7 @@
 using ::testing::_;
 
 static const char* kTypicalPayloadName = "name";
-static const uint8_t kTypicalChannels = 1;
+static const size_t kTypicalChannels = 1;
 static const int kTypicalFrequency = 44000;
 static const int kTypicalRate = 32 * 1024;
 
@@ -90,7 +90,7 @@
 TEST_F(RtpPayloadRegistryTest, AudioRedWorkProperly) {
   const uint8_t kRedPayloadType = 127;
   const int kRedSampleRate = 8000;
-  const int kRedChannels = 1;
+  const size_t kRedChannels = 1;
   const int kRedBitRate = 0;
 
   // This creates an audio RTP payload strategy.
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc b/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc
index 93b115a..2e21f23 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc
@@ -96,7 +96,7 @@
     const char payload_name[RTP_PAYLOAD_NAME_SIZE],
     const int8_t payload_type,
     const uint32_t frequency,
-    const uint8_t channels,
+    const size_t channels,
     const uint32_t rate) {
   CriticalSectionScoped lock(critical_section_rtp_receiver_.get());
 
@@ -252,7 +252,7 @@
   bool new_ssrc = false;
   bool re_initialize_decoder = false;
   char payload_name[RTP_PAYLOAD_NAME_SIZE];
-  uint8_t channels = 1;
+  size_t channels = 1;
   uint32_t rate = 0;
 
   {
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h b/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h
index eaa07d9..5cf94c2 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h
@@ -36,7 +36,7 @@
   int32_t RegisterReceivePayload(const char payload_name[RTP_PAYLOAD_NAME_SIZE],
                                  const int8_t payload_type,
                                  const uint32_t frequency,
-                                 const uint8_t channels,
+                                 const size_t channels,
                                  const uint32_t rate) override;
 
   int32_t DeRegisterReceivePayload(const int8_t payload_type) override;
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
index 6ad666b..f4933af 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
@@ -296,7 +296,7 @@
     const char payload_name[RTP_PAYLOAD_NAME_SIZE],
     int8_t payload_number,
     uint32_t frequency,
-    uint8_t channels,
+    size_t channels,
     uint32_t rate) {
   assert(payload_name);
   CriticalSectionScoped cs(send_critsect_.get());
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender.h b/webrtc/modules/rtp_rtcp/source/rtp_sender.h
index 2aa7f4c..3c62336 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender.h
@@ -116,7 +116,7 @@
   int32_t RegisterPayload(
       const char payload_name[RTP_PAYLOAD_NAME_SIZE],
       const int8_t payload_type, const uint32_t frequency,
-      const uint8_t channels, const uint32_t rate);
+      const size_t channels, const uint32_t rate);
 
   int32_t DeRegisterSendPayload(const int8_t payload_type);
 
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
index d361443..2aa4961 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
@@ -66,7 +66,7 @@
     const char payloadName[RTP_PAYLOAD_NAME_SIZE],
     const int8_t payloadType,
     const uint32_t frequency,
-    const uint8_t channels,
+    const size_t channels,
     const uint32_t rate,
     RtpUtility::Payload** payload) {
   if (RtpUtility::StringCompare(payloadName, "cn", 2)) {
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
index a3cee5e..1e96d17 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
@@ -29,7 +29,7 @@
   int32_t RegisterAudioPayload(const char payloadName[RTP_PAYLOAD_NAME_SIZE],
                                int8_t payloadType,
                                uint32_t frequency,
-                               uint8_t channels,
+                               size_t channels,
                                uint32_t rate,
                                RtpUtility::Payload** payload);
 
diff --git a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
index 69ed843..634969b 100644
--- a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
+++ b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
@@ -64,7 +64,7 @@
   int32_t OnInitializeDecoder(const int8_t payloadType,
                               const char payloadName[RTP_PAYLOAD_NAME_SIZE],
                               const int frequency,
-                              const uint8_t channels,
+                              const size_t channels,
                               const uint32_t rate) override {
     if (payloadType == 96) {
       EXPECT_EQ(test_rate, rate) <<
diff --git a/webrtc/system_wrappers/include/aligned_array.h b/webrtc/system_wrappers/include/aligned_array.h
index e985e88..a2ffe99 100644
--- a/webrtc/system_wrappers/include/aligned_array.h
+++ b/webrtc/system_wrappers/include/aligned_array.h
@@ -20,20 +20,20 @@
 // aligned to the given byte alignment.
 template<typename T> class AlignedArray {
  public:
-  AlignedArray(int rows, size_t cols, size_t alignment)
+  AlignedArray(size_t rows, size_t cols, size_t alignment)
       : rows_(rows),
         cols_(cols) {
     RTC_CHECK_GT(alignment, 0u);
     head_row_ = static_cast<T**>(AlignedMalloc(rows_ * sizeof(*head_row_),
                                                alignment));
-    for (int i = 0; i < rows_; ++i) {
+    for (size_t i = 0; i < rows_; ++i) {
       head_row_[i] = static_cast<T*>(AlignedMalloc(cols_ * sizeof(**head_row_),
                                                    alignment));
     }
   }
 
   ~AlignedArray() {
-    for (int i = 0; i < rows_; ++i) {
+    for (size_t i = 0; i < rows_; ++i) {
       AlignedFree(head_row_[i]);
     }
     AlignedFree(head_row_);
@@ -47,27 +47,27 @@
     return head_row_;
   }
 
-  T* Row(int row) {
+  T* Row(size_t row) {
     RTC_CHECK_LE(row, rows_);
     return head_row_[row];
   }
 
-  const T* Row(int row) const {
+  const T* Row(size_t row) const {
     RTC_CHECK_LE(row, rows_);
     return head_row_[row];
   }
 
-  T& At(int row, size_t col) {
+  T& At(size_t row, size_t col) {
     RTC_CHECK_LE(col, cols_);
     return Row(row)[col];
   }
 
-  const T& At(int row, size_t col) const {
+  const T& At(size_t row, size_t col) const {
     RTC_CHECK_LE(col, cols_);
     return Row(row)[col];
   }
 
-  int rows() const {
+  size_t rows() const {
     return rows_;
   }
 
@@ -76,7 +76,7 @@
   }
 
  private:
-  int rows_;
+  size_t rows_;
   size_t cols_;
   T** head_row_;
 };
diff --git a/webrtc/system_wrappers/source/aligned_array_unittest.cc b/webrtc/system_wrappers/source/aligned_array_unittest.cc
index eb3ad88..01238f8 100644
--- a/webrtc/system_wrappers/source/aligned_array_unittest.cc
+++ b/webrtc/system_wrappers/source/aligned_array_unittest.cc
@@ -27,7 +27,7 @@
 TEST(AlignedArrayTest, CheckAlignment) {
   AlignedArray<bool> arr(10, 7, 128);
   ASSERT_TRUE(IsAligned(arr.Array(), 128));
-  for (int i = 0; i < 10; ++i) {
+  for (size_t i = 0; i < 10; ++i) {
     ASSERT_TRUE(IsAligned(arr.Row(i), 128));
     ASSERT_EQ(arr.Row(i), arr.Array()[i]);
   }
@@ -36,13 +36,13 @@
 TEST(AlignedArrayTest, CheckOverlap) {
   AlignedArray<size_t> arr(10, 7, 128);
 
-  for (int i = 0; i < 10; ++i) {
+  for (size_t i = 0; i < 10; ++i) {
     for (size_t j = 0; j < 7; ++j) {
       arr.At(i, j) = 20 * i + j;
     }
   }
 
-  for (int i = 0; i < 10; ++i) {
+  for (size_t i = 0; i < 10; ++i) {
     for (size_t j = 0; j < 7; ++j) {
       ASSERT_EQ(arr.At(i, j), 20 * i + j);
       ASSERT_EQ(arr.Row(i)[j], 20 * i + j);
@@ -53,7 +53,7 @@
 
 TEST(AlignedArrayTest, CheckRowsCols) {
   AlignedArray<bool> arr(10, 7, 128);
-  ASSERT_EQ(arr.rows(), 10);
+  ASSERT_EQ(arr.rows(), 10u);
   ASSERT_EQ(arr.cols(), 7u);
 }
 
diff --git a/webrtc/tools/agc/activity_metric.cc b/webrtc/tools/agc/activity_metric.cc
index 55280fb..2cb0a1b 100644
--- a/webrtc/tools/agc/activity_metric.cc
+++ b/webrtc/tools/agc/activity_metric.cc
@@ -56,7 +56,7 @@
 // silence frame. Otherwise true VAD would drift with respect to the audio.
 // We only consider mono inputs.
 static void DitherSilence(AudioFrame* frame) {
-  ASSERT_EQ(1, frame->num_channels_);
+  ASSERT_EQ(1u, frame->num_channels_);
   const double kRmsSilence = 5;
   const double sum_squared_silence = kRmsSilence * kRmsSilence *
       frame->samples_per_channel_;
diff --git a/webrtc/tools/agc/agc_harness.cc b/webrtc/tools/agc/agc_harness.cc
index 3dcc954..0d35d4b 100644
--- a/webrtc/tools/agc/agc_harness.cc
+++ b/webrtc/tools/agc/agc_harness.cc
@@ -12,6 +12,7 @@
 
 #include "gflags/gflags.h"
 #include "webrtc/base/checks.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/system_wrappers/include/sleep.h"
 #include "webrtc/system_wrappers/include/trace.h"
@@ -176,8 +177,8 @@
     printf("Codecs:\n");
     for (int i = 0; i < codec_->NumOfCodecs(); i++) {
       RTC_CHECK_EQ(0, codec_->GetCodec(i, params));
-      printf("%d %s/%d/%d\n", params.pltype, params.plname, params.plfreq,
-             params.channels);
+      printf("%d %s/%d/%" PRIuS "\n", params.pltype, params.plname,
+             params.plfreq, params.channels);
     }
   }
 
diff --git a/webrtc/video/vie_channel.cc b/webrtc/video/vie_channel.cc
index 401cba8..bc23c9d 100644
--- a/webrtc/video/vie_channel.cc
+++ b/webrtc/video/vie_channel.cc
@@ -1184,7 +1184,7 @@
     const int8_t payload_type,
     const char payload_name[RTP_PAYLOAD_NAME_SIZE],
     const int frequency,
-    const uint8_t channels,
+    const size_t channels,
     const uint32_t rate) {
   LOG(LS_INFO) << "OnInitializeDecoder " << static_cast<int>(payload_type)
                << " " << payload_name;
diff --git a/webrtc/video/vie_channel.h b/webrtc/video/vie_channel.h
index 25d06b9..4ba3948 100644
--- a/webrtc/video/vie_channel.h
+++ b/webrtc/video/vie_channel.h
@@ -190,7 +190,7 @@
   int32_t OnInitializeDecoder(const int8_t payload_type,
                               const char payload_name[RTP_PAYLOAD_NAME_SIZE],
                               const int frequency,
-                              const uint8_t channels,
+                              const size_t channels,
                               const uint32_t rate) override;
   void OnIncomingSSRCChanged(const uint32_t ssrc) override;
   void OnIncomingCSRCChanged(const uint32_t CSRC, const bool added) override;
diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc
index 64b40a8..b1b55e8 100644
--- a/webrtc/voice_engine/channel.cc
+++ b/webrtc/voice_engine/channel.cc
@@ -419,11 +419,11 @@
     int8_t payloadType,
     const char payloadName[RTP_PAYLOAD_NAME_SIZE],
     int frequency,
-    uint8_t channels,
+    size_t channels,
     uint32_t rate) {
     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                  "Channel::OnInitializeDecoder(payloadType=%d, "
-                 "payloadName=%s, frequency=%u, channels=%u, rate=%u)",
+                 "payloadName=%s, frequency=%u, channels=%" PRIuS ", rate=%u)",
                  payloadType, payloadName, frequency, channels, rate);
 
     CodecInst receiveCodec = {0};
@@ -459,7 +459,7 @@
 {
     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
                  "Channel::OnReceivedPayloadData(payloadSize=%" PRIuS ","
-                 " payloadType=%u, audioChannel=%u)",
+                 " payloadType=%u, audioChannel=%" PRIuS ")",
                  payloadSize,
                  rtpHeader->header.payloadType,
                  rtpHeader->type.Audio.channel);
@@ -1079,8 +1079,8 @@
         {
             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                          VoEId(_instanceId,_channelId),
-                         "Channel::Init() unable to register %s (%d/%d/%d/%d) "
-                         "to RTP/RTCP receiver",
+                         "Channel::Init() unable to register %s "
+                         "(%d/%d/%" PRIuS "/%d) to RTP/RTCP receiver",
                          codec.plname, codec.pltype, codec.plfreq,
                          codec.channels, codec.rate);
         }
@@ -1088,8 +1088,8 @@
         {
             WEBRTC_TRACE(kTraceInfo, kTraceVoice,
                          VoEId(_instanceId,_channelId),
-                         "Channel::Init() %s (%d/%d/%d/%d) has been added to "
-                         "the RTP/RTCP receiver",
+                         "Channel::Init() %s (%d/%d/%" PRIuS "/%d) has been "
+                         "added to the RTP/RTCP receiver",
                          codec.plname, codec.pltype, codec.plfreq,
                          codec.channels, codec.rate);
         }
@@ -1591,7 +1591,7 @@
 
     CodecInst codec;
     int32_t samplingFreqHz(-1);
-    const int kMono = 1;
+    const size_t kMono = 1;
     if (frequency == kFreq32000Hz)
         samplingFreqHz = 32000;
     else if (frequency == kFreq16000Hz)
@@ -3355,7 +3355,7 @@
 void Channel::Demultiplex(const int16_t* audio_data,
                           int sample_rate,
                           size_t number_of_frames,
-                          int number_of_channels) {
+                          size_t number_of_channels) {
   CodecInst codec;
   GetSendCodec(codec);
 
@@ -3842,7 +3842,7 @@
             sample < _audioFrame.samples_per_channel_;
             sample++)
         {
-            for (int channel = 0;
+            for (size_t channel = 0;
                 channel < _audioFrame.num_channels_;
                 channel++)
             {
@@ -3976,7 +3976,8 @@
                          kTraceVoice,
                          VoEId(_instanceId, _channelId),
                          "Channel::RegisterReceiveCodecsToRTPModule() unable"
-                         " to register %s (%d/%d/%d/%d) to RTP/RTCP receiver",
+                         " to register %s (%d/%d/%" PRIuS "/%d) to RTP/RTCP "
+                         "receiver",
                          codec.plname, codec.pltype, codec.plfreq,
                          codec.channels, codec.rate);
         }
@@ -3986,7 +3987,7 @@
                          kTraceVoice,
                          VoEId(_instanceId, _channelId),
                          "Channel::RegisterReceiveCodecsToRTPModule() %s "
-                         "(%d/%d/%d/%d) has been added to the RTP/RTCP "
+                         "(%d/%d/%" PRIuS "/%d) has been added to the RTP/RTCP "
                          "receiver",
                          codec.plname, codec.pltype, codec.plfreq,
                          codec.channels, codec.rate);
diff --git a/webrtc/voice_engine/channel.h b/webrtc/voice_engine/channel.h
index 9184b93..d15f9db 100644
--- a/webrtc/voice_engine/channel.h
+++ b/webrtc/voice_engine/channel.h
@@ -381,7 +381,7 @@
     int32_t OnInitializeDecoder(int8_t payloadType,
                                 const char payloadName[RTP_PAYLOAD_NAME_SIZE],
                                 int frequency,
-                                uint8_t channels,
+                                size_t channels,
                                 uint32_t rate) override;
     void OnIncomingSSRCChanged(uint32_t ssrc) override;
     void OnIncomingCSRCChanged(uint32_t CSRC, bool added) override;
@@ -451,7 +451,7 @@
     void Demultiplex(const int16_t* audio_data,
                      int sample_rate,
                      size_t number_of_frames,
-                     int number_of_channels);
+                     size_t number_of_channels);
     uint32_t PrepareEncodeAndSend(int mixingFrequency);
     uint32_t EncodeAndSend();
 
diff --git a/webrtc/voice_engine/output_mixer.cc b/webrtc/voice_engine/output_mixer.cc
index d3a4f7f..0dacf35 100644
--- a/webrtc/voice_engine/output_mixer.cc
+++ b/webrtc/voice_engine/output_mixer.cc
@@ -10,6 +10,7 @@
 
 #include "webrtc/voice_engine/output_mixer.h"
 
+#include "webrtc/base/format_macros.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/utility/include/audio_frame_operations.h"
 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
@@ -462,11 +463,12 @@
 }
 
 int OutputMixer::GetMixedAudio(int sample_rate_hz,
-                               int num_channels,
+                               size_t num_channels,
                                AudioFrame* frame) {
-  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
-               "OutputMixer::GetMixedAudio(sample_rate_hz=%d, num_channels=%d)",
-               sample_rate_hz, num_channels);
+  WEBRTC_TRACE(
+      kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+      "OutputMixer::GetMixedAudio(sample_rate_hz=%d, num_channels=%" PRIuS ")",
+      sample_rate_hz, num_channels);
 
   // --- Record playout if enabled
   {
diff --git a/webrtc/voice_engine/output_mixer.h b/webrtc/voice_engine/output_mixer.h
index c042a03..91387e6 100644
--- a/webrtc/voice_engine/output_mixer.h
+++ b/webrtc/voice_engine/output_mixer.h
@@ -63,7 +63,7 @@
     int32_t SetAnonymousMixabilityStatus(MixerParticipant& participant,
                                          bool mixable);
 
-    int GetMixedAudio(int sample_rate_hz, int num_channels,
+    int GetMixedAudio(int sample_rate_hz, size_t num_channels,
                       AudioFrame* audioFrame);
 
     // VoEVolumeControl
diff --git a/webrtc/voice_engine/test/auto_test/standard/codec_test.cc b/webrtc/voice_engine/test/auto_test/standard/codec_test.cc
index 5ab6d58..3a3d830 100644
--- a/webrtc/voice_engine/test/auto_test/standard/codec_test.cc
+++ b/webrtc/voice_engine/test/auto_test/standard/codec_test.cc
@@ -50,7 +50,7 @@
 
 TEST_F(CodecTest, PcmuIsDefaultCodecAndHasTheRightValues) {
   EXPECT_EQ(0, voe_codec_->GetSendCodec(channel_, codec_instance_));
-  EXPECT_EQ(1, codec_instance_.channels);
+  EXPECT_EQ(1u, codec_instance_.channels);
   EXPECT_EQ(160, codec_instance_.pacsize);
   EXPECT_EQ(8000, codec_instance_.plfreq);
   EXPECT_EQ(0, codec_instance_.pltype);
diff --git a/webrtc/voice_engine/test/cmd_test/voe_cmd_test.cc b/webrtc/voice_engine/test/cmd_test/voe_cmd_test.cc
index 0021a4b..ccfe3c2 100644
--- a/webrtc/voice_engine/test/cmd_test/voe_cmd_test.cc
+++ b/webrtc/voice_engine/test/cmd_test/voe_cmd_test.cc
@@ -19,6 +19,7 @@
 
 #include "gflags/gflags.h"
 #include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/format_macros.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/call/rtc_event_log.h"
 #include "webrtc/engine_configurations.h"
@@ -113,8 +114,8 @@
     int res = codec->GetCodec(i, codec_params);
     VALIDATE;
     SetStereoIfOpus(opus_stereo, &codec_params);
-    printf("%2d. %3d  %s/%d/%d \n", i, codec_params.pltype, codec_params.plname,
-           codec_params.plfreq, codec_params.channels);
+    printf("%2d. %3d  %s/%d/%" PRIuS " \n", i, codec_params.pltype,
+           codec_params.plname, codec_params.plfreq, codec_params.channels);
   }
 }
 
diff --git a/webrtc/voice_engine/transmit_mixer.cc b/webrtc/voice_engine/transmit_mixer.cc
index 2241f46..1204b04 100644
--- a/webrtc/voice_engine/transmit_mixer.cc
+++ b/webrtc/voice_engine/transmit_mixer.cc
@@ -300,7 +300,8 @@
     return 0;
 }
 
-void TransmitMixer::GetSendCodecInfo(int* max_sample_rate, int* max_channels) {
+void TransmitMixer::GetSendCodecInfo(int* max_sample_rate,
+                                     size_t* max_channels) {
   *max_sample_rate = 8000;
   *max_channels = 1;
   for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
@@ -318,7 +319,7 @@
 int32_t
 TransmitMixer::PrepareDemux(const void* audioSamples,
                             size_t nSamples,
-                            uint8_t nChannels,
+                            size_t nChannels,
                             uint32_t samplesPerSec,
                             uint16_t totalDelayMS,
                             int32_t clockDrift,
@@ -327,7 +328,7 @@
 {
     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                  "TransmitMixer::PrepareDemux(nSamples=%" PRIuS ", "
-                 "nChannels=%u, samplesPerSec=%u, totalDelayMS=%u, "
+                 "nChannels=%" PRIuS ", samplesPerSec=%u, totalDelayMS=%u, "
                  "clockDrift=%d, currentMicLevel=%u)",
                  nSamples, nChannels, samplesPerSec, totalDelayMS, clockDrift,
                  currentMicLevel);
@@ -432,8 +433,8 @@
 }
 
 void TransmitMixer::DemuxAndMix(const int voe_channels[],
-                                int number_of_voe_channels) {
-  for (int i = 0; i < number_of_voe_channels; ++i) {
+                                size_t number_of_voe_channels) {
+  for (size_t i = 0; i < number_of_voe_channels; ++i) {
     voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
     voe::Channel* channel_ptr = ch.channel();
     if (channel_ptr) {
@@ -465,8 +466,8 @@
 }
 
 void TransmitMixer::EncodeAndSend(const int voe_channels[],
-                                  int number_of_voe_channels) {
-  for (int i = 0; i < number_of_voe_channels; ++i) {
+                                  size_t number_of_voe_channels) {
+  for (size_t i = 0; i < number_of_voe_channels; ++i) {
     voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
     voe::Channel* channel_ptr = ch.channel();
     if (channel_ptr && channel_ptr->Sending())
@@ -698,8 +699,7 @@
     const uint32_t notificationTime(0); // Not supported in VoE
     CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
 
-    if (codecInst != NULL &&
-      (codecInst->channels < 0 || codecInst->channels > 2))
+    if (codecInst != NULL && codecInst->channels > 2)
     {
         _engineStatisticsPtr->SetLastError(
             VE_BAD_ARGUMENT, kTraceError,
@@ -1133,10 +1133,10 @@
 
 void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
                                        size_t samples_per_channel,
-                                       int num_channels,
+                                       size_t num_channels,
                                        int sample_rate_hz) {
   int codec_rate;
-  int num_codec_channels;
+  size_t num_codec_channels;
   GetSendCodecInfo(&codec_rate, &num_codec_channels);
   stereo_codec_ = num_codec_channels == 2;
 
diff --git a/webrtc/voice_engine/transmit_mixer.h b/webrtc/voice_engine/transmit_mixer.h
index 071d91d..0aee106 100644
--- a/webrtc/voice_engine/transmit_mixer.h
+++ b/webrtc/voice_engine/transmit_mixer.h
@@ -52,7 +52,7 @@
 
     int32_t PrepareDemux(const void* audioSamples,
                          size_t nSamples,
-                         uint8_t  nChannels,
+                         size_t nChannels,
                          uint32_t samplesPerSec,
                          uint16_t totalDelayMS,
                          int32_t  clockDrift,
@@ -63,12 +63,12 @@
     int32_t DemuxAndMix();
     // Used by the Chrome to pass the recording data to the specific VoE
     // channels for demux.
-    void DemuxAndMix(const int voe_channels[], int number_of_voe_channels);
+    void DemuxAndMix(const int voe_channels[], size_t number_of_voe_channels);
 
     int32_t EncodeAndSend();
     // Used by the Chrome to pass the recording data to the specific VoE
     // channels for encoding and sending to the network.
-    void EncodeAndSend(const int voe_channels[], int number_of_voe_channels);
+    void EncodeAndSend(const int voe_channels[], size_t number_of_voe_channels);
 
     // Must be called on the same thread as PrepareDemux().
     uint32_t CaptureLevel() const;
@@ -170,11 +170,11 @@
 
     // Gets the maximum sample rate and number of channels over all currently
     // sending codecs.
-    void GetSendCodecInfo(int* max_sample_rate, int* max_channels);
+    void GetSendCodecInfo(int* max_sample_rate, size_t* max_channels);
 
     void GenerateAudioFrame(const int16_t audioSamples[],
                             size_t nSamples,
-                            int nChannels,
+                            size_t nChannels,
                             int samplesPerSec);
     int32_t RecordAudioToFile(uint32_t mixingFrequency);
 
diff --git a/webrtc/voice_engine/utility.cc b/webrtc/voice_engine/utility.cc
index eb442ec..605e553 100644
--- a/webrtc/voice_engine/utility.cc
+++ b/webrtc/voice_engine/utility.cc
@@ -34,12 +34,12 @@
 
 void RemixAndResample(const int16_t* src_data,
                       size_t samples_per_channel,
-                      int num_channels,
+                      size_t num_channels,
                       int sample_rate_hz,
                       PushResampler<int16_t>* resampler,
                       AudioFrame* dst_frame) {
   const int16_t* audio_ptr = src_data;
-  int audio_ptr_num_channels = num_channels;
+  size_t audio_ptr_num_channels = num_channels;
   int16_t mono_audio[AudioFrame::kMaxDataSizeSamples];
 
   // Downmix before resampling.
@@ -68,8 +68,7 @@
                   << ", dst_frame->data_ = " << dst_frame->data_;
     assert(false);
   }
-  dst_frame->samples_per_channel_ =
-      static_cast<size_t>(out_length / audio_ptr_num_channels);
+  dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
 
   // Upmix after resampling.
   if (num_channels == 1 && dst_frame->num_channels_ == 2) {
@@ -81,9 +80,9 @@
 }
 
 void MixWithSat(int16_t target[],
-                int target_channel,
+                size_t target_channel,
                 const int16_t source[],
-                int source_channel,
+                size_t source_channel,
                 size_t source_len) {
   assert(target_channel == 1 || target_channel == 2);
   assert(source_channel == 1 || source_channel == 2);
diff --git a/webrtc/voice_engine/utility.h b/webrtc/voice_engine/utility.h
index cc44533..4139f05 100644
--- a/webrtc/voice_engine/utility.h
+++ b/webrtc/voice_engine/utility.h
@@ -40,15 +40,15 @@
 // parameters.
 void RemixAndResample(const int16_t* src_data,
                       size_t samples_per_channel,
-                      int num_channels,
+                      size_t num_channels,
                       int sample_rate_hz,
                       PushResampler<int16_t>* resampler,
                       AudioFrame* dst_frame);
 
 void MixWithSat(int16_t target[],
-                int target_channel,
+                size_t target_channel,
                 const int16_t source[],
-                int source_channel,
+                size_t source_channel,
                 size_t source_len);
 
 }  // namespace voe
diff --git a/webrtc/voice_engine/voe_base_impl.cc b/webrtc/voice_engine/voe_base_impl.cc
index 0b6c13c..3e5cfbb 100644
--- a/webrtc/voice_engine/voe_base_impl.cc
+++ b/webrtc/voice_engine/voe_base_impl.cc
@@ -82,7 +82,7 @@
 int32_t VoEBaseImpl::RecordedDataIsAvailable(const void* audioSamples,
                                              const size_t nSamples,
                                              const size_t nBytesPerSample,
-                                             const uint8_t nChannels,
+                                             const size_t nChannels,
                                              const uint32_t samplesPerSec,
                                              const uint32_t totalDelayMS,
                                              const int32_t clockDrift,
@@ -97,23 +97,22 @@
 
 int32_t VoEBaseImpl::NeedMorePlayData(const size_t nSamples,
                                       const size_t nBytesPerSample,
-                                      const uint8_t nChannels,
+                                      const size_t nChannels,
                                       const uint32_t samplesPerSec,
                                       void* audioSamples,
                                       size_t& nSamplesOut,
                                       int64_t* elapsed_time_ms,
                                       int64_t* ntp_time_ms) {
-  GetPlayoutData(static_cast<int>(samplesPerSec), static_cast<int>(nChannels),
-                 nSamples, true, audioSamples,
-                 elapsed_time_ms, ntp_time_ms);
+  GetPlayoutData(static_cast<int>(samplesPerSec), nChannels, nSamples, true,
+                 audioSamples, elapsed_time_ms, ntp_time_ms);
   nSamplesOut = audioFrame_.samples_per_channel_;
   return 0;
 }
 
 int VoEBaseImpl::OnDataAvailable(const int voe_channels[],
-                                 int number_of_voe_channels,
+                                 size_t number_of_voe_channels,
                                  const int16_t* audio_data, int sample_rate,
-                                 int number_of_channels,
+                                 size_t number_of_channels,
                                  size_t number_of_frames,
                                  int audio_delay_milliseconds, int volume,
                                  bool key_pressed, bool need_audio_processing) {
@@ -128,7 +127,7 @@
 
   // No need to go through the APM, demultiplex the data to each VoE channel,
   // encode and send to the network.
-  for (int i = 0; i < number_of_voe_channels; ++i) {
+  for (size_t i = 0; i < number_of_voe_channels; ++i) {
     // TODO(ajm): In the case where multiple channels are using the same codec
     // rate, this path needlessly does extra conversions. We should convert once
     // and share between channels.
@@ -142,14 +141,14 @@
 
 void VoEBaseImpl::OnData(int voe_channel, const void* audio_data,
                          int bits_per_sample, int sample_rate,
-                         int number_of_channels, size_t number_of_frames) {
+                         size_t number_of_channels, size_t number_of_frames) {
   PushCaptureData(voe_channel, audio_data, bits_per_sample, sample_rate,
                   number_of_channels, number_of_frames);
 }
 
 void VoEBaseImpl::PushCaptureData(int voe_channel, const void* audio_data,
                                   int bits_per_sample, int sample_rate,
-                                  int number_of_channels,
+                                  size_t number_of_channels,
                                   size_t number_of_frames) {
   voe::ChannelOwner ch = shared_->channel_manager().GetChannel(voe_channel);
   voe::Channel* channel_ptr = ch.channel();
@@ -165,7 +164,7 @@
 
 void VoEBaseImpl::PullRenderData(int bits_per_sample,
                                  int sample_rate,
-                                 int number_of_channels,
+                                 size_t number_of_channels,
                                  size_t number_of_frames,
                                  void* audio_data, int64_t* elapsed_time_ms,
                                  int64_t* ntp_time_ms) {
@@ -699,8 +698,8 @@
 }
 
 int VoEBaseImpl::ProcessRecordedDataWithAPM(
-    const int voe_channels[], int number_of_voe_channels,
-    const void* audio_data, uint32_t sample_rate, uint8_t number_of_channels,
+    const int voe_channels[], size_t number_of_voe_channels,
+    const void* audio_data, uint32_t sample_rate, size_t number_of_channels,
     size_t number_of_frames, uint32_t audio_delay_milliseconds,
     int32_t clock_drift, uint32_t volume, bool key_pressed) {
   assert(shared_->transmit_mixer() != nullptr);
@@ -765,7 +764,7 @@
   return 0;
 }
 
-void VoEBaseImpl::GetPlayoutData(int sample_rate, int number_of_channels,
+void VoEBaseImpl::GetPlayoutData(int sample_rate, size_t number_of_channels,
                                  size_t number_of_frames, bool feed_data_to_apm,
                                  void* audio_data, int64_t* elapsed_time_ms,
                                  int64_t* ntp_time_ms) {
diff --git a/webrtc/voice_engine/voe_base_impl.h b/webrtc/voice_engine/voe_base_impl.h
index 167aacd..58e0387 100644
--- a/webrtc/voice_engine/voe_base_impl.h
+++ b/webrtc/voice_engine/voe_base_impl.h
@@ -57,7 +57,7 @@
   int32_t RecordedDataIsAvailable(const void* audioSamples,
                                   const size_t nSamples,
                                   const size_t nBytesPerSample,
-                                  const uint8_t nChannels,
+                                  const size_t nChannels,
                                   const uint32_t samplesPerSec,
                                   const uint32_t totalDelayMS,
                                   const int32_t clockDrift,
@@ -66,17 +66,17 @@
                                   uint32_t& newMicLevel) override;
   int32_t NeedMorePlayData(const size_t nSamples,
                            const size_t nBytesPerSample,
-                           const uint8_t nChannels,
+                           const size_t nChannels,
                            const uint32_t samplesPerSec,
                            void* audioSamples,
                            size_t& nSamplesOut,
                            int64_t* elapsed_time_ms,
                            int64_t* ntp_time_ms) override;
   int OnDataAvailable(const int voe_channels[],
-                      int number_of_voe_channels,
+                      size_t number_of_voe_channels,
                       const int16_t* audio_data,
                       int sample_rate,
-                      int number_of_channels,
+                      size_t number_of_channels,
                       size_t number_of_frames,
                       int audio_delay_milliseconds,
                       int current_volume,
@@ -86,17 +86,17 @@
               const void* audio_data,
               int bits_per_sample,
               int sample_rate,
-              int number_of_channels,
+              size_t number_of_channels,
               size_t number_of_frames) override;
   void PushCaptureData(int voe_channel,
                        const void* audio_data,
                        int bits_per_sample,
                        int sample_rate,
-                       int number_of_channels,
+                       size_t number_of_channels,
                        size_t number_of_frames) override;
   void PullRenderData(int bits_per_sample,
                       int sample_rate,
-                      int number_of_channels,
+                      size_t number_of_channels,
                       size_t number_of_frames,
                       void* audio_data,
                       int64_t* elapsed_time_ms,
@@ -124,12 +124,12 @@
   // It returns new AGC microphone volume or 0 if no volume changes
   // should be done.
   int ProcessRecordedDataWithAPM(
-      const int voe_channels[], int number_of_voe_channels,
-      const void* audio_data, uint32_t sample_rate, uint8_t number_of_channels,
+      const int voe_channels[], size_t number_of_voe_channels,
+      const void* audio_data, uint32_t sample_rate, size_t number_of_channels,
       size_t number_of_frames, uint32_t audio_delay_milliseconds,
       int32_t clock_drift, uint32_t volume, bool key_pressed);
 
-  void GetPlayoutData(int sample_rate, int number_of_channels,
+  void GetPlayoutData(int sample_rate, size_t number_of_channels,
                       size_t number_of_frames, bool feed_data_to_apm,
                       void* audio_data, int64_t* elapsed_time_ms,
                       int64_t* ntp_time_ms);
diff --git a/webrtc/voice_engine/voe_codec_impl.cc b/webrtc/voice_engine/voe_codec_impl.cc
index 162f1c2..6eb11b7 100644
--- a/webrtc/voice_engine/voe_codec_impl.cc
+++ b/webrtc/voice_engine/voe_codec_impl.cc
@@ -10,6 +10,7 @@
 
 #include "webrtc/voice_engine/voe_codec_impl.h"
 
+#include "webrtc/base/format_macros.h"
 #include "webrtc/modules/audio_coding/include/audio_coding_module.h"
 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/include/trace.h"
@@ -64,7 +65,7 @@
                "SetSendCodec(channel=%d, codec)", channel);
   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "codec: plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
-               "channels=%d, rate=%d",
+               "channels=%" PRIuS ", rate=%d",
                codec.plname, codec.pacsize, codec.plfreq, codec.pltype,
                codec.channels, codec.rate);
   if (!_shared->statistics().Initialized()) {
@@ -161,7 +162,7 @@
   WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "SetRecPayloadType(channel=%d, codec)", channel);
   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
-               "codec: plname=%s, plfreq=%d, pltype=%d, channels=%u, "
+               "codec: plname=%s, plfreq=%d, pltype=%d, channels=%" PRIuS ", "
                "pacsize=%d, rate=%d",
                codec.plname, codec.plfreq, codec.pltype, codec.channels,
                codec.pacsize, codec.rate);