Introduce the new locking scheme in AudioProcessingImpl

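The single lock (crit_) that previously guarded all of
AudioProcessingImpl is replaced by two locks, crit_render_ and
crit_capture_, so that the render and capture threads no longer
contend for one critical section; a separate crit_debug_ serializes
the writes to the debug dump file. The submodules are grouped into
ApmPublicSubmodules (reachable through the public APM API) and
ApmPrivateSubmodules (used internally from capture or during
initialization), and the remaining state is partitioned by access
pattern into formats_, constants_, render_, capture_ and
capture_nonlocked_.

Whenever both locks are needed, crit_render_ is acquired before
crit_capture_, which keeps the lock order consistent across the
single-threaded sections (initialization, SetExtraOptions(), the
debug recording API). A minimal sketch of this two-lock pattern,
illustrative only and not part of this CL:

  #include "webrtc/base/criticalsection.h"

  // Hypothetical class demonstrating the acquisition order used in
  // AudioProcessingImpl; all names here are illustrative only.
  class TwoLockExample {
   public:
    void SingleThreadedSection() {
      // Both locks are needed: render is always taken first.
      rtc::CritScope cs_render(&crit_render_);
      rtc::CritScope cs_capture(&crit_capture_);
      // ... reconfigure state shared by both threads ...
    }
    void CaptureThreadSection() {
      // Capture-side state only.
      rtc::CritScope cs(&crit_capture_);
      // ... process one capture audio chunk ...
    }

   private:
    rtc::CriticalSection crit_render_;
    rtc::CriticalSection crit_capture_;
  };
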
BUG=webrtc:5099

Review URL: https://codereview.webrtc.org/1424663003

Cr-Commit-Position: refs/heads/master@{#10836}
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc
index 75bfb20..48f23b1 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -37,7 +37,6 @@
 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h"
 #include "webrtc/modules/audio_processing/voice_detection_impl.h"
 #include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/include/file_wrapper.h"
 #include "webrtc/system_wrappers/include/logging.h"
 #include "webrtc/system_wrappers/include/metrics.h"
@@ -75,9 +74,45 @@
   assert(false);
   return false;
 }
-
 }  // namespace
 
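+// Submodules reachable through the public APM API. The raw pointers are
+// deleted via the component_list in ApmPrivateSubmodules, in the destructor.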
+struct AudioProcessingImpl::ApmPublicSubmodules {
+  ApmPublicSubmodules()
+      : echo_cancellation(nullptr),
+        echo_control_mobile(nullptr),
+        gain_control(nullptr),
+        high_pass_filter(nullptr),
+        level_estimator(nullptr),
+        noise_suppression(nullptr),
+        voice_detection(nullptr) {}
+  // Accessed from outside of APM without any lock acquired.
+  EchoCancellationImpl* echo_cancellation;
+  EchoControlMobileImpl* echo_control_mobile;
+  GainControlImpl* gain_control;
+  HighPassFilterImpl* high_pass_filter;
+  LevelEstimatorImpl* level_estimator;
+  NoiseSuppressionImpl* noise_suppression;
+  VoiceDetectionImpl* voice_detection;
+  rtc::scoped_ptr<GainControlForNewAgc> gain_control_for_new_agc;
+
+  // Accessed internally from both render and capture.
+  rtc::scoped_ptr<TransientSuppressor> transient_suppressor;
+  rtc::scoped_ptr<IntelligibilityEnhancer> intelligibility_enhancer;
+};
+
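+// Submodules that are not exposed through the public APM API; accessed only
+// from the capture side or during initialization.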
+struct AudioProcessingImpl::ApmPrivateSubmodules {
+  explicit ApmPrivateSubmodules(Beamformer<float>* beamformer)
+      : beamformer(beamformer) {}
+  // Accessed internally from capture or during initialization.
+  std::list<ProcessingComponent*> component_list;
+  rtc::scoped_ptr<Beamformer<float>> beamformer;
+  rtc::scoped_ptr<AgcManagerDirect> agc_manager;
+};
+
 // Throughout webrtc, it's assumed that success is represented by zero.
 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero");
 
@@ -172,7 +203,7 @@
   AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer);
   if (apm->Initialize() != kNoError) {
     delete apm;
-    apm = NULL;
+    apm = nullptr;
   }
 
   return apm;
@@ -183,98 +214,90 @@
 
 AudioProcessingImpl::AudioProcessingImpl(const Config& config,
                                          Beamformer<float>* beamformer)
-    : echo_cancellation_(NULL),
-      echo_control_mobile_(NULL),
-      gain_control_(NULL),
-      high_pass_filter_(NULL),
-      level_estimator_(NULL),
-      noise_suppression_(NULL),
-      voice_detection_(NULL),
-      crit_(CriticalSectionWrapper::CreateCriticalSection()),
-#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-      debug_file_(FileWrapper::Create()),
-      event_msg_(new audioproc::Event()),
-#endif
-      fwd_proc_format_(kSampleRate16kHz),
-      rev_proc_format_(kSampleRate16kHz, 1),
-      split_rate_(kSampleRate16kHz),
-      stream_delay_ms_(0),
-      delay_offset_ms_(0),
-      was_stream_delay_set_(false),
-      last_stream_delay_ms_(0),
-      last_aec_system_delay_ms_(0),
-      stream_delay_jumps_(-1),
-      aec_system_delay_jumps_(-1),
-      output_will_be_muted_(false),
-      key_pressed_(false),
+    : public_submodules_(new ApmPublicSubmodules()),
+      private_submodules_(new ApmPrivateSubmodules(beamformer)),
+      constants_(config.Get<ExperimentalAgc>().startup_min_volume,
+                 config.Get<Beamforming>().array_geometry,
+                 config.Get<Beamforming>().target_direction,
 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
-      use_new_agc_(false),
+                 false,
 #else
-      use_new_agc_(config.Get<ExperimentalAgc>().enabled),
+                 config.Get<ExperimentalAgc>().enabled,
 #endif
-      agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume),
+                 config.Get<Intelligibility>().enabled,
+                 config.Get<Beamforming>().enabled),
+
 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
-      transient_suppressor_enabled_(false),
+      capture_(false)
 #else
-      transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled),
+      capture_(config.Get<ExperimentalNs>().enabled)
 #endif
-      beamformer_enabled_(config.Get<Beamforming>().enabled),
-      beamformer_(beamformer),
-      array_geometry_(config.Get<Beamforming>().array_geometry),
-      target_direction_(config.Get<Beamforming>().target_direction),
-      intelligibility_enabled_(config.Get<Intelligibility>().enabled) {
-  echo_cancellation_ = new EchoCancellationImpl(this, crit_);
-  component_list_.push_back(echo_cancellation_);
+{
+  {
+    rtc::CritScope cs_render(&crit_render_);
+    rtc::CritScope cs_capture(&crit_capture_);
 
-  echo_control_mobile_ = new EchoControlMobileImpl(this, crit_);
-  component_list_.push_back(echo_control_mobile_);
+    public_submodules_->echo_cancellation =
+        new EchoCancellationImpl(this, &crit_render_, &crit_capture_);
+    public_submodules_->echo_control_mobile =
+        new EchoControlMobileImpl(this, &crit_render_, &crit_capture_);
+    public_submodules_->gain_control =
+        new GainControlImpl(this, &crit_capture_, &crit_capture_);
+    public_submodules_->high_pass_filter =
+        new HighPassFilterImpl(this, &crit_capture_);
+    public_submodules_->level_estimator =
+        new LevelEstimatorImpl(this, &crit_capture_);
+    public_submodules_->noise_suppression =
+        new NoiseSuppressionImpl(this, &crit_capture_);
+    public_submodules_->voice_detection =
+        new VoiceDetectionImpl(this, &crit_capture_);
+    public_submodules_->gain_control_for_new_agc.reset(
+        new GainControlForNewAgc(public_submodules_->gain_control));
 
-  gain_control_ = new GainControlImpl(this, crit_);
-  component_list_.push_back(gain_control_);
-
-  high_pass_filter_ = new HighPassFilterImpl(this, crit_);
-  component_list_.push_back(high_pass_filter_);
-
-  level_estimator_ = new LevelEstimatorImpl(this, crit_);
-  component_list_.push_back(level_estimator_);
-
-  noise_suppression_ = new NoiseSuppressionImpl(this, crit_);
-  component_list_.push_back(noise_suppression_);
-
-  voice_detection_ = new VoiceDetectionImpl(this, crit_);
-  component_list_.push_back(voice_detection_);
-
-  gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_));
+    private_submodules_->component_list.push_back(
+        public_submodules_->echo_cancellation);
+    private_submodules_->component_list.push_back(
+        public_submodules_->echo_control_mobile);
+    private_submodules_->component_list.push_back(
+        public_submodules_->gain_control);
+    private_submodules_->component_list.push_back(
+        public_submodules_->high_pass_filter);
+    private_submodules_->component_list.push_back(
+        public_submodules_->level_estimator);
+    private_submodules_->component_list.push_back(
+        public_submodules_->noise_suppression);
+    private_submodules_->component_list.push_back(
+        public_submodules_->voice_detection);
+  }
 
   SetExtraOptions(config);
 }
 
 AudioProcessingImpl::~AudioProcessingImpl() {
-  {
-    CriticalSectionScoped crit_scoped(crit_);
-    // Depends on gain_control_ and gain_control_for_new_agc_.
-    agc_manager_.reset();
-    // Depends on gain_control_.
-    gain_control_for_new_agc_.reset();
-    while (!component_list_.empty()) {
-      ProcessingComponent* component = component_list_.front();
-      component->Destroy();
-      delete component;
-      component_list_.pop_front();
-    }
+  // Depends on public_submodules_->gain_control and
+  // public_submodules_->gain_control_for_new_agc.
+  private_submodules_->agc_manager.reset();
+  // Depends on public_submodules_->gain_control.
+  public_submodules_->gain_control_for_new_agc.reset();
+  while (!private_submodules_->component_list.empty()) {
+    ProcessingComponent* component =
+        private_submodules_->component_list.front();
+    component->Destroy();
+    delete component;
+    private_submodules_->component_list.pop_front();
+  }
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-    if (debug_file_->Open()) {
-      debug_file_->CloseFile();
-    }
-#endif
+  if (debug_dump_.debug_file->Open()) {
+    debug_dump_.debug_file->CloseFile();
   }
-  delete crit_;
-  crit_ = NULL;
+#endif
 }
 
 int AudioProcessingImpl::Initialize() {
-  CriticalSectionScoped crit_scoped(crit_);
+  // Run in a single-threaded manner during initialization.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
   return InitializeLocked();
 }
 
@@ -302,66 +325,73 @@
 }
 
 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) {
-  CriticalSectionScoped crit_scoped(crit_);
+  // Run in a single-threaded manner during initialization.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
   return InitializeLocked(processing_config);
 }
 
-int AudioProcessingImpl::MaybeInitializeLockedRender(
+int AudioProcessingImpl::MaybeInitializeRender(
     const ProcessingConfig& processing_config) {
-  return MaybeInitializeLocked(processing_config);
+  return MaybeInitialize(processing_config);
 }
 
-int AudioProcessingImpl::MaybeInitializeLockedCapture(
+int AudioProcessingImpl::MaybeInitializeCapture(
     const ProcessingConfig& processing_config) {
-  return MaybeInitializeLocked(processing_config);
+  return MaybeInitialize(processing_config);
 }
 
 // Calls InitializeLocked() if any of the audio parameters have changed from
-// their current values.
-int AudioProcessingImpl::MaybeInitializeLocked(
+// their current values (must be called while holding the crit_render_ lock).
+int AudioProcessingImpl::MaybeInitialize(
     const ProcessingConfig& processing_config) {
-  if (processing_config == shared_state_.api_format_) {
+  // Called from both threads. Thread check is therefore not possible.
+  if (processing_config == formats_.api_format) {
     return kNoError;
   }
+
+  rtc::CritScope cs_capture(&crit_capture_);
   return InitializeLocked(processing_config);
 }
 
 int AudioProcessingImpl::InitializeLocked() {
   const int fwd_audio_buffer_channels =
-      beamformer_enabled_
-          ? shared_state_.api_format_.input_stream().num_channels()
-          : shared_state_.api_format_.output_stream().num_channels();
+      constants_.beamformer_enabled
+          ? formats_.api_format.input_stream().num_channels()
+          : formats_.api_format.output_stream().num_channels();
   const int rev_audio_buffer_out_num_frames =
-      shared_state_.api_format_.reverse_output_stream().num_frames() == 0
-          ? rev_proc_format_.num_frames()
-          : shared_state_.api_format_.reverse_output_stream().num_frames();
-  if (shared_state_.api_format_.reverse_input_stream().num_channels() > 0) {
-    render_audio_.reset(new AudioBuffer(
-        shared_state_.api_format_.reverse_input_stream().num_frames(),
-        shared_state_.api_format_.reverse_input_stream().num_channels(),
-        rev_proc_format_.num_frames(), rev_proc_format_.num_channels(),
+      formats_.api_format.reverse_output_stream().num_frames() == 0
+          ? formats_.rev_proc_format.num_frames()
+          : formats_.api_format.reverse_output_stream().num_frames();
+  if (formats_.api_format.reverse_input_stream().num_channels() > 0) {
+    render_.render_audio.reset(new AudioBuffer(
+        formats_.api_format.reverse_input_stream().num_frames(),
+        formats_.api_format.reverse_input_stream().num_channels(),
+        formats_.rev_proc_format.num_frames(),
+        formats_.rev_proc_format.num_channels(),
         rev_audio_buffer_out_num_frames));
     if (rev_conversion_needed()) {
-      render_converter_ = AudioConverter::Create(
-          shared_state_.api_format_.reverse_input_stream().num_channels(),
-          shared_state_.api_format_.reverse_input_stream().num_frames(),
-          shared_state_.api_format_.reverse_output_stream().num_channels(),
-          shared_state_.api_format_.reverse_output_stream().num_frames());
+      render_.render_converter = AudioConverter::Create(
+          formats_.api_format.reverse_input_stream().num_channels(),
+          formats_.api_format.reverse_input_stream().num_frames(),
+          formats_.api_format.reverse_output_stream().num_channels(),
+          formats_.api_format.reverse_output_stream().num_frames());
     } else {
-      render_converter_.reset(nullptr);
+      render_.render_converter.reset(nullptr);
     }
   } else {
-    render_audio_.reset(nullptr);
-    render_converter_.reset(nullptr);
+    render_.render_audio.reset(nullptr);
+    render_.render_converter.reset(nullptr);
   }
-  capture_audio_.reset(
-      new AudioBuffer(shared_state_.api_format_.input_stream().num_frames(),
-                      shared_state_.api_format_.input_stream().num_channels(),
-                      fwd_proc_format_.num_frames(), fwd_audio_buffer_channels,
-                      shared_state_.api_format_.output_stream().num_frames()));
+  capture_.capture_audio.reset(
+      new AudioBuffer(formats_.api_format.input_stream().num_frames(),
+                      formats_.api_format.input_stream().num_channels(),
+                      capture_nonlocked_.fwd_proc_format.num_frames(),
+                      fwd_audio_buffer_channels,
+                      formats_.api_format.output_stream().num_frames()));
 
   // Initialize all components.
-  for (auto item : component_list_) {
+  for (auto item : private_submodules_->component_list) {
     int err = item->Initialize();
     if (err != kNoError) {
       return err;
@@ -377,7 +407,7 @@
   InitializeIntelligibility();
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-  if (debug_file_->Open()) {
+  if (debug_dump_.debug_file->Open()) {
     int err = WriteInitMessage();
     if (err != kNoError) {
       return err;
@@ -389,7 +419,6 @@
 }
 
 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
-  // TODO(peah): Refactor to be allowed to verify using thread annotations.
   for (const auto& stream : config.streams) {
     if (stream.num_channels() < 0) {
       return kBadNumberChannelsError;
@@ -409,18 +438,18 @@
     return kBadNumberChannelsError;
   }
 
-  if (beamformer_enabled_ &&
-      (static_cast<size_t>(num_in_channels) != array_geometry_.size() ||
-       num_out_channels > 1)) {
+  if (constants_.beamformer_enabled && (static_cast<size_t>(num_in_channels) !=
+                                            constants_.array_geometry.size() ||
+                                        num_out_channels > 1)) {
     return kBadNumberChannelsError;
   }
 
-  shared_state_.api_format_ = config;
+  formats_.api_format = config;
 
   // We process at the closest native rate >= min(input rate, output rate)...
   const int min_proc_rate =
-      std::min(shared_state_.api_format_.input_stream().sample_rate_hz(),
-               shared_state_.api_format_.output_stream().sample_rate_hz());
+      std::min(formats_.api_format.input_stream().sample_rate_hz(),
+               formats_.api_format.output_stream().sample_rate_hz());
   int fwd_proc_rate;
   for (size_t i = 0; i < kNumNativeSampleRates; ++i) {
     fwd_proc_rate = kNativeSampleRatesHz[i];
@@ -429,20 +458,20 @@
     }
   }
   // ...with one exception.
-  if (echo_control_mobile_->is_enabled() &&
+  if (public_submodules_->echo_control_mobile->is_enabled() &&
       min_proc_rate > kMaxAECMSampleRateHz) {
     fwd_proc_rate = kMaxAECMSampleRateHz;
   }
 
-  fwd_proc_format_ = StreamConfig(fwd_proc_rate);
+  capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate);
 
   // We normally process the reverse stream at 16 kHz. Unless...
   int rev_proc_rate = kSampleRate16kHz;
-  if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) {
+  if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) {
     // ...the forward stream is at 8 kHz.
     rev_proc_rate = kSampleRate8kHz;
   } else {
-    if (shared_state_.api_format_.reverse_input_stream().sample_rate_hz() ==
+    if (formats_.api_format.reverse_input_stream().sample_rate_hz() ==
         kSampleRate32kHz) {
       // ...or the input is at 32 kHz, in which case we use the splitting
       // filter rather than the resampler.
@@ -452,61 +481,66 @@
 
   // Always downmix the reverse stream to mono for analysis. This has been
   // demonstrated to work well for AEC in most practical scenarios.
-  rev_proc_format_ = StreamConfig(rev_proc_rate, 1);
+  formats_.rev_proc_format = StreamConfig(rev_proc_rate, 1);
 
-  if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
-      fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) {
-    split_rate_ = kSampleRate16kHz;
+  if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate32kHz ||
+      capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate48kHz) {
+    capture_nonlocked_.split_rate = kSampleRate16kHz;
   } else {
-    split_rate_ = fwd_proc_format_.sample_rate_hz();
+    capture_nonlocked_.split_rate =
+        capture_nonlocked_.fwd_proc_format.sample_rate_hz();
   }
 
   return InitializeLocked();
 }
 
 void AudioProcessingImpl::SetExtraOptions(const Config& config) {
-  CriticalSectionScoped crit_scoped(crit_);
-  for (auto item : component_list_) {
+  // Run in a single-threaded manner when setting the extra options.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
+  for (auto item : private_submodules_->component_list) {
     item->SetExtraOptions(config);
   }
 
-  if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) {
-    transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled;
+  if (capture_.transient_suppressor_enabled !=
+      config.Get<ExperimentalNs>().enabled) {
+    capture_.transient_suppressor_enabled =
+        config.Get<ExperimentalNs>().enabled;
     InitializeTransient();
   }
 }
 
-
 int AudioProcessingImpl::proc_sample_rate_hz() const {
-  // TODO(peah): Refactor to be allowed to verify using thread annotations.
-  return fwd_proc_format_.sample_rate_hz();
+  // Used as a callback from the submodules, hence locking is not allowed.
+  return capture_nonlocked_.fwd_proc_format.sample_rate_hz();
 }
 
 int AudioProcessingImpl::proc_split_sample_rate_hz() const {
-  // TODO(peah): Refactor to be allowed to verify using thread annotations.
-
-  return split_rate_;
+  // Used as a callback from the submodules, hence locking is not allowed.
+  return capture_nonlocked_.split_rate;
 }
 
 int AudioProcessingImpl::num_reverse_channels() const {
-  // TODO(peah): Refactor to be allowed to verify using thread annotations.
-  return rev_proc_format_.num_channels();
+  // Used as a callback from the submodules, hence locking is not allowed.
+  return formats_.rev_proc_format.num_channels();
 }
 
 int AudioProcessingImpl::num_input_channels() const {
-  return shared_state_.api_format_.input_stream().num_channels();
+  // Used as a callback from the submodules, hence locking is not allowed.
+  return formats_.api_format.input_stream().num_channels();
 }
 
 int AudioProcessingImpl::num_output_channels() const {
-  // TODO(peah): Refactor to be allowed to verify using thread annotations.
-  return shared_state_.api_format_.output_stream().num_channels();
+  // Used as a callback from the submodules, hence locking is not allowed.
+  return formats_.api_format.output_stream().num_channels();
 }
 
 void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
-  CriticalSectionScoped lock(crit_);
-  output_will_be_muted_ = muted;
-  if (agc_manager_.get()) {
-    agc_manager_->SetCaptureMuted(output_will_be_muted_);
+  rtc::CritScope cs(&crit_capture_);
+  capture_.output_will_be_muted = muted;
+  if (private_submodules_->agc_manager.get()) {
+    private_submodules_->agc_manager->SetCaptureMuted(
+        capture_.output_will_be_muted);
   }
 }
 
@@ -518,13 +552,20 @@
                                        int output_sample_rate_hz,
                                        ChannelLayout output_layout,
                                        float* const* dest) {
-  CriticalSectionScoped crit_scoped(crit_);
-  StreamConfig input_stream = shared_state_.api_format_.input_stream();
+  StreamConfig input_stream;
+  StreamConfig output_stream;
+  {
+    // Read formats_.api_format's input and output streams under the
+    // capture lock. The lock must then be released, as it is acquired
+    // again in the ProcessStream() overload called below.
+    rtc::CritScope cs(&crit_capture_);
+    input_stream = formats_.api_format.input_stream();
+    output_stream = formats_.api_format.output_stream();
+  }
+
   input_stream.set_sample_rate_hz(input_sample_rate_hz);
   input_stream.set_num_channels(ChannelsFromLayout(input_layout));
   input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout));
-
-  StreamConfig output_stream = shared_state_.api_format_.output_stream();
   output_stream.set_sample_rate_hz(output_sample_rate_hz);
   output_stream.set_num_channels(ChannelsFromLayout(output_layout));
   output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout));
@@ -539,50 +580,61 @@
                                        const StreamConfig& input_config,
                                        const StreamConfig& output_config,
                                        float* const* dest) {
-  CriticalSectionScoped crit_scoped(crit_);
-  if (!src || !dest) {
-    return kNullPointerError;
+  ProcessingConfig processing_config;
+  {
+    // Acquire the capture lock in order to safely call the functions
+    // that retrieve the queued render-side data. These functions access
+    // APM getters that need the capture lock held when being called.
+    rtc::CritScope cs_capture(&crit_capture_);
+    public_submodules_->echo_cancellation->ReadQueuedRenderData();
+    public_submodules_->echo_control_mobile->ReadQueuedRenderData();
+    public_submodules_->gain_control->ReadQueuedRenderData();
+
+    if (!src || !dest) {
+      return kNullPointerError;
+    }
+
+    processing_config = formats_.api_format;
   }
 
-  echo_cancellation_->ReadQueuedRenderData();
-  echo_control_mobile_->ReadQueuedRenderData();
-  gain_control_->ReadQueuedRenderData();
-
-  ProcessingConfig processing_config = shared_state_.api_format_;
   processing_config.input_stream() = input_config;
   processing_config.output_stream() = output_config;
 
-  RETURN_ON_ERR(MaybeInitializeLockedCapture(processing_config));
+  {
+    // Do conditional reinitialization.
+    rtc::CritScope cs_render(&crit_render_);
+    RETURN_ON_ERR(MaybeInitializeCapture(processing_config));
+  }
+  rtc::CritScope cs_capture(&crit_capture_);
   assert(processing_config.input_stream().num_frames() ==
-         shared_state_.api_format_.input_stream().num_frames());
+         formats_.api_format.input_stream().num_frames());
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-  if (debug_file_->Open()) {
+  if (debug_dump_.debug_file->Open()) {
     RETURN_ON_ERR(WriteConfigMessage(false));
 
-    event_msg_->set_type(audioproc::Event::STREAM);
-    audioproc::Stream* msg = event_msg_->mutable_stream();
+    debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM);
+    audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
     const size_t channel_size =
-        sizeof(float) * shared_state_.api_format_.input_stream().num_frames();
-    for (int i = 0; i < shared_state_.api_format_.input_stream().num_channels();
-         ++i)
+        sizeof(float) * formats_.api_format.input_stream().num_frames();
+    for (int i = 0; i < formats_.api_format.input_stream().num_channels(); ++i)
       msg->add_input_channel(src[i], channel_size);
   }
 #endif
 
-  capture_audio_->CopyFrom(src, shared_state_.api_format_.input_stream());
+  capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream());
   RETURN_ON_ERR(ProcessStreamLocked());
-  capture_audio_->CopyTo(shared_state_.api_format_.output_stream(), dest);
+  capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest);
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-  if (debug_file_->Open()) {
-    audioproc::Stream* msg = event_msg_->mutable_stream();
+  if (debug_dump_.debug_file->Open()) {
+    audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
     const size_t channel_size =
-        sizeof(float) * shared_state_.api_format_.output_stream().num_frames();
-    for (int i = 0;
-         i < shared_state_.api_format_.output_stream().num_channels(); ++i)
+        sizeof(float) * formats_.api_format.output_stream().num_frames();
+    for (int i = 0; i < formats_.api_format.output_stream().num_channels(); ++i)
       msg->add_output_channel(dest[i], channel_size);
-    RETURN_ON_ERR(WriteMessageToDebugFile());
+    RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+                                          &crit_debug_, &debug_dump_.capture));
   }
 #endif
 
@@ -590,10 +642,18 @@
 }
 
 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
-  CriticalSectionScoped crit_scoped(crit_);
-  echo_cancellation_->ReadQueuedRenderData();
-  echo_control_mobile_->ReadQueuedRenderData();
-  gain_control_->ReadQueuedRenderData();
+  {
+    // Acquire the capture lock in order to safely call the functions
+    // that retrieve the queued render-side data. These functions access
+    // APM getters that need the capture lock held when being called.
+    // The lock then needs to be released, as
+    // public_submodules_->echo_control_mobile->is_enabled() acquires this
+    // lock as well.
+    rtc::CritScope cs_capture(&crit_capture_);
+    public_submodules_->echo_cancellation->ReadQueuedRenderData();
+    public_submodules_->echo_control_mobile->ReadQueuedRenderData();
+    public_submodules_->gain_control->ReadQueuedRenderData();
+  }
 
   if (!frame) {
     return kNullPointerError;
@@ -606,47 +666,61 @@
     return kBadSampleRateError;
   }
 
-  if (echo_control_mobile_->is_enabled() &&
+  if (public_submodules_->echo_control_mobile->is_enabled() &&
       frame->sample_rate_hz_ > kMaxAECMSampleRateHz) {
     LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
     return kUnsupportedComponentError;
   }
 
-  // TODO(ajm): The input and output rates and channels are currently
-  // constrained to be identical in the int16 interface.
-  ProcessingConfig processing_config = shared_state_.api_format_;
+  ProcessingConfig processing_config;
+  {
+    // Acquire the lock for the access of api_format.
+    // The lock is released immediately, due to the conditional
+    // reinitialization that follows.
+    rtc::CritScope cs_capture(&crit_capture_);
+    // TODO(ajm): The input and output rates and channels are currently
+    // constrained to be identical in the int16 interface.
+    processing_config = formats_.api_format;
+  }
   processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_);
   processing_config.input_stream().set_num_channels(frame->num_channels_);
   processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_);
   processing_config.output_stream().set_num_channels(frame->num_channels_);
 
-  RETURN_ON_ERR(MaybeInitializeLockedCapture(processing_config));
+  {
+    // Do conditional reinitialization.
+    rtc::CritScope cs_render(&crit_render_);
+    RETURN_ON_ERR(MaybeInitializeCapture(processing_config));
+  }
+  rtc::CritScope cs_capture(&crit_capture_);
   if (frame->samples_per_channel_ !=
-      shared_state_.api_format_.input_stream().num_frames()) {
+      formats_.api_format.input_stream().num_frames()) {
     return kBadDataLengthError;
   }
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-  if (debug_file_->Open()) {
-    event_msg_->set_type(audioproc::Event::STREAM);
-    audioproc::Stream* msg = event_msg_->mutable_stream();
+  if (debug_dump_.debug_file->Open()) {
+    debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM);
+    audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
     const size_t data_size =
         sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
     msg->set_input_data(frame->data_, data_size);
   }
 #endif
 
-  capture_audio_->DeinterleaveFrom(frame);
+  capture_.capture_audio->DeinterleaveFrom(frame);
   RETURN_ON_ERR(ProcessStreamLocked());
-  capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed()));
+  capture_.capture_audio->InterleaveTo(frame,
+                                       output_copy_needed(is_data_processed()));
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-  if (debug_file_->Open()) {
-    audioproc::Stream* msg = event_msg_->mutable_stream();
+  if (debug_dump_.debug_file->Open()) {
+    audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
     const size_t data_size =
         sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
     msg->set_output_data(frame->data_, data_size);
-    RETURN_ON_ERR(WriteMessageToDebugFile());
+    RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+                                          &crit_debug_, &debug_dump_.capture));
   }
 #endif
 
@@ -655,22 +729,25 @@
 
 int AudioProcessingImpl::ProcessStreamLocked() {
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-  if (debug_file_->Open()) {
-    audioproc::Stream* msg = event_msg_->mutable_stream();
-    msg->set_delay(stream_delay_ms_);
-    msg->set_drift(echo_cancellation_->stream_drift_samples());
+  if (debug_dump_.debug_file->Open()) {
+    audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
+    msg->set_delay(capture_nonlocked_.stream_delay_ms);
+    msg->set_drift(
+        public_submodules_->echo_cancellation->stream_drift_samples());
     msg->set_level(gain_control()->stream_analog_level());
-    msg->set_keypress(key_pressed_);
+    msg->set_keypress(capture_.key_pressed);
   }
 #endif
 
   MaybeUpdateHistograms();
 
-  AudioBuffer* ca = capture_audio_.get();  // For brevity.
+  AudioBuffer* ca = capture_.capture_audio.get();  // For brevity.
 
-  if (use_new_agc_ && gain_control_->is_enabled()) {
-    agc_manager_->AnalyzePreProcess(ca->channels()[0], ca->num_channels(),
-                                    fwd_proc_format_.num_frames());
+  if (constants_.use_new_agc &&
+      public_submodules_->gain_control->is_enabled()) {
+    private_submodules_->agc_manager->AnalyzePreProcess(
+        ca->channels()[0], ca->num_channels(),
+        capture_nonlocked_.fwd_proc_format.num_frames());
   }
 
   bool data_processed = is_data_processed();
@@ -678,34 +755,41 @@
     ca->SplitIntoFrequencyBands();
   }
 
-  if (intelligibility_enabled_) {
-    intelligibility_enhancer_->AnalyzeCaptureAudio(
-        ca->split_channels_f(kBand0To8kHz), split_rate_, ca->num_channels());
+  if (constants_.intelligibility_enabled) {
+    public_submodules_->intelligibility_enhancer->AnalyzeCaptureAudio(
+        ca->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate,
+        ca->num_channels());
   }
 
-  if (beamformer_enabled_) {
-    beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f());
+  if (constants_.beamformer_enabled) {
+    private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(),
+                                                  ca->split_data_f());
     ca->set_num_channels(1);
   }
 
-  RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca));
-  RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca));
-  RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca));
-  RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca));
+  RETURN_ON_ERR(public_submodules_->high_pass_filter->ProcessCaptureAudio(ca));
+  RETURN_ON_ERR(public_submodules_->gain_control->AnalyzeCaptureAudio(ca));
+  RETURN_ON_ERR(public_submodules_->noise_suppression->AnalyzeCaptureAudio(ca));
+  RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessCaptureAudio(ca));
 
-  if (echo_control_mobile_->is_enabled() && noise_suppression_->is_enabled()) {
+  if (public_submodules_->echo_control_mobile->is_enabled() &&
+      public_submodules_->noise_suppression->is_enabled()) {
     ca->CopyLowPassToReference();
   }
-  RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(ca));
-  RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca));
-  RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca));
+  RETURN_ON_ERR(public_submodules_->noise_suppression->ProcessCaptureAudio(ca));
+  RETURN_ON_ERR(
+      public_submodules_->echo_control_mobile->ProcessCaptureAudio(ca));
+  RETURN_ON_ERR(public_submodules_->voice_detection->ProcessCaptureAudio(ca));
 
-  if (use_new_agc_ && gain_control_->is_enabled() &&
-      (!beamformer_enabled_ || beamformer_->is_target_present())) {
-    agc_manager_->Process(ca->split_bands_const(0)[kBand0To8kHz],
-                          ca->num_frames_per_band(), split_rate_);
+  if (constants_.use_new_agc &&
+      public_submodules_->gain_control->is_enabled() &&
+      (!constants_.beamformer_enabled ||
+       private_submodules_->beamformer->is_target_present())) {
+    private_submodules_->agc_manager->Process(
+        ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(),
+        capture_nonlocked_.split_rate);
   }
-  RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(ca));
+  RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio(ca));
 
   if (synthesis_needed(data_processed)) {
     ca->MergeFrequencyBands();
@@ -713,21 +797,23 @@
 
   // TODO(aluebs): Investigate if the transient suppression placement should be
   // before or after the AGC.
-  if (transient_suppressor_enabled_) {
+  if (capture_.transient_suppressor_enabled) {
     float voice_probability =
-        agc_manager_.get() ? agc_manager_->voice_probability() : 1.f;
+        private_submodules_->agc_manager.get()
+            ? private_submodules_->agc_manager->voice_probability()
+            : 1.f;
 
-    transient_suppressor_->Suppress(
+    public_submodules_->transient_suppressor->Suppress(
         ca->channels_f()[0], ca->num_frames(), ca->num_channels(),
         ca->split_bands_const_f(0)[kBand0To8kHz], ca->num_frames_per_band(),
         ca->keyboard_data(), ca->num_keyboard_frames(), voice_probability,
-        key_pressed_);
+        capture_.key_pressed);
   }
 
   // The level estimator operates on the recombined data.
-  RETURN_ON_ERR(level_estimator_->ProcessStream(ca));
+  RETURN_ON_ERR(public_submodules_->level_estimator->ProcessStream(ca));
 
-  was_stream_delay_set_ = false;
+  capture_.was_stream_delay_set = false;
   return kNoError;
 }
 
@@ -735,13 +821,14 @@
                                               size_t samples_per_channel,
                                               int rev_sample_rate_hz,
                                               ChannelLayout layout) {
+  rtc::CritScope cs(&crit_render_);
   const StreamConfig reverse_config = {
       rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout),
   };
   if (samples_per_channel != reverse_config.num_frames()) {
     return kBadDataLengthError;
   }
-  return AnalyzeReverseStream(data, reverse_config, reverse_config);
+  return AnalyzeReverseStreamLocked(data, reverse_config, reverse_config);
 }
 
 int AudioProcessingImpl::ProcessReverseStream(
@@ -749,14 +836,16 @@
     const StreamConfig& reverse_input_config,
     const StreamConfig& reverse_output_config,
     float* const* dest) {
-  RETURN_ON_ERR(
-      AnalyzeReverseStream(src, reverse_input_config, reverse_output_config));
+  rtc::CritScope cs(&crit_render_);
+  RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config,
+                                           reverse_output_config));
   if (is_rev_processed()) {
-    render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(),
-                          dest);
+    render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(),
+                                 dest);
   } else if (render_check_rev_conversion_needed()) {
-    render_converter_->Convert(src, reverse_input_config.num_samples(), dest,
-                               reverse_output_config.num_samples());
+    render_.render_converter->Convert(src, reverse_input_config.num_samples(),
+                                      dest,
+                                      reverse_output_config.num_samples());
   } else {
     CopyAudioIfNeeded(src, reverse_input_config.num_frames(),
                       reverse_input_config.num_channels(), dest);
@@ -765,12 +854,11 @@
   return kNoError;
 }
 
-int AudioProcessingImpl::AnalyzeReverseStream(
+int AudioProcessingImpl::AnalyzeReverseStreamLocked(
     const float* const* src,
     const StreamConfig& reverse_input_config,
     const StreamConfig& reverse_output_config) {
-  CriticalSectionScoped crit_scoped(crit_);
-  if (src == NULL) {
+  if (src == nullptr) {
     return kNullPointerError;
   }
 
@@ -778,46 +866,47 @@
     return kBadNumberChannelsError;
   }
 
-  ProcessingConfig processing_config = shared_state_.api_format_;
+  ProcessingConfig processing_config = formats_.api_format;
   processing_config.reverse_input_stream() = reverse_input_config;
   processing_config.reverse_output_stream() = reverse_output_config;
 
-  RETURN_ON_ERR(MaybeInitializeLockedRender(processing_config));
+  RETURN_ON_ERR(MaybeInitializeRender(processing_config));
   assert(reverse_input_config.num_frames() ==
-         shared_state_.api_format_.reverse_input_stream().num_frames());
+         formats_.api_format.reverse_input_stream().num_frames());
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-  if (debug_file_->Open()) {
-    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
-    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
+  if (debug_dump_.debug_file->Open()) {
+    debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM);
+    audioproc::ReverseStream* msg =
+        debug_dump_.render.event_msg->mutable_reverse_stream();
     const size_t channel_size =
-        sizeof(float) *
-        shared_state_.api_format_.reverse_input_stream().num_frames();
+        sizeof(float) * formats_.api_format.reverse_input_stream().num_frames();
     for (int i = 0;
-         i < shared_state_.api_format_.reverse_input_stream().num_channels();
-         ++i)
+         i < formats_.api_format.reverse_input_stream().num_channels(); ++i)
       msg->add_channel(src[i], channel_size);
-    RETURN_ON_ERR(WriteMessageToDebugFile());
+    RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+                                          &crit_debug_, &debug_dump_.render));
   }
 #endif
 
-  render_audio_->CopyFrom(src,
-                          shared_state_.api_format_.reverse_input_stream());
+  render_.render_audio->CopyFrom(src,
+                                 formats_.api_format.reverse_input_stream());
   return ProcessReverseStreamLocked();
 }
 
 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
   RETURN_ON_ERR(AnalyzeReverseStream(frame));
+  rtc::CritScope cs(&crit_render_);
   if (is_rev_processed()) {
-    render_audio_->InterleaveTo(frame, true);
+    render_.render_audio->InterleaveTo(frame, true);
   }
 
   return kNoError;
 }
 
 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
-  CriticalSectionScoped crit_scoped(crit_);
-  if (frame == NULL) {
+  rtc::CritScope cs(&crit_render_);
+  if (frame == nullptr) {
     return kNullPointerError;
   }
   // Must be a native rate.
@@ -829,7 +918,7 @@
   }
   // This interface does not tolerate different forward and reverse rates.
   if (frame->sample_rate_hz_ !=
-      shared_state_.api_format_.input_stream().sample_rate_hz()) {
+      formats_.api_format.input_stream().sample_rate_hz()) {
     return kBadSampleRateError;
   }
 
@@ -837,7 +926,7 @@
     return kBadNumberChannelsError;
   }
 
-  ProcessingConfig processing_config = shared_state_.api_format_;
+  ProcessingConfig processing_config = formats_.api_format;
   processing_config.reverse_input_stream().set_sample_rate_hz(
       frame->sample_rate_hz_);
   processing_config.reverse_input_stream().set_num_channels(
@@ -847,44 +936,52 @@
   processing_config.reverse_output_stream().set_num_channels(
       frame->num_channels_);
 
-  RETURN_ON_ERR(MaybeInitializeLockedRender(processing_config));
+  RETURN_ON_ERR(MaybeInitializeRender(processing_config));
   if (frame->samples_per_channel_ !=
-      shared_state_.api_format_.reverse_input_stream().num_frames()) {
+      formats_.api_format.reverse_input_stream().num_frames()) {
     return kBadDataLengthError;
   }
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-  if (debug_file_->Open()) {
-    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
-    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
+  if (debug_dump_.debug_file->Open()) {
+    debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM);
+    audioproc::ReverseStream* msg =
+        debug_dump_.render.event_msg->mutable_reverse_stream();
     const size_t data_size =
         sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
     msg->set_data(frame->data_, data_size);
-    RETURN_ON_ERR(WriteMessageToDebugFile());
+    RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+                                          &crit_debug_, &debug_dump_.render));
   }
 #endif
-  render_audio_->DeinterleaveFrom(frame);
+  render_.render_audio->DeinterleaveFrom(frame);
   return ProcessReverseStreamLocked();
 }
 
 int AudioProcessingImpl::ProcessReverseStreamLocked() {
-  AudioBuffer* ra = render_audio_.get();  // For brevity.
-  if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) {
+  AudioBuffer* ra = render_.render_audio.get();  // For brevity.
+  if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz) {
     ra->SplitIntoFrequencyBands();
   }
 
-  if (intelligibility_enabled_) {
-    intelligibility_enhancer_->ProcessRenderAudio(
-        ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels());
+  if (constants_.intelligibility_enabled) {
+    // Currently runs in single-threaded mode when the intelligibility
+    // enhancer is activated.
+    // TODO(peah): Fix to be properly multi-threaded.
+    rtc::CritScope cs(&crit_capture_);
+    public_submodules_->intelligibility_enhancer->ProcessRenderAudio(
+        ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate,
+        ra->num_channels());
   }
 
-  RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra));
-  RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra));
-  if (!use_new_agc_) {
-    RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra));
+  RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra));
+  RETURN_ON_ERR(
+      public_submodules_->echo_control_mobile->ProcessRenderAudio(ra));
+  if (!constants_.use_new_agc) {
+    RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra));
   }
 
-  if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz &&
+  if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz &&
       is_rev_processed()) {
     ra->MergeFrequencyBands();
   }
@@ -893,9 +990,10 @@
 }
 
 int AudioProcessingImpl::set_stream_delay_ms(int delay) {
+  rtc::CritScope cs(&crit_capture_);
   Error retval = kNoError;
-  was_stream_delay_set_ = true;
-  delay += delay_offset_ms_;
+  capture_.was_stream_delay_set = true;
+  delay += capture_.delay_offset_ms;
 
   if (delay < 0) {
     delay = 0;
@@ -908,50 +1006,56 @@
     retval = kBadStreamParameterWarning;
   }
 
-  stream_delay_ms_ = delay;
+  capture_nonlocked_.stream_delay_ms = delay;
   return retval;
 }
 
 int AudioProcessingImpl::stream_delay_ms() const {
-  return stream_delay_ms_;
+  // Used as a callback from the submodules, hence locking is not allowed.
+  return capture_nonlocked_.stream_delay_ms;
 }
 
 bool AudioProcessingImpl::was_stream_delay_set() const {
-  return was_stream_delay_set_;
+  // Used as a callback from the submodules, hence locking is not allowed.
+  return capture_.was_stream_delay_set;
 }
 
 void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
-  key_pressed_ = key_pressed;
+  rtc::CritScope cs(&crit_capture_);
+  capture_.key_pressed = key_pressed;
 }
 
 void AudioProcessingImpl::set_delay_offset_ms(int offset) {
-  CriticalSectionScoped crit_scoped(crit_);
-  delay_offset_ms_ = offset;
+  rtc::CritScope cs(&crit_capture_);
+  capture_.delay_offset_ms = offset;
 }
 
 int AudioProcessingImpl::delay_offset_ms() const {
-  return delay_offset_ms_;
+  rtc::CritScope cs(&crit_capture_);
+  return capture_.delay_offset_ms;
 }
 
 int AudioProcessingImpl::StartDebugRecording(
     const char filename[AudioProcessing::kMaxFilenameSize]) {
-  CriticalSectionScoped crit_scoped(crit_);
+  // Run in a single-threaded manner.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
   static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, "");
 
-  if (filename == NULL) {
+  if (filename == nullptr) {
     return kNullPointerError;
   }
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   // Stop any ongoing recording.
-  if (debug_file_->Open()) {
-    if (debug_file_->CloseFile() == -1) {
+  if (debug_dump_.debug_file->Open()) {
+    if (debug_dump_.debug_file->CloseFile() == -1) {
       return kFileError;
     }
   }
 
-  if (debug_file_->OpenFile(filename, false) == -1) {
-    debug_file_->CloseFile();
+  if (debug_dump_.debug_file->OpenFile(filename, false) == -1) {
+    debug_dump_.debug_file->CloseFile();
     return kFileError;
   }
 
@@ -964,21 +1068,23 @@
 }
 
 int AudioProcessingImpl::StartDebugRecording(FILE* handle) {
-  CriticalSectionScoped crit_scoped(crit_);
+  // Run in a single-threaded manner.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
 
-  if (handle == NULL) {
+  if (handle == nullptr) {
     return kNullPointerError;
   }
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   // Stop any ongoing recording.
-  if (debug_file_->Open()) {
-    if (debug_file_->CloseFile() == -1) {
+  if (debug_dump_.debug_file->Open()) {
+    if (debug_dump_.debug_file->CloseFile() == -1) {
       return kFileError;
     }
   }
 
-  if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) {
+  if (debug_dump_.debug_file->OpenFromFileHandle(handle, true, false) == -1) {
     return kFileError;
   }
 
@@ -992,17 +1098,22 @@
 
 int AudioProcessingImpl::StartDebugRecordingForPlatformFile(
     rtc::PlatformFile handle) {
+  // Run in a single-threaded manner.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
   FILE* stream = rtc::FdopenPlatformFileForWriting(handle);
   return StartDebugRecording(stream);
 }
 
 int AudioProcessingImpl::StopDebugRecording() {
-  CriticalSectionScoped crit_scoped(crit_);
+  // Run in a single-threaded manner.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   // We just return if recording hasn't started.
-  if (debug_file_->Open()) {
-    if (debug_file_->CloseFile() == -1) {
+  if (debug_dump_.debug_file->Open()) {
+    if (debug_dump_.debug_file->CloseFile() == -1) {
       return kFileError;
     }
   }
@@ -1013,58 +1124,75 @@
 }
 
 EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
-  return echo_cancellation_;
+  // Adding a lock here has no effect, since the returned pointer allows
+  // unrestricted access to the submodule anyway.
+  return public_submodules_->echo_cancellation;
 }
 
 EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
-  return echo_control_mobile_;
+  // Adding a lock here has no effect, since the returned pointer allows
+  // unrestricted access to the submodule anyway.
+  return public_submodules_->echo_control_mobile;
 }
 
 GainControl* AudioProcessingImpl::gain_control() const {
-  if (use_new_agc_) {
-    return gain_control_for_new_agc_.get();
+  // Adding a lock here has no effect, since the returned pointer allows
+  // unrestricted access to the submodule anyway.
+  if (constants_.use_new_agc) {
+    return public_submodules_->gain_control_for_new_agc.get();
   }
-  return gain_control_;
+  return public_submodules_->gain_control;
 }
 
 HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
-  return high_pass_filter_;
+  // Adding a lock here has no effect, since the returned pointer allows
+  // unrestricted access to the submodule anyway.
+  return public_submodules_->high_pass_filter;
 }
 
 LevelEstimator* AudioProcessingImpl::level_estimator() const {
-  return level_estimator_;
+  // Adding a lock here has no effect, since the returned pointer allows
+  // unrestricted access to the submodule anyway.
+  return public_submodules_->level_estimator;
 }
 
 NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
-  return noise_suppression_;
+  // Adding a lock here has no effect, since the returned pointer allows
+  // unrestricted access to the submodule anyway.
+  return public_submodules_->noise_suppression;
 }
 
 VoiceDetection* AudioProcessingImpl::voice_detection() const {
-  return voice_detection_;
+  // Adding a lock here has no effect, since the returned pointer allows
+  // unrestricted access to the submodule anyway.
+  return public_submodules_->voice_detection;
 }
 
 bool AudioProcessingImpl::is_data_processed() const {
-  if (beamformer_enabled_) {
+  if (constants_.beamformer_enabled) {
     return true;
   }
 
   int enabled_count = 0;
-  for (auto item : component_list_) {
+  for (auto item : private_submodules_->component_list) {
     if (item->is_component_enabled()) {
       enabled_count++;
     }
   }
 
-  // Data is unchanged if no components are enabled, or if only level_estimator_
-  // or voice_detection_ is enabled.
+  // Data is unchanged if no components are enabled, or if only
+  // public_submodules_->level_estimator or
+  // public_submodules_->voice_detection is enabled.
   if (enabled_count == 0) {
     return false;
   } else if (enabled_count == 1) {
-    if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
+    if (public_submodules_->level_estimator->is_enabled() ||
+        public_submodules_->voice_detection->is_enabled()) {
       return false;
     }
   } else if (enabled_count == 2) {
-    if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
+    if (public_submodules_->level_estimator->is_enabled() &&
+        public_submodules_->voice_detection->is_enabled()) {
       return false;
     }
   }
@@ -1073,32 +1201,39 @@
 
 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const {
   // Check if we've upmixed or downmixed the audio.
-  return ((shared_state_.api_format_.output_stream().num_channels() !=
-           shared_state_.api_format_.input_stream().num_channels()) ||
-          is_data_processed || transient_suppressor_enabled_);
+  return ((formats_.api_format.output_stream().num_channels() !=
+           formats_.api_format.input_stream().num_channels()) ||
+          is_data_processed || capture_.transient_suppressor_enabled);
 }
 
 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
   return (is_data_processed &&
-          (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
-           fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz));
+          (capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
+               kSampleRate32kHz ||
+           capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
+               kSampleRate48kHz));
 }
 
 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
-  if (!is_data_processed && !voice_detection_->is_enabled() &&
-      !transient_suppressor_enabled_) {
-    // Only level_estimator_ is enabled.
+  if (!is_data_processed &&
+      !public_submodules_->voice_detection->is_enabled() &&
+      !capture_.transient_suppressor_enabled) {
+    // Only public_submodules_->level_estimator is enabled.
     return false;
-  } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
-             fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) {
-    // Something besides level_estimator_ is enabled, and we have super-wb.
+  } else if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
+                 kSampleRate32kHz ||
+             capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
+                 kSampleRate48kHz) {
+    // Something besides public_submodules_->level_estimator is enabled, and we
+    // have super-wb.
     return true;
   }
   return false;
 }
 
 bool AudioProcessingImpl::is_rev_processed() const {
-  return intelligibility_enabled_ && intelligibility_enhancer_->active();
+  return constants_.intelligibility_enabled &&
+         public_submodules_->intelligibility_enhancer->active();
 }
 
 bool AudioProcessingImpl::render_check_rev_conversion_needed() const {
@@ -1106,55 +1241,55 @@
 }
 
 bool AudioProcessingImpl::rev_conversion_needed() const {
-  // TODO(peah): Refactor to be allowed to verify using thread annotations.
-  return (shared_state_.api_format_.reverse_input_stream() !=
-          shared_state_.api_format_.reverse_output_stream());
+  return (formats_.api_format.reverse_input_stream() !=
+          formats_.api_format.reverse_output_stream());
 }
 
 void AudioProcessingImpl::InitializeExperimentalAgc() {
-  // TODO(peah): Refactor to be allowed to verify using thread annotations.
-  if (use_new_agc_) {
-    if (!agc_manager_.get()) {
-      agc_manager_.reset(new AgcManagerDirect(gain_control_,
-                                              gain_control_for_new_agc_.get(),
-                                              agc_startup_min_volume_));
+  if (constants_.use_new_agc) {
+    if (!private_submodules_->agc_manager.get()) {
+      private_submodules_->agc_manager.reset(new AgcManagerDirect(
+          public_submodules_->gain_control,
+          public_submodules_->gain_control_for_new_agc.get(),
+          constants_.agc_startup_min_volume));
     }
-    agc_manager_->Initialize();
-    agc_manager_->SetCaptureMuted(output_will_be_muted_);
+    private_submodules_->agc_manager->Initialize();
+    private_submodules_->agc_manager->SetCaptureMuted(
+        capture_.output_will_be_muted);
   }
 }
 
 void AudioProcessingImpl::InitializeTransient() {
-  // TODO(peah): Refactor to be allowed to verify using thread annotations.
-  if (transient_suppressor_enabled_) {
-    if (!transient_suppressor_.get()) {
-      transient_suppressor_.reset(new TransientSuppressor());
+  if (capture_.transient_suppressor_enabled) {
+    if (!public_submodules_->transient_suppressor.get()) {
+      public_submodules_->transient_suppressor.reset(new TransientSuppressor());
     }
-    transient_suppressor_->Initialize(
-        fwd_proc_format_.sample_rate_hz(), split_rate_,
-        shared_state_.api_format_.output_stream().num_channels());
+    public_submodules_->transient_suppressor->Initialize(
+        capture_nonlocked_.fwd_proc_format.sample_rate_hz(),
+        capture_nonlocked_.split_rate,
+        formats_.api_format.output_stream().num_channels());
   }
 }
 
 void AudioProcessingImpl::InitializeBeamformer() {
-  // TODO(peah): Refactor to be allowed to verify using thread annotations.
-  if (beamformer_enabled_) {
-    if (!beamformer_) {
-      beamformer_.reset(
-          new NonlinearBeamformer(array_geometry_, target_direction_));
+  if (constants_.beamformer_enabled) {
+    if (!private_submodules_->beamformer) {
+      private_submodules_->beamformer.reset(new NonlinearBeamformer(
+          constants_.array_geometry, constants_.target_direction));
     }
-    beamformer_->Initialize(kChunkSizeMs, split_rate_);
+    private_submodules_->beamformer->Initialize(kChunkSizeMs,
+                                                capture_nonlocked_.split_rate);
   }
 }
 
 void AudioProcessingImpl::InitializeIntelligibility() {
-  // TODO(peah): Refactor to be allowed to verify using thread annotations.
-  if (intelligibility_enabled_) {
+  if (constants_.intelligibility_enabled) {
     IntelligibilityEnhancer::Config config;
-    config.sample_rate_hz = split_rate_;
-    config.num_capture_channels = capture_audio_->num_channels();
-    config.num_render_channels = render_audio_->num_channels();
-    intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config));
+    config.sample_rate_hz = capture_nonlocked_.split_rate;
+    config.num_capture_channels = capture_.capture_audio->num_channels();
+    config.num_render_channels = render_.render_audio->num_channels();
+    public_submodules_->intelligibility_enhancer.reset(
+        new IntelligibilityEnhancer(config));
   }
 }
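
The Initialize* helpers above share a create-on-first-use pattern: allocate
the submodule the first time its feature is enabled, then unconditionally
re-run its Initialize() with the current rates. A minimal sketch of that
pattern (Submodule and Owner are hypothetical stand-ins, not APM types):

  #include <memory>

  // Hypothetical stand-ins for an APM submodule and its owner.
  struct Submodule {
    void Initialize(int sample_rate_hz) { /* Reset internal state. */ }
  };

  struct Owner {
    bool feature_enabled = false;
    int split_rate_hz = 16000;
    std::unique_ptr<Submodule> submodule;

    // Called with both locks held, so the plain pointer check is safe.
    void InitializeFeature() {
      if (!feature_enabled)
        return;
      if (!submodule)
        submodule.reset(new Submodule());    // Create on first use only.
      submodule->Initialize(split_rate_hz);  // Re-run on every (re)init.
    }
  };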
 
@@ -1164,67 +1299,77 @@
   if (echo_cancellation()->is_enabled()) {
     // Activate delay_jumps_ counters if we know echo_cancellation is running.
     // If a stream has echo, we know that echo_cancellation is processing it.
-    if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) {
-      stream_delay_jumps_ = 0;
-    }
-    if (aec_system_delay_jumps_ == -1 &&
+    if (capture_.stream_delay_jumps == -1 &&
         echo_cancellation()->stream_has_echo()) {
-      aec_system_delay_jumps_ = 0;
+      capture_.stream_delay_jumps = 0;
+    }
+    if (capture_.aec_system_delay_jumps == -1 &&
+        echo_cancellation()->stream_has_echo()) {
+      capture_.aec_system_delay_jumps = 0;
     }
 
     // Detect a jump in platform reported system delay and log the difference.
-    const int diff_stream_delay_ms = stream_delay_ms_ - last_stream_delay_ms_;
-    if (diff_stream_delay_ms > kMinDiffDelayMs && last_stream_delay_ms_ != 0) {
+    const int diff_stream_delay_ms =
+        capture_nonlocked_.stream_delay_ms - capture_.last_stream_delay_ms;
+    if (diff_stream_delay_ms > kMinDiffDelayMs &&
+        capture_.last_stream_delay_ms != 0) {
       RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump",
                            diff_stream_delay_ms, kMinDiffDelayMs, 1000, 100);
-      if (stream_delay_jumps_ == -1) {
-        stream_delay_jumps_ = 0;  // Activate counter if needed.
+      if (capture_.stream_delay_jumps == -1) {
+        capture_.stream_delay_jumps = 0;  // Activate counter if needed.
       }
-      stream_delay_jumps_++;
+      capture_.stream_delay_jumps++;
     }
-    last_stream_delay_ms_ = stream_delay_ms_;
+    capture_.last_stream_delay_ms = capture_nonlocked_.stream_delay_ms;
 
     // Detect a jump in AEC system delay and log the difference.
-    const int frames_per_ms = rtc::CheckedDivExact(split_rate_, 1000);
+    const int frames_per_ms =
+        rtc::CheckedDivExact(capture_nonlocked_.split_rate, 1000);
     const int aec_system_delay_ms =
         WebRtcAec_system_delay(echo_cancellation()->aec_core()) / frames_per_ms;
     const int diff_aec_system_delay_ms =
-        aec_system_delay_ms - last_aec_system_delay_ms_;
+        aec_system_delay_ms - capture_.last_aec_system_delay_ms;
     if (diff_aec_system_delay_ms > kMinDiffDelayMs &&
-        last_aec_system_delay_ms_ != 0) {
+        capture_.last_aec_system_delay_ms != 0) {
       RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump",
                            diff_aec_system_delay_ms, kMinDiffDelayMs, 1000,
                            100);
-      if (aec_system_delay_jumps_ == -1) {
-        aec_system_delay_jumps_ = 0;  // Activate counter if needed.
+      if (capture_.aec_system_delay_jumps == -1) {
+        capture_.aec_system_delay_jumps = 0;  // Activate counter if needed.
       }
-      aec_system_delay_jumps_++;
+      capture_.aec_system_delay_jumps++;
     }
-    last_aec_system_delay_ms_ = aec_system_delay_ms;
+    capture_.last_aec_system_delay_ms = aec_system_delay_ms;
   }
 }
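
The jump detection above uses -1 as a "counter not yet active" sentinel: the
counter only starts counting (value 0) once AEC is known to be running, and
every sufficiently large delta both activates and increments it. A small
sketch of the idiom (kMinDiffMs is an assumed threshold standing in for
kMinDiffDelayMs):

  // Sketch of the sentinel-activated jump counter used above.
  class JumpCounter {
   public:
    void Activate() {
      if (jumps_ < 0) jumps_ = 0;  // -1 means "not yet activated".
    }

    void Update(int value_ms) {
      const int diff_ms = value_ms - last_value_ms_;
      if (diff_ms > kMinDiffMs && last_value_ms_ != 0) {
        Activate();  // A large jump also activates the counter.
        ++jumps_;
      }
      last_value_ms_ = value_ms;
    }

    int jumps() const { return jumps_; }  // Still -1 if never active.

   private:
    static const int kMinDiffMs = 30;  // Assumed value.
    int jumps_ = -1;
    int last_value_ms_ = 0;
  };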
 
 void AudioProcessingImpl::UpdateHistogramsOnCallEnd() {
-  CriticalSectionScoped crit_scoped(crit_);
-  if (stream_delay_jumps_ > -1) {
+  // Run in a single-threaded manner.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
+
+  if (capture_.stream_delay_jumps > -1) {
     RTC_HISTOGRAM_ENUMERATION(
         "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps",
-        stream_delay_jumps_, 51);
+        capture_.stream_delay_jumps, 51);
   }
-  stream_delay_jumps_ = -1;
-  last_stream_delay_ms_ = 0;
+  capture_.stream_delay_jumps = -1;
+  capture_.last_stream_delay_ms = 0;
 
-  if (aec_system_delay_jumps_ > -1) {
+  if (capture_.aec_system_delay_jumps > -1) {
     RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps",
-                              aec_system_delay_jumps_, 51);
+                              capture_.aec_system_delay_jumps, 51);
   }
-  aec_system_delay_jumps_ = -1;
-  last_aec_system_delay_ms_ = 0;
+  capture_.aec_system_delay_jumps = -1;
+  capture_.last_aec_system_delay_ms = 0;
 }
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-int AudioProcessingImpl::WriteMessageToDebugFile() {
-  int32_t size = event_msg_->ByteSize();
+int AudioProcessingImpl::WriteMessageToDebugFile(
+    FileWrapper* debug_file,
+    rtc::CriticalSection* crit_debug,
+    ApmDebugDumpThreadState* debug_state) {
+  int32_t size = debug_state->event_msg->ByteSize();
   if (size <= 0) {
     return kUnspecifiedError;
   }
@@ -1233,87 +1378,100 @@
 //            pretty safe in assuming little-endian.
 #endif
 
-  if (!event_msg_->SerializeToString(&event_str_)) {
+  if (!debug_state->event_msg->SerializeToString(&debug_state->event_str)) {
     return kUnspecifiedError;
   }
 
-  // Write message preceded by its size.
-  if (!debug_file_->Write(&size, sizeof(int32_t))) {
-    return kFileError;
-  }
-  if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
-    return kFileError;
+  {
+    // Ensure atomic writes of the message.
+    rtc::CritScope cs_debug(crit_debug);
+    // Write message preceded by its size.
+    if (!debug_file->Write(&size, sizeof(int32_t))) {
+      return kFileError;
+    }
+    if (!debug_file->Write(debug_state->event_str.data(),
+                           debug_state->event_str.length())) {
+      return kFileError;
+    }
   }
 
-  event_msg_->Clear();
+  debug_state->event_msg->Clear();
 
   return kNoError;
 }
 
 int AudioProcessingImpl::WriteInitMessage() {
-  // TODO(peah): Refactor to be allowed to verify using thread annotations.
-  event_msg_->set_type(audioproc::Event::INIT);
-  audioproc::Init* msg = event_msg_->mutable_init();
-  msg->set_sample_rate(
-      shared_state_.api_format_.input_stream().sample_rate_hz());
-  msg->set_num_input_channels(
-      shared_state_.api_format_.input_stream().num_channels());
-  msg->set_num_output_channels(
-      shared_state_.api_format_.output_stream().num_channels());
-  msg->set_num_reverse_channels(
-      shared_state_.api_format_.reverse_input_stream().num_channels());
-  msg->set_reverse_sample_rate(
-      shared_state_.api_format_.reverse_input_stream().sample_rate_hz());
-  msg->set_output_sample_rate(
-      shared_state_.api_format_.output_stream().sample_rate_hz());
-  // TODO(ekmeyerson): Add reverse output fields to event_msg_.
+  debug_dump_.capture.event_msg->set_type(audioproc::Event::INIT);
+  audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init();
+  msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz());
 
-  RETURN_ON_ERR(WriteMessageToDebugFile());
+  msg->set_num_input_channels(
+      formats_.api_format.input_stream().num_channels());
+  msg->set_num_output_channels(
+      formats_.api_format.output_stream().num_channels());
+  msg->set_num_reverse_channels(
+      formats_.api_format.reverse_input_stream().num_channels());
+  msg->set_reverse_sample_rate(
+      formats_.api_format.reverse_input_stream().sample_rate_hz());
+  msg->set_output_sample_rate(
+      formats_.api_format.output_stream().sample_rate_hz());
+  // TODO(ekmeyerson): Add reverse output fields to
+  // debug_dump_.capture.event_msg.
+
+  RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+                                        &crit_debug_, &debug_dump_.capture));
   return kNoError;
 }
 
 int AudioProcessingImpl::WriteConfigMessage(bool forced) {
   audioproc::Config config;
 
-  config.set_aec_enabled(echo_cancellation_->is_enabled());
+  config.set_aec_enabled(public_submodules_->echo_cancellation->is_enabled());
   config.set_aec_delay_agnostic_enabled(
-      echo_cancellation_->is_delay_agnostic_enabled());
+      public_submodules_->echo_cancellation->is_delay_agnostic_enabled());
   config.set_aec_drift_compensation_enabled(
-      echo_cancellation_->is_drift_compensation_enabled());
+      public_submodules_->echo_cancellation->is_drift_compensation_enabled());
   config.set_aec_extended_filter_enabled(
-      echo_cancellation_->is_extended_filter_enabled());
-  config.set_aec_suppression_level(
-      static_cast<int>(echo_cancellation_->suppression_level()));
+      public_submodules_->echo_cancellation->is_extended_filter_enabled());
+  config.set_aec_suppression_level(static_cast<int>(
+      public_submodules_->echo_cancellation->suppression_level()));
 
-  config.set_aecm_enabled(echo_control_mobile_->is_enabled());
+  config.set_aecm_enabled(
+      public_submodules_->echo_control_mobile->is_enabled());
   config.set_aecm_comfort_noise_enabled(
-      echo_control_mobile_->is_comfort_noise_enabled());
-  config.set_aecm_routing_mode(
-      static_cast<int>(echo_control_mobile_->routing_mode()));
+      public_submodules_->echo_control_mobile->is_comfort_noise_enabled());
+  config.set_aecm_routing_mode(static_cast<int>(
+      public_submodules_->echo_control_mobile->routing_mode()));
 
-  config.set_agc_enabled(gain_control_->is_enabled());
-  config.set_agc_mode(static_cast<int>(gain_control_->mode()));
-  config.set_agc_limiter_enabled(gain_control_->is_limiter_enabled());
-  config.set_noise_robust_agc_enabled(use_new_agc_);
+  config.set_agc_enabled(public_submodules_->gain_control->is_enabled());
+  config.set_agc_mode(
+      static_cast<int>(public_submodules_->gain_control->mode()));
+  config.set_agc_limiter_enabled(
+      public_submodules_->gain_control->is_limiter_enabled());
+  config.set_noise_robust_agc_enabled(constants_.use_new_agc);
 
-  config.set_hpf_enabled(high_pass_filter_->is_enabled());
+  config.set_hpf_enabled(public_submodules_->high_pass_filter->is_enabled());
 
-  config.set_ns_enabled(noise_suppression_->is_enabled());
-  config.set_ns_level(static_cast<int>(noise_suppression_->level()));
+  config.set_ns_enabled(public_submodules_->noise_suppression->is_enabled());
+  config.set_ns_level(
+      static_cast<int>(public_submodules_->noise_suppression->level()));
 
-  config.set_transient_suppression_enabled(transient_suppressor_enabled_);
+  config.set_transient_suppression_enabled(
+      capture_.transient_suppressor_enabled);
 
   std::string serialized_config = config.SerializeAsString();
-  if (!forced && last_serialized_config_ == serialized_config) {
+  if (!forced &&
+      debug_dump_.capture.last_serialized_config == serialized_config) {
     return kNoError;
   }
 
-  last_serialized_config_ = serialized_config;
+  debug_dump_.capture.last_serialized_config = serialized_config;
 
-  event_msg_->set_type(audioproc::Event::CONFIG);
-  event_msg_->mutable_config()->CopyFrom(config);
+  debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG);
+  debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config);
 
-  RETURN_ON_ERR(WriteMessageToDebugFile());
+  RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+                                        &crit_debug_, &debug_dump_.capture));
   return kNoError;
 }
 #endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
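
A note on the debug-dump change above: WriteMessageToDebugFile() is now
static and takes the file, the debug lock, and a per-thread state struct
explicitly, and the lock is held only across the two Write() calls so the
size prefix and payload stay adjacent even when the render and capture
threads dump concurrently. A minimal sketch of that size-prefixed framing,
using std::mutex and std::FILE in place of rtc::CriticalSection and
FileWrapper (names are illustrative):

  #include <cstdint>
  #include <cstdio>
  #include <mutex>
  #include <string>

  // |payload| stands in for the serialized protobuf event.
  bool WriteFramedMessage(std::FILE* file, std::mutex* debug_mutex,
                          const std::string& payload) {
    const int32_t size = static_cast<int32_t>(payload.size());
    if (size <= 0)
      return false;

    // Hold the lock across both writes so the prefix and payload of one
    // message cannot be interleaved with another thread's message.
    std::lock_guard<std::mutex> lock(*debug_mutex);
    if (std::fwrite(&size, sizeof(size), 1, file) != 1)
      return false;
    return std::fwrite(payload.data(), 1, payload.size(), file) ==
           payload.size();
  }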
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.h b/webrtc/modules/audio_processing/audio_processing_impl.h
index 93d64fd..4a290a8 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.h
+++ b/webrtc/modules/audio_processing/audio_processing_impl.h
@@ -15,23 +15,32 @@
 #include <string>
 #include <vector>
 
+#include "webrtc/base/criticalsection.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/audio_processing/audio_buffer.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/system_wrappers/include/file_wrapper.h"
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+// Files generated at build-time by the protobuf compiler.
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
+#else
+#include "webrtc/audio_processing/debug.pb.h"
+#endif
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 
 namespace webrtc {
 
 class AgcManagerDirect;
-class AudioBuffer;
 class AudioConverter;
 
 template<typename T>
 class Beamformer;
 
-class CriticalSectionWrapper;
 class EchoCancellationImpl;
 class EchoControlMobileImpl;
-class FileWrapper;
 class GainControlImpl;
 class GainControlForNewAgc;
 class HighPassFilterImpl;
@@ -42,23 +51,14 @@
 class VoiceDetectionImpl;
 class IntelligibilityEnhancer;
 
-#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-namespace audioproc {
-
-class Event;
-
-}  // namespace audioproc
-#endif
-
 class AudioProcessingImpl : public AudioProcessing {
  public:
+  // Methods forcing APM to run in a single-threaded manner.
+  // These acquire both the render and capture locks.
   explicit AudioProcessingImpl(const Config& config);
-
   // AudioProcessingImpl takes ownership of beamformer.
   AudioProcessingImpl(const Config& config, Beamformer<float>* beamformer);
   virtual ~AudioProcessingImpl();
-
-  // AudioProcessing methods.
   int Initialize() override;
   int Initialize(int input_sample_rate_hz,
                  int output_sample_rate_hz,
@@ -68,12 +68,14 @@
                  ChannelLayout reverse_layout) override;
   int Initialize(const ProcessingConfig& processing_config) override;
   void SetExtraOptions(const Config& config) override;
-  int proc_sample_rate_hz() const override;
-  int proc_split_sample_rate_hz() const override;
-  int num_input_channels() const override;
-  int num_output_channels() const override;
-  int num_reverse_channels() const override;
-  void set_output_will_be_muted(bool muted) override;
+  void UpdateHistogramsOnCallEnd() override;
+  int StartDebugRecording(const char filename[kMaxFilenameSize]) override;
+  int StartDebugRecording(FILE* handle) override;
+  int StartDebugRecordingForPlatformFile(rtc::PlatformFile handle) override;
+  int StopDebugRecording() override;
+
+  // Capture-side exclusive methods possibly running APM in a
+  // multi-threaded manner. Acquire the capture lock.
   int ProcessStream(AudioFrame* frame) override;
   int ProcessStream(const float* const* src,
                     size_t samples_per_channel,
@@ -86,6 +88,14 @@
                     const StreamConfig& input_config,
                     const StreamConfig& output_config,
                     float* const* dest) override;
+  void set_output_will_be_muted(bool muted) override;
+  int set_stream_delay_ms(int delay) override;
+  void set_delay_offset_ms(int offset) override;
+  int delay_offset_ms() const override;
+  void set_stream_key_pressed(bool key_pressed) override;
+
+  // Render-side exclusive methods possibly running APM in a
+  // multi-threaded manner. Acquire the render lock.
   int AnalyzeReverseStream(AudioFrame* frame) override;
   int ProcessReverseStream(AudioFrame* frame) override;
   int AnalyzeReverseStream(const float* const* data,
@@ -96,17 +106,24 @@
                            const StreamConfig& reverse_input_config,
                            const StreamConfig& reverse_output_config,
                            float* const* dest) override;
-  int set_stream_delay_ms(int delay) override;
+
+  // Methods only accessed from APM submodules or
+  // from AudioProcessing tests in a single-threaded manner.
+  // Hence there is no need for locks in these.
+  int proc_sample_rate_hz() const override;
+  int proc_split_sample_rate_hz() const override;
+  int num_input_channels() const override;
+  int num_output_channels() const override;
+  int num_reverse_channels() const override;
   int stream_delay_ms() const override;
-  bool was_stream_delay_set() const override;
-  void set_delay_offset_ms(int offset) override;
-  int delay_offset_ms() const override;
-  void set_stream_key_pressed(bool key_pressed) override;
-  int StartDebugRecording(const char filename[kMaxFilenameSize]) override;
-  int StartDebugRecording(FILE* handle) override;
-  int StartDebugRecordingForPlatformFile(rtc::PlatformFile handle) override;
-  int StopDebugRecording() override;
-  void UpdateHistogramsOnCallEnd() override;
+  bool was_stream_delay_set() const override
+      EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+  // Methods returning pointers to APM submodules.
+  // No locks are acquired in these, as locks
+  // would offer no protection (the submodules are
+  // created only once, in a single-threaded manner,
+  // during APM creation).
   EchoCancellation* echo_cancellation() const override;
   EchoControlMobile* echo_control_mobile() const override;
   GainControl* gain_control() const override;
@@ -117,116 +134,209 @@
 
  protected:
   // Overridden in a mock.
-  virtual int InitializeLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
+  virtual int InitializeLocked()
+      EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
 
  private:
+  struct ApmPublicSubmodules;
+  struct ApmPrivateSubmodules;
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+  // State for the debug dump.
+  struct ApmDebugDumpThreadState {
+    ApmDebugDumpThreadState() : event_msg(new audioproc::Event()) {}
+    rtc::scoped_ptr<audioproc::Event> event_msg;  // Protobuf message.
+    std::string event_str;  // Memory for protobuf serialization.
+
+    // Serialized string of last saved APM configuration.
+    std::string last_serialized_config;
+  };
+
+  struct ApmDebugDumpState {
+    ApmDebugDumpState() : debug_file(FileWrapper::Create()) {}
+    rtc::scoped_ptr<FileWrapper> debug_file;
+    ApmDebugDumpThreadState render;
+    ApmDebugDumpThreadState capture;
+  };
+#endif
+
+  // Methods for modifying the formats struct that are called from both
+  // the render and capture threads. The check for whether modifications
+  // are needed is done while holding only the render lock, so the capture
+  // thread never blocks the render thread.
+  // The struct itself is modified in a single-threaded manner, holding
+  // both the render and capture locks.
+  int MaybeInitialize(const ProcessingConfig& config)
+      EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+  int MaybeInitializeRender(const ProcessingConfig& processing_config)
+      EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+  int MaybeInitializeCapture(const ProcessingConfig& processing_config)
+      EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+  // Methods for checking whether a reverse-stream conversion is needed.
+  // They only read the formats struct, but the render lock is required
+  // since they are currently always called with it already held.
+  bool rev_conversion_needed() const EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+  bool render_check_rev_conversion_needed() const
+      EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+  // Methods requiring APM running in a single-threaded manner.
+  // Are called with both the render and capture locks already
+  // acquired.
+  void InitializeExperimentalAgc()
+      EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+  void InitializeTransient()
+      EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+  void InitializeBeamformer()
+      EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+  void InitializeIntelligibility()
+      EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
   int InitializeLocked(const ProcessingConfig& config)
-      EXCLUSIVE_LOCKS_REQUIRED(crit_);
-  int MaybeInitializeLockedRender(const ProcessingConfig& config)
-      EXCLUSIVE_LOCKS_REQUIRED(crit_);
-  int MaybeInitializeLockedCapture(const ProcessingConfig& config)
-      EXCLUSIVE_LOCKS_REQUIRED(crit_);
-  int MaybeInitializeLocked(const ProcessingConfig& config)
-      EXCLUSIVE_LOCKS_REQUIRED(crit_);
+      EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+
+  // Capture-side exclusive methods possibly running APM in a multi-threaded
+  // manner that are called with the capture lock already acquired.
+  int ProcessStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+  bool output_copy_needed(bool is_data_processed) const
+      EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+  bool is_data_processed() const EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+  bool synthesis_needed(bool is_data_processed) const
+      EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+  bool analysis_needed(bool is_data_processed) const
+      EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+  void MaybeUpdateHistograms() EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+  // Render-side exclusive methods possibly running APM in a multi-threaded
+  // manner that are called with the render lock already acquired.
   // TODO(ekm): Remove once all clients updated to new interface.
-  int AnalyzeReverseStream(const float* const* src,
-                           const StreamConfig& input_config,
-                           const StreamConfig& output_config);
-  int ProcessStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
-  int ProcessReverseStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
+  int AnalyzeReverseStreamLocked(const float* const* src,
+                                 const StreamConfig& input_config,
+                                 const StreamConfig& output_config)
+      EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+  bool is_rev_processed() const EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+  int ProcessReverseStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
 
-  bool is_data_processed() const;
-  bool output_copy_needed(bool is_data_processed) const;
-  bool synthesis_needed(bool is_data_processed) const;
-  bool analysis_needed(bool is_data_processed) const;
-  bool is_rev_processed() const;
-  bool rev_conversion_needed() const;
-  // TODO(peah): Add EXCLUSIVE_LOCKS_REQUIRED for the method below.
-  bool render_check_rev_conversion_needed() const;
-  void InitializeExperimentalAgc() EXCLUSIVE_LOCKS_REQUIRED(crit_);
-  void InitializeTransient() EXCLUSIVE_LOCKS_REQUIRED(crit_);
-  void InitializeBeamformer() EXCLUSIVE_LOCKS_REQUIRED(crit_);
-  void InitializeIntelligibility() EXCLUSIVE_LOCKS_REQUIRED(crit_);
-  void MaybeUpdateHistograms() EXCLUSIVE_LOCKS_REQUIRED(crit_);
-
-  EchoCancellationImpl* echo_cancellation_;
-  EchoControlMobileImpl* echo_control_mobile_;
-  GainControlImpl* gain_control_;
-  HighPassFilterImpl* high_pass_filter_;
-  LevelEstimatorImpl* level_estimator_;
-  NoiseSuppressionImpl* noise_suppression_;
-  VoiceDetectionImpl* voice_detection_;
-  rtc::scoped_ptr<GainControlForNewAgc> gain_control_for_new_agc_;
-
-  std::list<ProcessingComponent*> component_list_;
-  CriticalSectionWrapper* crit_;
-  rtc::scoped_ptr<AudioBuffer> render_audio_;
-  rtc::scoped_ptr<AudioBuffer> capture_audio_;
-  rtc::scoped_ptr<AudioConverter> render_converter_;
+// Debug dump methods that are internal and called without locks.
+// TODO(peah): Make thread safe.
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   // TODO(andrew): make this more graceful. Ideally we would split this stuff
   // out into a separate class with an "enabled" and "disabled" implementation.
-  int WriteMessageToDebugFile();
-  int WriteInitMessage();
+  static int WriteMessageToDebugFile(FileWrapper* debug_file,
+                                     rtc::CriticalSection* crit_debug,
+                                     ApmDebugDumpThreadState* debug_state);
+  int WriteInitMessage() EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
 
   // Writes Config message. If not |forced|, only writes the current config if
   // it is different from the last saved one; if |forced|, writes the config
   // regardless of the last saved.
-  int WriteConfigMessage(bool forced);
+  int WriteConfigMessage(bool forced) EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
 
-  rtc::scoped_ptr<FileWrapper> debug_file_;
-  rtc::scoped_ptr<audioproc::Event> event_msg_;  // Protobuf message.
-  std::string event_str_;  // Memory for protobuf serialization.
+  // Critical section.
+  mutable rtc::CriticalSection crit_debug_;
 
-  // Serialized string of last saved APM configuration.
-  std::string last_serialized_config_;
+  // Debug dump state.
+  ApmDebugDumpState debug_dump_;
 #endif
 
+  // Critical sections.
+  mutable rtc::CriticalSection crit_render_ ACQUIRED_BEFORE(crit_capture_);
+  mutable rtc::CriticalSection crit_capture_;
+
+  // Structs containing the pointers to the submodules.
+  rtc::scoped_ptr<ApmPublicSubmodules> public_submodules_;
+  rtc::scoped_ptr<ApmPrivateSubmodules> private_submodules_
+      GUARDED_BY(crit_capture_);
+
   // State that is written to while holding both the render and capture locks
-  // but can be read while holding only one of the locks.
-  struct SharedState {
-    SharedState()
+  // but can be read without any lock being held.
+  // As this is only accessed internally of APM, and all internal methods in APM
+  // either are holding the render or capture locks, this construct is safe as
+  // it is not possible to read the variables while writing them.
+  struct ApmFormatState {
+    ApmFormatState()
         :  // Format of processing streams at input/output call sites.
-          api_format_({{{kSampleRate16kHz, 1, false},
-                        {kSampleRate16kHz, 1, false},
-                        {kSampleRate16kHz, 1, false},
-                        {kSampleRate16kHz, 1, false}}}) {}
-    ProcessingConfig api_format_;
-  } shared_state_;
+          api_format({{{kSampleRate16kHz, 1, false},
+                       {kSampleRate16kHz, 1, false},
+                       {kSampleRate16kHz, 1, false},
+                       {kSampleRate16kHz, 1, false}}}),
+          rev_proc_format(kSampleRate16kHz, 1) {}
+    ProcessingConfig api_format;
+    StreamConfig rev_proc_format;
+  } formats_;
 
-  // Only the rate and samples fields of fwd_proc_format_ are used because the
-  // forward processing number of channels is mutable and is tracked by the
-  // capture_audio_.
-  StreamConfig fwd_proc_format_;
-  StreamConfig rev_proc_format_;
-  int split_rate_;
+  // APM constants.
+  const struct ApmConstants {
+    ApmConstants(int agc_startup_min_volume,
+                 const std::vector<Point> array_geometry,
+                 SphericalPointf target_direction,
+                 bool use_new_agc,
+                 bool intelligibility_enabled,
+                 bool beamformer_enabled)
+        : agc_startup_min_volume(agc_startup_min_volume),
+          array_geometry(array_geometry),
+          target_direction(target_direction),
+          use_new_agc(use_new_agc),
+          intelligibility_enabled(intelligibility_enabled),
+          beamformer_enabled(beamformer_enabled) {}
+    int agc_startup_min_volume;
+    std::vector<Point> array_geometry;
+    SphericalPointf target_direction;
+    bool use_new_agc;
+    bool intelligibility_enabled;
+    bool beamformer_enabled;
+  } constants_;
 
-  int stream_delay_ms_;
-  int delay_offset_ms_;
-  bool was_stream_delay_set_;
-  int last_stream_delay_ms_;
-  int last_aec_system_delay_ms_;
-  int stream_delay_jumps_;
-  int aec_system_delay_jumps_;
+  struct ApmCaptureState {
+    explicit ApmCaptureState(bool transient_suppressor_enabled)
+        : aec_system_delay_jumps(-1),
+          delay_offset_ms(0),
+          was_stream_delay_set(false),
+          last_stream_delay_ms(0),
+          last_aec_system_delay_ms(0),
+          stream_delay_jumps(-1),
+          output_will_be_muted(false),
+          key_pressed(false),
+          transient_suppressor_enabled(transient_suppressor_enabled) {}
+    int aec_system_delay_jumps;
+    int delay_offset_ms;
+    bool was_stream_delay_set;
+    int last_stream_delay_ms;
+    int last_aec_system_delay_ms;
+    int stream_delay_jumps;
+    bool output_will_be_muted;
+    bool key_pressed;
+    bool transient_suppressor_enabled;
+    rtc::scoped_ptr<AudioBuffer> capture_audio;
+  } capture_ GUARDED_BY(crit_capture_);
 
-  bool output_will_be_muted_ GUARDED_BY(crit_);
+  struct ApmCaptureNonLockedState {
+    ApmCaptureNonLockedState()
+        : fwd_proc_format(kSampleRate16kHz),
+          split_rate(kSampleRate16kHz),
+          stream_delay_ms(0) {}
+    // Only the rate and samples fields of fwd_proc_format are used because
+    // the forward processing number of channels is mutable and is tracked by
+    // capture_.capture_audio.
+    StreamConfig fwd_proc_format;
+    int split_rate;
+    int stream_delay_ms;
+  } capture_nonlocked_;
 
-  bool key_pressed_;
-
-  // Only set through the constructor's Config parameter.
-  const bool use_new_agc_;
-  rtc::scoped_ptr<AgcManagerDirect> agc_manager_ GUARDED_BY(crit_);
-  int agc_startup_min_volume_;
-
-  bool transient_suppressor_enabled_;
-  rtc::scoped_ptr<TransientSuppressor> transient_suppressor_;
-  const bool beamformer_enabled_;
-  rtc::scoped_ptr<Beamformer<float>> beamformer_;
-  const std::vector<Point> array_geometry_;
-  const SphericalPointf target_direction_;
-
-  bool intelligibility_enabled_;
-  rtc::scoped_ptr<IntelligibilityEnhancer> intelligibility_enhancer_;
+  struct ApmRenderState {
+    rtc::scoped_ptr<AudioConverter> render_converter;
+    rtc::scoped_ptr<AudioBuffer> render_audio;
+  } render_ GUARDED_BY(crit_render_);
 };
 
 }  // namespace webrtc
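
The header above encodes the whole scheme: crit_render_ is declared
ACQUIRED_BEFORE(crit_capture_), single-threaded operations take both locks
in that order, and each real-time path takes only its own lock. A minimal
std::mutex sketch of the same ordering discipline (TwoLockProcessor is an
illustrative name, not the APM API):

  #include <mutex>

  class TwoLockProcessor {
   public:
    // Render path: render lock only; never waits on capture-side work.
    void ProcessRender() {
      std::lock_guard<std::mutex> render(render_mutex_);
      // ... touch render-side state only ...
    }

    // Capture path: capture lock only.
    void ProcessCapture() {
      std::lock_guard<std::mutex> capture(capture_mutex_);
      // ... touch capture-side state only ...
    }

    // Reconfiguration: both locks, always render before capture, which is
    // the invariant that ACQUIRED_BEFORE(crit_capture_) documents.
    void Reinitialize() {
      std::lock_guard<std::mutex> render(render_mutex_);
      std::lock_guard<std::mutex> capture(capture_mutex_);
      // ... touch state shared by both sides ...
    }

   private:
    std::mutex render_mutex_;  // Always acquired before capture_mutex_.
    std::mutex capture_mutex_;
  };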
diff --git a/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc b/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
index dcbaa28..b826430 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
@@ -30,41 +30,6 @@
 
 class AudioProcessingImplLockTest;
 
-// Sleeps a random time between 0 and max_sleep milliseconds.
-void SleepRandomMs(int max_sleep, test::Random* rand_gen) {
-  int sleeptime = rand_gen->Rand(0, max_sleep);
-  SleepMs(sleeptime);
-}
-
-// Populates a float audio frame with random data.
-void PopulateAudioFrame(float** frame,
-                        float amplitude,
-                        size_t num_channels,
-                        size_t samples_per_channel,
-                        test::Random* rand_gen) {
-  for (size_t ch = 0; ch < num_channels; ch++) {
-    for (size_t k = 0; k < samples_per_channel; k++) {
-      // Store random 16 bit quantized float number between +-amplitude.
-      frame[ch][k] = amplitude * (2 * rand_gen->Rand<float>() - 1);
-    }
-  }
-}
-
-// Populates an audioframe frame of AudioFrame type with random data.
-void PopulateAudioFrame(AudioFrame* frame,
-                        int16_t amplitude,
-                        test::Random* rand_gen) {
-  ASSERT_GT(amplitude, 0);
-  ASSERT_LE(amplitude, 32767);
-  for (int ch = 0; ch < frame->num_channels_; ch++) {
-    for (int k = 0; k < static_cast<int>(frame->samples_per_channel_); k++) {
-      // Store random 16 bit number between -(amplitude+1) and
-      // amplitude.
-      frame->data_[k * ch] = rand_gen->Rand(2 * amplitude + 1) - amplitude - 1;
-    }
-  }
-}
-
 // Type of the render thread APM API call to use in the test.
 enum class RenderApiImpl {
   ProcessReverseStreamImpl1,
@@ -97,6 +62,31 @@
   BasicWebRtcAecSettingsWithAecMobile
 };
 
+// Thread-safe random number generator wrapper.
+class RandomGenerator {
+ public:
+  RandomGenerator() : rand_gen_(42U) {}
+
+  int RandInt(int min, int max) {
+    rtc::CritScope cs(&crit_);
+    return rand_gen_.Rand(min, max);
+  }
+
+  int RandInt(int max) {
+    rtc::CritScope cs(&crit_);
+    return rand_gen_.Rand(max);
+  }
+
+  float RandFloat() {
+    rtc::CritScope cs(&crit_);
+    return rand_gen_.Rand<float>();
+  }
+
+ private:
+  rtc::CriticalSection crit_;
+  test::Random rand_gen_ GUARDED_BY(crit_);
+};
+
 // Variables related to the audio data and formats.
 struct AudioFrameData {
   explicit AudioFrameData(int max_frame_size) {
@@ -331,7 +321,7 @@
 class CaptureProcessor {
  public:
   CaptureProcessor(int max_frame_size,
-                   test::Random* rand_gen,
+                   RandomGenerator* rand_gen,
                    FrameCounters* shared_counters_state,
                    CaptureSideCalledChecker* capture_call_checker,
                    AudioProcessingImplLockTest* test_framework,
@@ -348,7 +338,7 @@
   void CallApmCaptureSide();
   void ApplyRuntimeSettingScheme();
 
-  test::Random* rand_gen_ = nullptr;
+  RandomGenerator* rand_gen_ = nullptr;
   FrameCounters* frame_counters_ = nullptr;
   CaptureSideCalledChecker* capture_call_checker_ = nullptr;
   AudioProcessingImplLockTest* test_ = nullptr;
@@ -360,13 +350,13 @@
 // Class for handling the stats processing.
 class StatsProcessor {
  public:
-  StatsProcessor(test::Random* rand_gen,
+  StatsProcessor(RandomGenerator* rand_gen,
                  TestConfig* test_config,
                  AudioProcessing* apm);
   bool Process();
 
  private:
-  test::Random* rand_gen_ = nullptr;
+  RandomGenerator* rand_gen_ = nullptr;
   TestConfig* test_config_ = nullptr;
   AudioProcessing* apm_ = nullptr;
 };
@@ -375,7 +365,7 @@
 class RenderProcessor {
  public:
   RenderProcessor(int max_frame_size,
-                  test::Random* rand_gen,
+                  RandomGenerator* rand_gen,
                   FrameCounters* shared_counters_state,
                   CaptureSideCalledChecker* capture_call_checker,
                   AudioProcessingImplLockTest* test_framework,
@@ -392,7 +382,7 @@
   void CallApmRenderSide();
   void ApplyRuntimeSettingScheme();
 
-  test::Random* rand_gen_ = nullptr;
+  RandomGenerator* rand_gen_ = nullptr;
   FrameCounters* frame_counters_ = nullptr;
   CaptureSideCalledChecker* capture_call_checker_ = nullptr;
   AudioProcessingImplLockTest* test_ = nullptr;
@@ -459,7 +449,7 @@
   rtc::PlatformThread render_thread_;
   rtc::PlatformThread capture_thread_;
   rtc::PlatformThread stats_thread_;
-  mutable test::Random rand_gen_;
+  mutable RandomGenerator rand_gen_;
 
   rtc::scoped_ptr<AudioProcessing> apm_;
   TestConfig test_config_;
@@ -470,12 +460,47 @@
   StatsProcessor stats_thread_state_;
 };
 
+// Sleeps a random time between 0 and max_sleep milliseconds.
+void SleepRandomMs(int max_sleep, RandomGenerator* rand_gen) {
+  int sleeptime = rand_gen->RandInt(0, max_sleep);
+  SleepMs(sleeptime);
+}
+
+// Populates a float audio frame with random data.
+void PopulateAudioFrame(float** frame,
+                        float amplitude,
+                        size_t num_channels,
+                        size_t samples_per_channel,
+                        RandomGenerator* rand_gen) {
+  for (size_t ch = 0; ch < num_channels; ch++) {
+    for (size_t k = 0; k < samples_per_channel; k++) {
+      // Store random 16 bit quantized float number between +-amplitude.
+      frame[ch][k] = amplitude * (2 * rand_gen->RandFloat() - 1);
+    }
+  }
+}
+
+// Populates an AudioFrame with random data.
+void PopulateAudioFrame(AudioFrame* frame,
+                        int16_t amplitude,
+                        RandomGenerator* rand_gen) {
+  ASSERT_GT(amplitude, 0);
+  ASSERT_LE(amplitude, 32767);
+  for (int ch = 0; ch < frame->num_channels_; ch++) {
+    for (int k = 0; k < static_cast<int>(frame->samples_per_channel_); k++) {
+      // Store random 16 bit number between -(amplitude+1) and
+      // amplitude.
+      // Index interleaved data as (sample * num_channels + channel).
+      frame->data_[k * frame->num_channels_ + ch] =
+          rand_gen->RandInt(2 * amplitude + 1) - amplitude - 1;
+    }
+  }
+}
+
 AudioProcessingImplLockTest::AudioProcessingImplLockTest()
     : test_complete_(EventWrapper::Create()),
       render_thread_(RenderProcessorThreadFunc, this, "render"),
       capture_thread_(CaptureProcessorThreadFunc, this, "capture"),
       stats_thread_(StatsProcessorThreadFunc, this, "stats"),
-      rand_gen_(42U),
       apm_(AudioProcessingImpl::Create()),
       render_thread_state_(kMaxFrameSize,
                            &rand_gen_,
@@ -513,7 +538,7 @@
   ASSERT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
 
   ASSERT_EQ(apm_->kNoError,
-            apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+            apm_->gain_control()->set_mode(GainControl::kAdaptiveDigital));
   ASSERT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
 
   ASSERT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(true));
@@ -552,7 +577,7 @@
   stats_thread_.Stop();
 }
 
-StatsProcessor::StatsProcessor(test::Random* rand_gen,
+StatsProcessor::StatsProcessor(RandomGenerator* rand_gen,
                                TestConfig* test_config,
                                AudioProcessing* apm)
     : rand_gen_(rand_gen), test_config_(test_config), apm_(apm) {}
@@ -586,7 +611,7 @@
 
 CaptureProcessor::CaptureProcessor(
     int max_frame_size,
-    test::Random* rand_gen,
+    RandomGenerator* rand_gen,
     FrameCounters* shared_counters_state,
     CaptureSideCalledChecker* capture_call_checker,
     AudioProcessingImplLockTest* test_framework,
@@ -824,8 +849,6 @@
         apm_->set_stream_key_pressed(true);
         apm_->set_delay_offset_ms(15);
         EXPECT_EQ(apm_->delay_offset_ms(), 15);
-        EXPECT_GE(apm_->num_reverse_channels(), 0);
-        EXPECT_LE(apm_->num_reverse_channels(), 2);
       } else {
         ASSERT_EQ(AudioProcessing::Error::kNoError,
                   apm_->set_stream_delay_ms(50));
@@ -833,9 +856,6 @@
         apm_->set_delay_offset_ms(20);
         EXPECT_EQ(apm_->delay_offset_ms(), 20);
         apm_->delay_offset_ms();
-        apm_->num_reverse_channels();
-        EXPECT_GE(apm_->num_reverse_channels(), 0);
-        EXPECT_LE(apm_->num_reverse_channels(), 2);
       }
       break;
     default:
@@ -852,7 +872,7 @@
 const float RenderProcessor::kRenderInputFloatLevel = 0.5f;
 
 RenderProcessor::RenderProcessor(int max_frame_size,
-                                 test::Random* rand_gen,
+                                 RandomGenerator* rand_gen,
                                  FrameCounters* shared_counters_state,
                                  CaptureSideCalledChecker* capture_call_checker,
                                  AudioProcessingImplLockTest* test_framework,
@@ -1104,7 +1124,7 @@
     ::testing::ValuesIn(TestConfig::GenerateExtensiveTestConfigs()));
 
 INSTANTIATE_TEST_CASE_P(
-    DISABLED_AudioProcessingImplLockBrief,
+    AudioProcessingImplLockBrief,
     AudioProcessingImplLockTest,
     ::testing::ValuesIn(TestConfig::GenerateBriefTestConfigs()));
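
The test change above replaces the shared test::Random with a
RandomGenerator that serializes every draw behind its own lock, since one
generator is now hit from the render, capture, and stats threads at once.
The same idea with the standard library (a sketch, not the webrtc::test
API):

  #include <cstdint>
  #include <mutex>
  #include <random>

  class ThreadSafeRandom {
   public:
    explicit ThreadSafeRandom(uint32_t seed) : engine_(seed) {}

    int RandInt(int min, int max) {  // Inclusive range, like Rand(min, max).
      std::lock_guard<std::mutex> lock(mutex_);
      return std::uniform_int_distribution<int>(min, max)(engine_);
    }

    float RandFloat() {  // In [0, 1), like Rand<float>().
      std::lock_guard<std::mutex> lock(mutex_);
      return std::uniform_real_distribution<float>(0.f, 1.f)(engine_);
    }

   private:
    std::mutex mutex_;
    std::mt19937 engine_;  // Not thread safe on its own; hence the lock.
  };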
 
diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl.cc b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
index 14d1fc8..6d0373d 100644
--- a/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -18,7 +18,6 @@
 }
 #include "webrtc/modules/audio_processing/aec/echo_cancellation.h"
 #include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 
 namespace webrtc {
 
@@ -63,10 +62,12 @@
 }  // namespace
 
 EchoCancellationImpl::EchoCancellationImpl(const AudioProcessing* apm,
-                                           CriticalSectionWrapper* crit)
+                                           rtc::CriticalSection* crit_render,
+                                           rtc::CriticalSection* crit_capture)
     : ProcessingComponent(),
       apm_(apm),
-      crit_(crit),
+      crit_render_(crit_render),
+      crit_capture_(crit_capture),
       drift_compensation_enabled_(false),
       metrics_enabled_(false),
       suppression_level_(kModerateSuppression),
@@ -76,19 +77,24 @@
       delay_logging_enabled_(false),
       extended_filter_enabled_(false),
       delay_agnostic_enabled_(false),
-      render_queue_element_max_size_(0) {}
+      render_queue_element_max_size_(0) {
+  RTC_DCHECK(apm);
+  RTC_DCHECK(crit_render);
+  RTC_DCHECK(crit_capture);
+}
 
 EchoCancellationImpl::~EchoCancellationImpl() {}
 
 int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {
+  rtc::CritScope cs_render(crit_render_);
   if (!is_component_enabled()) {
-    return apm_->kNoError;
+    return AudioProcessing::kNoError;
   }
 
   assert(audio->num_frames_per_band() <= 160);
   assert(audio->num_channels() == apm_->num_reverse_channels());
 
-  int err = apm_->kNoError;
+  int err = AudioProcessing::kNoError;
 
   // The ordering convention must be followed to pass to the correct AEC.
   size_t handle_index = 0;
@@ -102,7 +108,7 @@
           my_handle, audio->split_bands_const_f(j)[kBand0To8kHz],
           audio->num_frames_per_band());
 
-      if (err != apm_->kNoError) {
+      if (err != AudioProcessing::kNoError) {
         return MapError(err);  // TODO(ajm): warning possible?
       }
 
@@ -116,18 +122,20 @@
 
   // Insert the samples into the queue.
   if (!render_signal_queue_->Insert(&render_queue_buffer_)) {
+    // The data queue is full and needs to be emptied.
     ReadQueuedRenderData();
 
     // Retry the insert (should always work).
     RTC_DCHECK_EQ(render_signal_queue_->Insert(&render_queue_buffer_), true);
   }
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 // Read chunks of data that were received and queued on the render side from
 // a queue. All the data chunks are buffered into the farend signal of the AEC.
 void EchoCancellationImpl::ReadQueuedRenderData() {
+  rtc::CritScope cs_capture(crit_capture_);
   if (!is_component_enabled()) {
     return;
   }
@@ -152,22 +160,23 @@
 }
 
 int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+  rtc::CritScope cs_capture(crit_capture_);
   if (!is_component_enabled()) {
-    return apm_->kNoError;
+    return AudioProcessing::kNoError;
   }
 
   if (!apm_->was_stream_delay_set()) {
-    return apm_->kStreamParameterNotSetError;
+    return AudioProcessing::kStreamParameterNotSetError;
   }
 
   if (drift_compensation_enabled_ && !was_stream_drift_set_) {
-    return apm_->kStreamParameterNotSetError;
+    return AudioProcessing::kStreamParameterNotSetError;
   }
 
   assert(audio->num_frames_per_band() <= 160);
   assert(audio->num_channels() == apm_->num_output_channels());
 
-  int err = apm_->kNoError;
+  int err = AudioProcessing::kNoError;
 
   // The ordering convention must be followed to pass to the correct AEC.
   size_t handle_index = 0;
@@ -175,26 +184,22 @@
   for (int i = 0; i < audio->num_channels(); i++) {
     for (int j = 0; j < apm_->num_reverse_channels(); j++) {
       Handle* my_handle = handle(handle_index);
-      err = WebRtcAec_Process(
-          my_handle,
-          audio->split_bands_const_f(i),
-          audio->num_bands(),
-          audio->split_bands_f(i),
-          audio->num_frames_per_band(),
-          apm_->stream_delay_ms(),
-          stream_drift_samples_);
+      err = WebRtcAec_Process(my_handle, audio->split_bands_const_f(i),
+                              audio->num_bands(), audio->split_bands_f(i),
+                              audio->num_frames_per_band(),
+                              apm_->stream_delay_ms(), stream_drift_samples_);
 
-      if (err != apm_->kNoError) {
+      if (err != AudioProcessing::kNoError) {
         err = MapError(err);
         // TODO(ajm): Figure out how to return warnings properly.
-        if (err != apm_->kBadStreamParameterWarning) {
+        if (err != AudioProcessing::kBadStreamParameterWarning) {
           return err;
         }
       }
 
       int status = 0;
       err = WebRtcAec_get_echo_status(my_handle, &status);
-      if (err != apm_->kNoError) {
+      if (err != AudioProcessing::kNoError) {
         return MapError(err);
       }
 
@@ -207,77 +212,92 @@
   }
 
   was_stream_drift_set_ = false;
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 int EchoCancellationImpl::Enable(bool enable) {
-  CriticalSectionScoped crit_scoped(crit_);
+  // Run in a single-threaded manner.
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
   // Ensure AEC and AECM are not both enabled.
+  // The is_enabled call is safe from a deadlock perspective
+  // as both locks are already held in the correct order.
   if (enable && apm_->echo_control_mobile()->is_enabled()) {
-    return apm_->kBadParameterError;
+    return AudioProcessing::kBadParameterError;
   }
 
   return EnableComponent(enable);
 }
 
 bool EchoCancellationImpl::is_enabled() const {
+  rtc::CritScope cs(crit_capture_);
   return is_component_enabled();
 }
 
 int EchoCancellationImpl::set_suppression_level(SuppressionLevel level) {
-  CriticalSectionScoped crit_scoped(crit_);
-  if (MapSetting(level) == -1) {
-    return apm_->kBadParameterError;
+  {
+    if (MapSetting(level) == -1) {
+      return AudioProcessing::kBadParameterError;
+    }
+    rtc::CritScope cs(crit_capture_);
+    suppression_level_ = level;
   }
-
-  suppression_level_ = level;
   return Configure();
 }
 
 EchoCancellation::SuppressionLevel EchoCancellationImpl::suppression_level()
     const {
+  rtc::CritScope cs(crit_capture_);
   return suppression_level_;
 }
 
 int EchoCancellationImpl::enable_drift_compensation(bool enable) {
-  CriticalSectionScoped crit_scoped(crit_);
-  drift_compensation_enabled_ = enable;
+  {
+    rtc::CritScope cs(crit_capture_);
+    drift_compensation_enabled_ = enable;
+  }
   return Configure();
 }
 
 bool EchoCancellationImpl::is_drift_compensation_enabled() const {
+  rtc::CritScope cs(crit_capture_);
   return drift_compensation_enabled_;
 }
 
 void EchoCancellationImpl::set_stream_drift_samples(int drift) {
+  rtc::CritScope cs(crit_capture_);
   was_stream_drift_set_ = true;
   stream_drift_samples_ = drift;
 }
 
 int EchoCancellationImpl::stream_drift_samples() const {
+  rtc::CritScope cs(crit_capture_);
   return stream_drift_samples_;
 }
 
 int EchoCancellationImpl::enable_metrics(bool enable) {
-  CriticalSectionScoped crit_scoped(crit_);
-  metrics_enabled_ = enable;
+  {
+    rtc::CritScope cs(crit_capture_);
+    metrics_enabled_ = enable;
+  }
   return Configure();
 }
 
 bool EchoCancellationImpl::are_metrics_enabled() const {
+  rtc::CritScope cs(crit_capture_);
   return metrics_enabled_;
 }
 
 // TODO(ajm): we currently just use the metrics from the first AEC. Think more
 //            about the best way to extend this to multi-channel.
 int EchoCancellationImpl::GetMetrics(Metrics* metrics) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_capture_);
   if (metrics == NULL) {
-    return apm_->kNullPointerError;
+    return AudioProcessing::kNullPointerError;
   }
 
   if (!is_component_enabled() || !metrics_enabled_) {
-    return apm_->kNotEnabledError;
+    return AudioProcessing::kNotEnabledError;
   }
 
   AecMetrics my_metrics;
@@ -286,7 +306,7 @@
 
   Handle* my_handle = static_cast<Handle*>(handle(0));
   int err = WebRtcAec_GetMetrics(my_handle, &my_metrics);
-  if (err != apm_->kNoError) {
+  if (err != AudioProcessing::kNoError) {
     return MapError(err);
   }
 
@@ -310,63 +330,70 @@
   metrics->a_nlp.maximum = my_metrics.aNlp.max;
   metrics->a_nlp.minimum = my_metrics.aNlp.min;
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 bool EchoCancellationImpl::stream_has_echo() const {
+  rtc::CritScope cs(crit_capture_);
   return stream_has_echo_;
 }
 
 int EchoCancellationImpl::enable_delay_logging(bool enable) {
-  CriticalSectionScoped crit_scoped(crit_);
-  delay_logging_enabled_ = enable;
+  {
+    rtc::CritScope cs(crit_capture_);
+    delay_logging_enabled_ = enable;
+  }
   return Configure();
 }
 
 bool EchoCancellationImpl::is_delay_logging_enabled() const {
+  rtc::CritScope cs(crit_capture_);
   return delay_logging_enabled_;
 }
 
 bool EchoCancellationImpl::is_delay_agnostic_enabled() const {
+  rtc::CritScope cs(crit_capture_);
   return delay_agnostic_enabled_;
 }
 
 bool EchoCancellationImpl::is_extended_filter_enabled() const {
+  rtc::CritScope cs(crit_capture_);
   return extended_filter_enabled_;
 }
 
 // TODO(bjornv): How should we handle the multi-channel case?
 int EchoCancellationImpl::GetDelayMetrics(int* median, int* std) {
+  rtc::CritScope cs(crit_capture_);
   float fraction_poor_delays = 0;
   return GetDelayMetrics(median, std, &fraction_poor_delays);
 }
 
 int EchoCancellationImpl::GetDelayMetrics(int* median, int* std,
                                           float* fraction_poor_delays) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_capture_);
   if (median == NULL) {
-    return apm_->kNullPointerError;
+    return AudioProcessing::kNullPointerError;
   }
   if (std == NULL) {
-    return apm_->kNullPointerError;
+    return AudioProcessing::kNullPointerError;
   }
 
   if (!is_component_enabled() || !delay_logging_enabled_) {
-    return apm_->kNotEnabledError;
+    return AudioProcessing::kNotEnabledError;
   }
 
   Handle* my_handle = static_cast<Handle*>(handle(0));
   const int err =
       WebRtcAec_GetDelayMetrics(my_handle, median, std, fraction_poor_delays);
-  if (err != apm_->kNoError) {
+  if (err != AudioProcessing::kNoError) {
     return MapError(err);
   }
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 struct AecCore* EchoCancellationImpl::aec_core() const {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_capture_);
   if (!is_component_enabled()) {
     return NULL;
   }
@@ -376,13 +403,16 @@
 
 int EchoCancellationImpl::Initialize() {
   int err = ProcessingComponent::Initialize();
-  if (err != apm_->kNoError || !is_component_enabled()) {
-    return err;
+  {
+    rtc::CritScope cs(crit_capture_);
+    if (err != AudioProcessing::kNoError || !is_component_enabled()) {
+      return err;
+    }
   }
 
   AllocateRenderQueue();
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 void EchoCancellationImpl::AllocateRenderQueue() {
@@ -390,6 +420,9 @@
       static_cast<size_t>(1),
       kMaxAllowedValuesOfSamplesPerFrame * num_handles_required());
 
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+
   // Reallocate the queue if the queue item size is too small to fit the
   // data to put in the queue.
   if (render_queue_element_max_size_ < new_render_queue_element_max_size) {
@@ -410,8 +443,11 @@
 }
 
 void EchoCancellationImpl::SetExtraOptions(const Config& config) {
-  extended_filter_enabled_ = config.Get<ExtendedFilter>().enabled;
-  delay_agnostic_enabled_ = config.Get<DelayAgnostic>().enabled;
+  {
+    rtc::CritScope cs(crit_capture_);
+    extended_filter_enabled_ = config.Get<ExtendedFilter>().enabled;
+    delay_agnostic_enabled_ = config.Get<DelayAgnostic>().enabled;
+  }
   Configure();
 }
 
@@ -425,23 +461,25 @@
 }
 
 int EchoCancellationImpl::InitializeHandle(void* handle) const {
+  // Not locked as it only relies on the APM public API, which is thread safe.
+
   assert(handle != NULL);
   // TODO(ajm): Drift compensation is disabled in practice. If restored, it
   // should be managed internally and not depend on the hardware sample rate.
   // For now, just hardcode a 48 kHz value.
   return WebRtcAec_Init(static_cast<Handle*>(handle),
-                       apm_->proc_sample_rate_hz(),
-                       48000);
+                        apm_->proc_sample_rate_hz(), 48000);
 }
 
 int EchoCancellationImpl::ConfigureHandle(void* handle) const {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
   assert(handle != NULL);
   AecConfig config;
   config.metricsMode = metrics_enabled_;
   config.nlpMode = MapSetting(suppression_level_);
   config.skewMode = drift_compensation_enabled_;
   config.delay_logging = delay_logging_enabled_;
-
   WebRtcAec_enable_extended_filter(
       WebRtcAec_aec_core(static_cast<Handle*>(handle)),
       extended_filter_enabled_ ? 1 : 0);
@@ -452,11 +490,13 @@
 }
 
 int EchoCancellationImpl::num_handles_required() const {
+  // Not locked as it only relies on the APM public API, which is thread safe.
   return apm_->num_output_channels() *
          apm_->num_reverse_channels();
 }
 
 int EchoCancellationImpl::GetHandleError(void* handle) const {
+  // Not locked as it does not rely on anything in the state.
   assert(handle != NULL);
   return AudioProcessing::kUnspecifiedError;
 }
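
The render-to-capture handoff above follows one pattern throughout:
ProcessRenderAudio() queues far-end samples under the render lock,
ReadQueuedRenderData() drains them into the AEC under the capture lock, and
a full queue is handled by draining once and retrying the insert. A
bounded-queue sketch of that handoff using std::mutex and std::deque (the
real code uses a lock-free SwapQueue; this is illustrative only):

  #include <cstddef>
  #include <deque>
  #include <mutex>
  #include <vector>

  class RenderQueue {
   public:
    explicit RenderQueue(size_t capacity) : capacity_(capacity) {}

    // Render side: queue one chunk; on overflow, drain then retry,
    // mirroring the RTC_DCHECK'd retry in ProcessRenderAudio().
    void InsertOrDrain(const std::vector<float>& chunk) {
      if (!TryInsert(chunk)) {
        DrainAll();        // The queue is full and needs to be emptied.
        TryInsert(chunk);  // Now fits again.
      }
    }

    // Capture side: hand all queued chunks to the far-end processing.
    void DrainAll() {
      std::lock_guard<std::mutex> lock(mutex_);
      while (!queue_.empty()) {
        // ... feed queue_.front() to the consumer here ...
        queue_.pop_front();
      }
    }

   private:
    bool TryInsert(const std::vector<float>& chunk) {
      std::lock_guard<std::mutex> lock(mutex_);
      if (queue_.size() >= capacity_)
        return false;
      queue_.push_back(chunk);
      return true;
    }

    const size_t capacity_;
    std::mutex mutex_;
    std::deque<std::vector<float>> queue_;  // Guarded by mutex_.
  };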
diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl.h b/webrtc/modules/audio_processing/echo_cancellation_impl.h
index 96d236e..9418fbf 100644
--- a/webrtc/modules/audio_processing/echo_cancellation_impl.h
+++ b/webrtc/modules/audio_processing/echo_cancellation_impl.h
@@ -11,6 +11,7 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
 
+#include "webrtc/base/criticalsection.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/common_audio/swap_queue.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
@@ -19,13 +20,13 @@
 namespace webrtc {
 
 class AudioBuffer;
-class CriticalSectionWrapper;
 
 class EchoCancellationImpl : public EchoCancellation,
                              public ProcessingComponent {
  public:
   EchoCancellationImpl(const AudioProcessing* apm,
-                       CriticalSectionWrapper* crit);
+                       rtc::CriticalSection* crit_render,
+                       rtc::CriticalSection* crit_capture);
   virtual ~EchoCancellationImpl();
 
   int ProcessRenderAudio(const AudioBuffer* audio);
@@ -40,11 +41,11 @@
   // ProcessingComponent implementation.
   int Initialize() override;
   void SetExtraOptions(const Config& config) override;
-
   bool is_delay_agnostic_enabled() const;
   bool is_extended_filter_enabled() const;
 
   // Reads render side data that has been queued on the render call.
+  // Called holding the capture lock.
   void ReadQueuedRenderData();
 
  private:
@@ -63,6 +64,7 @@
   int GetDelayMetrics(int* median,
                       int* std,
                       float* fraction_poor_delays) override;
+
   struct AecCore* aec_core() const override;
 
   // ProcessingComponent implementation.
@@ -75,22 +77,28 @@
 
   void AllocateRenderQueue();
 
+  // Not guarded as its public API is thread safe.
   const AudioProcessing* apm_;
-  CriticalSectionWrapper* crit_;
 
-  bool drift_compensation_enabled_;
-  bool metrics_enabled_;
-  SuppressionLevel suppression_level_;
-  int stream_drift_samples_;
-  bool was_stream_drift_set_;
-  bool stream_has_echo_;
-  bool delay_logging_enabled_;
-  bool extended_filter_enabled_;
-  bool delay_agnostic_enabled_;
+  rtc::CriticalSection* const crit_render_ ACQUIRED_BEFORE(crit_capture_);
+  rtc::CriticalSection* const crit_capture_;
 
-  size_t render_queue_element_max_size_;
-  std::vector<float> render_queue_buffer_;
-  std::vector<float> capture_queue_buffer_;
+  bool drift_compensation_enabled_ GUARDED_BY(crit_capture_);
+  bool metrics_enabled_ GUARDED_BY(crit_capture_);
+  SuppressionLevel suppression_level_ GUARDED_BY(crit_capture_);
+  int stream_drift_samples_ GUARDED_BY(crit_capture_);
+  bool was_stream_drift_set_ GUARDED_BY(crit_capture_);
+  bool stream_has_echo_ GUARDED_BY(crit_capture_);
+  bool delay_logging_enabled_ GUARDED_BY(crit_capture_);
+  bool extended_filter_enabled_ GUARDED_BY(crit_capture_);
+  bool delay_agnostic_enabled_ GUARDED_BY(crit_capture_);
+
+  size_t render_queue_element_max_size_ GUARDED_BY(crit_render_)
+      GUARDED_BY(crit_capture_);
+  std::vector<float> render_queue_buffer_ GUARDED_BY(crit_render_);
+  std::vector<float> capture_queue_buffer_ GUARDED_BY(crit_capture_);
+
+  // Lock protection not needed.
   rtc::scoped_ptr<SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>>
       render_signal_queue_;
 };
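
The annotations in this header do real work under Clang's thread-safety
analysis: GUARDED_BY ties each field to the lock that must be held to touch
it, and EXCLUSIVE_LOCKS_REQUIRED lets a helper assume a lock is held without
re-acquiring it. A stripped-down sketch of the machinery (webrtc's actual
macros live in base/thread_annotations.h; the spellings below are the
underlying Clang attributes):

  #include <mutex>

  #if defined(__clang__)
  #define CAPABILITY(x) __attribute__((capability(x)))
  #define GUARDED_BY(x) __attribute__((guarded_by(x)))
  #define EXCLUSIVE_LOCKS_REQUIRED(...) \
    __attribute__((exclusive_locks_required(__VA_ARGS__)))
  #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
  #define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
  #else
  #define CAPABILITY(x)
  #define GUARDED_BY(x)
  #define EXCLUSIVE_LOCKS_REQUIRED(...)
  #define ACQUIRE(...)
  #define RELEASE(...)
  #endif

  // A mutex type the analysis recognizes as a capability.
  class CAPABILITY("mutex") Mutex {
   public:
    void Lock() ACQUIRE() { m_.lock(); }
    void Unlock() RELEASE() { m_.unlock(); }

   private:
    std::mutex m_;
  };

  class AnnotatedCounter {
   public:
    void Increment() {
      mutex_.Lock();
      IncrementLocked();  // OK: the analysis sees mutex_ held here.
      mutex_.Unlock();
    }

   private:
    // Compile-time contract: callers must already hold mutex_.
    void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(mutex_) { ++value_; }

    Mutex mutex_;
    int value_ GUARDED_BY(mutex_) = 0;
  };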
diff --git a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
index a32c77c..5ff7bd7 100644
--- a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
+++ b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
@@ -15,7 +15,6 @@
 
 #include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h"
 #include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/include/logging.h"
 
 namespace webrtc {
@@ -69,14 +68,20 @@
 }
 
 EchoControlMobileImpl::EchoControlMobileImpl(const AudioProcessing* apm,
-                                             CriticalSectionWrapper* crit)
+                                             rtc::CriticalSection* crit_render,
+                                             rtc::CriticalSection* crit_capture)
     : ProcessingComponent(),
       apm_(apm),
-      crit_(crit),
+      crit_render_(crit_render),
+      crit_capture_(crit_capture),
       routing_mode_(kSpeakerphone),
       comfort_noise_enabled_(true),
       external_echo_path_(NULL),
-      render_queue_element_max_size_(0) {}
+      render_queue_element_max_size_(0) {
+  RTC_DCHECK(apm);
+  RTC_DCHECK(crit_render);
+  RTC_DCHECK(crit_capture);
+}
 
 EchoControlMobileImpl::~EchoControlMobileImpl() {
     if (external_echo_path_ != NULL) {
@@ -86,15 +91,16 @@
 }
 
 int EchoControlMobileImpl::ProcessRenderAudio(const AudioBuffer* audio) {
+  rtc::CritScope cs_render(crit_render_);
+
   if (!is_component_enabled()) {
-    return apm_->kNoError;
+    return AudioProcessing::kNoError;
   }
 
   assert(audio->num_frames_per_band() <= 160);
   assert(audio->num_channels() == apm_->num_reverse_channels());
 
-  int err = apm_->kNoError;
-
+  int err = AudioProcessing::kNoError;
   // The ordering convention must be followed to pass to the correct AECM.
   size_t handle_index = 0;
   render_queue_buffer_.clear();
@@ -105,7 +111,7 @@
           my_handle, audio->split_bands_const(j)[kBand0To8kHz],
           audio->num_frames_per_band());
 
-      if (err != apm_->kNoError)
+      if (err != AudioProcessing::kNoError)
         return MapError(err);  // TODO(ajm): warning possible?
 
       // Buffer the samples in the render queue.
@@ -120,18 +126,21 @@
 
   // Insert the samples into the queue.
   if (!render_signal_queue_->Insert(&render_queue_buffer_)) {
+    // The data queue is full and needs to be emptied.
     ReadQueuedRenderData();
 
     // Retry the insert (should always work).
     RTC_DCHECK_EQ(render_signal_queue_->Insert(&render_queue_buffer_), true);
   }
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 // Read chunks of data that were received and queued on the render side from
 // a queue. All the data chunks are buffered into the farend signal of the AEC.
 void EchoControlMobileImpl::ReadQueuedRenderData() {
+  rtc::CritScope cs_capture(crit_capture_);
+
   if (!is_component_enabled()) {
     return;
   }
@@ -156,18 +165,20 @@
 }
 
 int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+  rtc::CritScope cs_capture(crit_capture_);
+
   if (!is_component_enabled()) {
-    return apm_->kNoError;
+    return AudioProcessing::kNoError;
   }
 
   if (!apm_->was_stream_delay_set()) {
-    return apm_->kStreamParameterNotSetError;
+    return AudioProcessing::kStreamParameterNotSetError;
   }
 
   assert(audio->num_frames_per_band() <= 160);
   assert(audio->num_channels() == apm_->num_output_channels());
 
-  int err = apm_->kNoError;
+  int err = AudioProcessing::kNoError;
 
   // The ordering convention must be followed to pass to the correct AECM.
   size_t handle_index = 0;
@@ -190,86 +201,99 @@
           audio->num_frames_per_band(),
           apm_->stream_delay_ms());
 
-      if (err != apm_->kNoError)
+      if (err != AudioProcessing::kNoError)
         return MapError(err);
 
       handle_index++;
     }
   }
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 int EchoControlMobileImpl::Enable(bool enable) {
-  CriticalSectionScoped crit_scoped(crit_);
   // Ensure AEC and AECM are not both enabled.
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+  // The is_enabled() call is safe from a deadlock perspective
+  // as both locks are already held in the correct order.
   if (enable && apm_->echo_cancellation()->is_enabled()) {
-    return apm_->kBadParameterError;
+    return AudioProcessing::kBadParameterError;
   }
 
   return EnableComponent(enable);
 }
 
 bool EchoControlMobileImpl::is_enabled() const {
+  rtc::CritScope cs(crit_capture_);
   return is_component_enabled();
 }
 
 int EchoControlMobileImpl::set_routing_mode(RoutingMode mode) {
-  CriticalSectionScoped crit_scoped(crit_);
   if (MapSetting(mode) == -1) {
-    return apm_->kBadParameterError;
+    return AudioProcessing::kBadParameterError;
   }
 
-  routing_mode_ = mode;
+  {
+    rtc::CritScope cs(crit_capture_);
+    routing_mode_ = mode;
+  }
   return Configure();
 }
 
 EchoControlMobile::RoutingMode EchoControlMobileImpl::routing_mode()
     const {
+  rtc::CritScope cs(crit_capture_);
   return routing_mode_;
 }
 
 int EchoControlMobileImpl::enable_comfort_noise(bool enable) {
-  CriticalSectionScoped crit_scoped(crit_);
-  comfort_noise_enabled_ = enable;
+  {
+    rtc::CritScope cs(crit_capture_);
+    comfort_noise_enabled_ = enable;
+  }
   return Configure();
 }
 
 bool EchoControlMobileImpl::is_comfort_noise_enabled() const {
+  rtc::CritScope cs(crit_capture_);
   return comfort_noise_enabled_;
 }
 
 int EchoControlMobileImpl::SetEchoPath(const void* echo_path,
                                        size_t size_bytes) {
-  CriticalSectionScoped crit_scoped(crit_);
-  if (echo_path == NULL) {
-    return apm_->kNullPointerError;
-  }
-  if (size_bytes != echo_path_size_bytes()) {
-    // Size mismatch
-    return apm_->kBadParameterError;
-  }
+  {
+    rtc::CritScope cs_render(crit_render_);
+    rtc::CritScope cs_capture(crit_capture_);
+    if (echo_path == NULL) {
+      return AudioProcessing::kNullPointerError;
+    }
+    if (size_bytes != echo_path_size_bytes()) {
+      // Size mismatch
+      return AudioProcessing::kBadParameterError;
+    }
 
-  if (external_echo_path_ == NULL) {
-    external_echo_path_ = new unsigned char[size_bytes];
+    if (external_echo_path_ == NULL) {
+      external_echo_path_ = new unsigned char[size_bytes];
+    }
+    memcpy(external_echo_path_, echo_path, size_bytes);
   }
-  memcpy(external_echo_path_, echo_path, size_bytes);
 
   return Initialize();
 }
 
 int EchoControlMobileImpl::GetEchoPath(void* echo_path,
                                        size_t size_bytes) const {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_capture_);
   if (echo_path == NULL) {
-    return apm_->kNullPointerError;
+    return AudioProcessing::kNullPointerError;
   }
   if (size_bytes != echo_path_size_bytes()) {
     // Size mismatch
-    return apm_->kBadParameterError;
+    return AudioProcessing::kBadParameterError;
   }
   if (!is_component_enabled()) {
-    return apm_->kNotEnabledError;
+    return AudioProcessing::kNotEnabledError;
   }
 
   // Get the echo path from the first channel
@@ -278,27 +302,30 @@
   if (err != 0)
     return MapError(err);
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 int EchoControlMobileImpl::Initialize() {
-  if (!is_component_enabled()) {
-    return apm_->kNoError;
+  {
+    rtc::CritScope cs_capture(crit_capture_);
+    if (!is_component_enabled()) {
+      return AudioProcessing::kNoError;
+    }
   }
 
-  if (apm_->proc_sample_rate_hz() > apm_->kSampleRate16kHz) {
+  if (apm_->proc_sample_rate_hz() > AudioProcessing::kSampleRate16kHz) {
     LOG(LS_ERROR) << "AECM only supports 16 kHz or lower sample rates";
-    return apm_->kBadSampleRateError;
+    return AudioProcessing::kBadSampleRateError;
   }
 
   int err = ProcessingComponent::Initialize();
-  if (err != apm_->kNoError) {
+  if (err != AudioProcessing::kNoError) {
     return err;
   }
 
   AllocateRenderQueue();
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 void EchoControlMobileImpl::AllocateRenderQueue() {
@@ -306,6 +333,9 @@
       static_cast<size_t>(1),
       kMaxAllowedValuesOfSamplesPerFrame * num_handles_required());
 
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+
   // Reallocate the queue if the queue item size is too small to fit the
   // data to put in the queue.
   if (render_queue_element_max_size_ < new_render_queue_element_max_size) {
@@ -330,10 +360,14 @@
 }
 
 void EchoControlMobileImpl::DestroyHandle(void* handle) const {
+  // This method is only called during APM destruction, where no concurrent
+  // access can occur.
   WebRtcAecm_Free(static_cast<Handle*>(handle));
 }
 
 int EchoControlMobileImpl::InitializeHandle(void* handle) const {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
   assert(handle != NULL);
   Handle* my_handle = static_cast<Handle*>(handle);
   if (WebRtcAecm_Init(my_handle, apm_->proc_sample_rate_hz()) != 0) {
@@ -347,10 +381,12 @@
     }
   }
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 int EchoControlMobileImpl::ConfigureHandle(void* handle) const {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
   AecmConfig config;
   config.cngMode = comfort_noise_enabled_;
   config.echoMode = MapSetting(routing_mode_);
@@ -359,11 +395,13 @@
 }
 
 int EchoControlMobileImpl::num_handles_required() const {
+  // Not locked as it only relies on the APM public API, which is thread safe.
   return apm_->num_output_channels() *
          apm_->num_reverse_channels();
 }
 
 int EchoControlMobileImpl::GetHandleError(void* handle) const {
+  // Not locked as it does not rely on any state.
   assert(handle != NULL);
   return AudioProcessing::kUnspecifiedError;
 }
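
The setters above (set_routing_mode(), enable_comfort_noise(), SetEchoPath()) share one idiom: the state update happens in a narrow scope so the lock is released before Configure() or Initialize() runs, since those calls reach ConfigureHandle()/InitializeHandle(), which take the render lock before the capture lock. A minimal sketch of the idiom, with hypothetical names:

  int FooImpl::set_param(int value) {
    {  // Narrow scope: update the member under the capture lock only.
      rtc::CritScope cs(crit_capture_);
      param_ = value;
    }  // Capture lock released here.
    // Configure() -> ConfigureHandle() can now take crit_render_ before
    // crit_capture_ without inverting the declared ACQUIRED_BEFORE order.
    return Configure();
  }
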
diff --git a/webrtc/modules/audio_processing/echo_control_mobile_impl.h b/webrtc/modules/audio_processing/echo_control_mobile_impl.h
index 8bfa1d7..3b5dbf3 100644
--- a/webrtc/modules/audio_processing/echo_control_mobile_impl.h
+++ b/webrtc/modules/audio_processing/echo_control_mobile_impl.h
@@ -11,6 +11,7 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CONTROL_MOBILE_IMPL_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CONTROL_MOBILE_IMPL_H_
 
+#include "webrtc/base/criticalsection.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/common_audio/swap_queue.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
@@ -19,13 +20,14 @@
 namespace webrtc {
 
 class AudioBuffer;
-class CriticalSectionWrapper;
 
 class EchoControlMobileImpl : public EchoControlMobile,
                               public ProcessingComponent {
  public:
   EchoControlMobileImpl(const AudioProcessing* apm,
-                        CriticalSectionWrapper* crit);
+                        rtc::CriticalSection* crit_render,
+                        rtc::CriticalSection* crit_capture);
+
   virtual ~EchoControlMobileImpl();
 
   int ProcessRenderAudio(const AudioBuffer* audio);
@@ -51,6 +53,7 @@
   int GetEchoPath(void* echo_path, size_t size_bytes) const override;
 
   // ProcessingComponent implementation.
+  // Called holding both the render and capture locks.
   void* CreateHandle() const override;
   int InitializeHandle(void* handle) const override;
   int ConfigureHandle(void* handle) const override;
@@ -60,15 +63,24 @@
 
   void AllocateRenderQueue();
 
+  // Not guarded as its public API is thread safe.
   const AudioProcessing* apm_;
-  CriticalSectionWrapper* crit_;
-  RoutingMode routing_mode_;
-  bool comfort_noise_enabled_;
-  unsigned char* external_echo_path_;
 
-  size_t render_queue_element_max_size_;
-  std::vector<int16_t> render_queue_buffer_;
-  std::vector<int16_t> capture_queue_buffer_;
+  rtc::CriticalSection* const crit_render_ ACQUIRED_BEFORE(crit_capture_);
+  rtc::CriticalSection* const crit_capture_;
+
+  RoutingMode routing_mode_ GUARDED_BY(crit_capture_);
+  bool comfort_noise_enabled_ GUARDED_BY(crit_capture_);
+  unsigned char* external_echo_path_ GUARDED_BY(crit_render_)
+      GUARDED_BY(crit_capture_);
+
+  size_t render_queue_element_max_size_ GUARDED_BY(crit_render_)
+      GUARDED_BY(crit_capture_);
+
+  std::vector<int16_t> render_queue_buffer_ GUARDED_BY(crit_render_);
+  std::vector<int16_t> capture_queue_buffer_ GUARDED_BY(crit_capture_);
+
+  // Lock protection not needed.
   rtc::scoped_ptr<
       SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>>
       render_signal_queue_;
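
render_signal_queue_ is what lets the render side hand data to the capture side without holding both locks across the copy. A rough sketch of the handoff (queue depth and function names assumed; SwapQueue is from webrtc/common_audio/swap_queue.h, and Insert()/Remove() swap the caller's buffer with a preallocated slot rather than copying):

  #include <vector>

  #include "webrtc/common_audio/swap_queue.h"

  const size_t kQueueSlots = 30;        // Assumed queue depth.
  const size_t kMaxFrameSamples = 160;  // Samples per band per frame.

  SwapQueue<std::vector<int16_t>> queue(kQueueSlots,
                                        std::vector<int16_t>(kMaxFrameSamples));

  // Render thread: buffer samples locally, then swap them into the queue.
  // After Insert(), *buffered_samples holds the recycled slot's contents.
  void OnRenderAudio(std::vector<int16_t>* buffered_samples) {
    if (!queue.Insert(buffered_samples)) {
      // Queue full. The components above drain it via ReadQueuedRenderData()
      // (under the capture lock) and retry, which is why the retry
      // RTC_DCHECK_EQ can assume success.
    }
  }

  // Capture thread: drain everything queued since the last capture call.
  void OnCaptureAudio() {
    std::vector<int16_t> item(kMaxFrameSamples);
    while (queue.Remove(&item)) {
      // Feed `item` to the component's far-end path.
    }
  }
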
diff --git a/webrtc/modules/audio_processing/gain_control_impl.cc b/webrtc/modules/audio_processing/gain_control_impl.cc
index 0eacd28..c8175dc 100644
--- a/webrtc/modules/audio_processing/gain_control_impl.cc
+++ b/webrtc/modules/audio_processing/gain_control_impl.cc
@@ -14,7 +14,6 @@
 
 #include "webrtc/modules/audio_processing/audio_buffer.h"
 #include "webrtc/modules/audio_processing/agc/legacy/gain_control.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 
 namespace webrtc {
 
@@ -44,10 +43,12 @@
 }  // namespace
 
 GainControlImpl::GainControlImpl(const AudioProcessing* apm,
-                                 CriticalSectionWrapper* crit)
+                                 rtc::CriticalSection* crit_render,
+                                 rtc::CriticalSection* crit_capture)
     : ProcessingComponent(),
       apm_(apm),
-      crit_(crit),
+      crit_render_(crit_render),
+      crit_capture_(crit_capture),
       mode_(kAdaptiveAnalog),
       minimum_capture_level_(0),
       maximum_capture_level_(255),
@@ -57,13 +58,18 @@
       analog_capture_level_(0),
       was_analog_level_set_(false),
       stream_is_saturated_(false),
-      render_queue_element_max_size_(0) {}
+      render_queue_element_max_size_(0) {
+  RTC_DCHECK(apm);
+  RTC_DCHECK(crit_render);
+  RTC_DCHECK(crit_capture);
+}
 
 GainControlImpl::~GainControlImpl() {}
 
 int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {
+  rtc::CritScope cs(crit_render_);
   if (!is_component_enabled()) {
-    return apm_->kNoError;
+    return AudioProcessing::kNoError;
   }
 
   assert(audio->num_frames_per_band() <= 160);
@@ -74,7 +80,7 @@
     int err =
         WebRtcAgc_GetAddFarendError(my_handle, audio->num_frames_per_band());
 
-    if (err != apm_->kNoError)
+    if (err != AudioProcessing::kNoError)
       return GetHandleError(my_handle);
 
     // Buffer the samples in the render queue.
@@ -85,18 +91,21 @@
 
   // Insert the samples into the queue.
   if (!render_signal_queue_->Insert(&render_queue_buffer_)) {
+    // The data queue is full and needs to be emptied.
     ReadQueuedRenderData();
 
     // Retry the insert (should always work).
     RTC_DCHECK_EQ(render_signal_queue_->Insert(&render_queue_buffer_), true);
   }
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 // Read chunks of data that were received and queued on the render side from
 // a queue. All the data chunks are buffered into the farend signal of the AGC.
 void GainControlImpl::ReadQueuedRenderData() {
+  rtc::CritScope cs(crit_capture_);
+
   if (!is_component_enabled()) {
     return;
   }
@@ -116,14 +125,16 @@
 }
 
 int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
+  rtc::CritScope cs(crit_capture_);
+
   if (!is_component_enabled()) {
-    return apm_->kNoError;
+    return AudioProcessing::kNoError;
   }
 
   assert(audio->num_frames_per_band() <= 160);
   assert(audio->num_channels() == num_handles());
 
-  int err = apm_->kNoError;
+  int err = AudioProcessing::kNoError;
 
   if (mode_ == kAdaptiveAnalog) {
     capture_levels_.assign(num_handles(), analog_capture_level_);
@@ -135,7 +146,7 @@
           audio->num_bands(),
           audio->num_frames_per_band());
 
-      if (err != apm_->kNoError) {
+      if (err != AudioProcessing::kNoError) {
         return GetHandleError(my_handle);
       }
     }
@@ -155,23 +166,25 @@
 
       capture_levels_[i] = capture_level_out;
 
-      if (err != apm_->kNoError) {
+      if (err != AudioProcessing::kNoError) {
         return GetHandleError(my_handle);
       }
 
     }
   }
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+  rtc::CritScope cs(crit_capture_);
+
   if (!is_component_enabled()) {
-    return apm_->kNoError;
+    return AudioProcessing::kNoError;
   }
 
   if (mode_ == kAdaptiveAnalog && !was_analog_level_set_) {
-    return apm_->kStreamParameterNotSetError;
+    return AudioProcessing::kStreamParameterNotSetError;
   }
 
   assert(audio->num_frames_per_band() <= 160);
@@ -183,6 +196,8 @@
     int32_t capture_level_out = 0;
     uint8_t saturation_warning = 0;
 
+    // The call to stream_has_echo() is safe from a deadlock perspective
+    // as the capture lock is already held.
     int err = WebRtcAgc_Process(
         my_handle,
         audio->split_bands_const(i),
@@ -194,7 +209,7 @@
         apm_->echo_cancellation()->stream_has_echo(),
         &saturation_warning);
 
-    if (err != apm_->kNoError) {
+    if (err != AudioProcessing::kNoError) {
       return GetHandleError(my_handle);
     }
 
@@ -215,22 +230,24 @@
   }
 
   was_analog_level_set_ = false;
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 // TODO(ajm): ensure this is called under kAdaptiveAnalog.
 int GainControlImpl::set_stream_analog_level(int level) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_capture_);
+
   was_analog_level_set_ = true;
   if (level < minimum_capture_level_ || level > maximum_capture_level_) {
-    return apm_->kBadParameterError;
+    return AudioProcessing::kBadParameterError;
   }
   analog_capture_level_ = level;
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 int GainControlImpl::stream_analog_level() {
+  rtc::CritScope cs(crit_capture_);
   // TODO(ajm): enable this assertion?
   //assert(mode_ == kAdaptiveAnalog);
 
@@ -238,18 +255,21 @@
 }
 
 int GainControlImpl::Enable(bool enable) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
   return EnableComponent(enable);
 }
 
 bool GainControlImpl::is_enabled() const {
+  rtc::CritScope cs(crit_capture_);
   return is_component_enabled();
 }
 
 int GainControlImpl::set_mode(Mode mode) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
   if (MapSetting(mode) == -1) {
-    return apm_->kBadParameterError;
+    return AudioProcessing::kBadParameterError;
   }
 
   mode_ = mode;
@@ -257,22 +277,23 @@
 }
 
 GainControl::Mode GainControlImpl::mode() const {
+  rtc::CritScope cs(crit_capture_);
   return mode_;
 }
 
 int GainControlImpl::set_analog_level_limits(int minimum,
                                              int maximum) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_capture_);
   if (minimum < 0) {
-    return apm_->kBadParameterError;
+    return AudioProcessing::kBadParameterError;
   }
 
   if (maximum > 65535) {
-    return apm_->kBadParameterError;
+    return AudioProcessing::kBadParameterError;
   }
 
   if (maximum < minimum) {
-    return apm_->kBadParameterError;
+    return AudioProcessing::kBadParameterError;
   }
 
   minimum_capture_level_ = minimum;
@@ -282,21 +303,24 @@
 }
 
 int GainControlImpl::analog_level_minimum() const {
+  rtc::CritScope cs(crit_capture_);
   return minimum_capture_level_;
 }
 
 int GainControlImpl::analog_level_maximum() const {
+  rtc::CritScope cs(crit_capture_);
   return maximum_capture_level_;
 }
 
 bool GainControlImpl::stream_is_saturated() const {
+  rtc::CritScope cs(crit_capture_);
   return stream_is_saturated_;
 }
 
 int GainControlImpl::set_target_level_dbfs(int level) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_capture_);
   if (level > 31 || level < 0) {
-    return apm_->kBadParameterError;
+    return AudioProcessing::kBadParameterError;
   }
 
   target_level_dbfs_ = level;
@@ -304,13 +328,14 @@
 }
 
 int GainControlImpl::target_level_dbfs() const {
+  rtc::CritScope cs(crit_capture_);
   return target_level_dbfs_;
 }
 
 int GainControlImpl::set_compression_gain_db(int gain) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_capture_);
   if (gain < 0 || gain > 90) {
-    return apm_->kBadParameterError;
+    return AudioProcessing::kBadParameterError;
   }
 
   compression_gain_db_ = gain;
@@ -318,31 +343,35 @@
 }
 
 int GainControlImpl::compression_gain_db() const {
+  rtc::CritScope cs(crit_capture_);
   return compression_gain_db_;
 }
 
 int GainControlImpl::enable_limiter(bool enable) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_capture_);
   limiter_enabled_ = enable;
   return Configure();
 }
 
 bool GainControlImpl::is_limiter_enabled() const {
+  rtc::CritScope cs(crit_capture_);
   return limiter_enabled_;
 }
 
 int GainControlImpl::Initialize() {
   int err = ProcessingComponent::Initialize();
-  if (err != apm_->kNoError || !is_component_enabled()) {
+  if (err != AudioProcessing::kNoError || !is_component_enabled()) {
     return err;
   }
 
   AllocateRenderQueue();
 
+  rtc::CritScope cs_capture(crit_capture_);
   const int n = num_handles();
   RTC_CHECK_GE(n, 0) << "Bad number of handles: " << n;
+
   capture_levels_.assign(n, analog_capture_level_);
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 void GainControlImpl::AllocateRenderQueue() {
@@ -350,6 +379,9 @@
       std::max<size_t>(static_cast<size_t>(1),
                        kMaxAllowedValuesOfSamplesPerFrame * num_handles());
 
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+
   if (render_queue_element_max_size_ < new_render_queue_element_max_size) {
     render_queue_element_max_size_ = new_render_queue_element_max_size;
     std::vector<int16_t> template_queue_element(render_queue_element_max_size_);
@@ -375,6 +407,9 @@
 }
 
 int GainControlImpl::InitializeHandle(void* handle) const {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+
   return WebRtcAgc_Init(static_cast<Handle*>(handle),
                           minimum_capture_level_,
                           maximum_capture_level_,
@@ -383,6 +418,8 @@
 }
 
 int GainControlImpl::ConfigureHandle(void* handle) const {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
   WebRtcAgcConfig config;
   // TODO(ajm): Flip the sign here (since AGC expects a positive value) if we
   //            change the interface.
@@ -397,6 +434,7 @@
 }
 
 int GainControlImpl::num_handles_required() const {
+  // Not locked as it only relies on the APM public API, which is thread safe.
   return apm_->num_output_channels();
 }
 
@@ -404,6 +442,6 @@
   // The AGC has no get_error() function.
   // (Despite listing errors in its interface...)
   assert(handle != NULL);
-  return apm_->kUnspecifiedError;
+  return AudioProcessing::kUnspecifiedError;
 }
 }  // namespace webrtc
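
The deadlock note in ProcessCaptureAudio() above relies on two properties: every submodule shares the same APM-owned capture lock, and rtc::CriticalSection is recursive, so re-acquiring it on the owning thread is safe. A hedged sketch (hypothetical names; the echo canceller's getter presumably takes the same capture lock, given that stream_has_echo_ is GUARDED_BY(crit_capture_) in its header):

  int GainSketch::ProcessCapture() {
    rtc::CritScope cs(crit_capture_);  // First acquisition on this thread.
    // stream_has_echo() internally takes a CritScope on the *same* lock
    // object; the nested acquisition succeeds because the lock is recursive,
    // and no second lock is taken in a conflicting order.
    bool has_echo = echo_cancellation_->stream_has_echo();
    return has_echo ? 1 : 0;
  }
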
diff --git a/webrtc/modules/audio_processing/gain_control_impl.h b/webrtc/modules/audio_processing/gain_control_impl.h
index 25b16c0..b531de9 100644
--- a/webrtc/modules/audio_processing/gain_control_impl.h
+++ b/webrtc/modules/audio_processing/gain_control_impl.h
@@ -13,7 +13,9 @@
 
 #include <vector>
 
+#include "webrtc/base/criticalsection.h"
 #include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/thread_annotations.h"
 #include "webrtc/common_audio/swap_queue.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/audio_processing/processing_component.h"
@@ -21,13 +23,13 @@
 namespace webrtc {
 
 class AudioBuffer;
-class CriticalSectionWrapper;
 
 class GainControlImpl : public GainControl,
                         public ProcessingComponent {
  public:
   GainControlImpl(const AudioProcessing* apm,
-                  CriticalSectionWrapper* crit);
+                  rtc::CriticalSection* crit_render,
+                  rtc::CriticalSection* crit_capture);
   virtual ~GainControlImpl();
 
   int ProcessRenderAudio(AudioBuffer* audio);
@@ -71,22 +73,29 @@
 
   void AllocateRenderQueue();
 
+  // Not guarded as its public API is thread safe.
   const AudioProcessing* apm_;
-  CriticalSectionWrapper* crit_;
-  Mode mode_;
-  int minimum_capture_level_;
-  int maximum_capture_level_;
-  bool limiter_enabled_;
-  int target_level_dbfs_;
-  int compression_gain_db_;
-  std::vector<int> capture_levels_;
-  int analog_capture_level_;
-  bool was_analog_level_set_;
-  bool stream_is_saturated_;
 
-  size_t render_queue_element_max_size_;
-  std::vector<int16_t> render_queue_buffer_;
-  std::vector<int16_t> capture_queue_buffer_;
+  rtc::CriticalSection* const crit_render_ ACQUIRED_BEFORE(crit_capture_);
+  rtc::CriticalSection* const crit_capture_;
+
+  Mode mode_ GUARDED_BY(crit_capture_);
+  int minimum_capture_level_ GUARDED_BY(crit_capture_);
+  int maximum_capture_level_ GUARDED_BY(crit_capture_);
+  bool limiter_enabled_ GUARDED_BY(crit_capture_);
+  int target_level_dbfs_ GUARDED_BY(crit_capture_);
+  int compression_gain_db_ GUARDED_BY(crit_capture_);
+  std::vector<int> capture_levels_ GUARDED_BY(crit_capture_);
+  int analog_capture_level_ GUARDED_BY(crit_capture_);
+  bool was_analog_level_set_ GUARDED_BY(crit_capture_);
+  bool stream_is_saturated_ GUARDED_BY(crit_capture_);
+
+  size_t render_queue_element_max_size_ GUARDED_BY(crit_render_)
+      GUARDED_BY(crit_capture_);
+  std::vector<int16_t> render_queue_buffer_ GUARDED_BY(crit_render_);
+  std::vector<int16_t> capture_queue_buffer_ GUARDED_BY(crit_capture_);
+
+  // Lock protection not needed.
   rtc::scoped_ptr<
       SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>>
       render_signal_queue_;
diff --git a/webrtc/modules/audio_processing/high_pass_filter_impl.cc b/webrtc/modules/audio_processing/high_pass_filter_impl.cc
index 29e4820..2ad0a50 100644
--- a/webrtc/modules/audio_processing/high_pass_filter_impl.cc
+++ b/webrtc/modules/audio_processing/high_pass_filter_impl.cc
@@ -100,18 +100,20 @@
 typedef FilterState Handle;
 
 HighPassFilterImpl::HighPassFilterImpl(const AudioProcessing* apm,
-                                       CriticalSectionWrapper* crit)
-  : ProcessingComponent(),
-    apm_(apm),
-    crit_(crit) {}
+                                       rtc::CriticalSection* crit)
+    : ProcessingComponent(), apm_(apm), crit_(crit) {
+  RTC_DCHECK(apm);
+  RTC_DCHECK(crit);
+}
 
 HighPassFilterImpl::~HighPassFilterImpl() {}
 
 int HighPassFilterImpl::ProcessCaptureAudio(AudioBuffer* audio) {
-  int err = apm_->kNoError;
+  rtc::CritScope cs(crit_);
+  int err = AudioProcessing::kNoError;
 
   if (!is_component_enabled()) {
-    return apm_->kNoError;
+    return AudioProcessing::kNoError;
   }
 
   assert(audio->num_frames_per_band() <= 160);
@@ -122,20 +124,21 @@
                  audio->split_bands(i)[kBand0To8kHz],
                  audio->num_frames_per_band());
 
-    if (err != apm_->kNoError) {
+    if (err != AudioProcessing::kNoError) {
       return GetHandleError(my_handle);
     }
   }
 
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 int HighPassFilterImpl::Enable(bool enable) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_);
   return EnableComponent(enable);
 }
 
 bool HighPassFilterImpl::is_enabled() const {
+  rtc::CritScope cs(crit_);
   return is_component_enabled();
 }
 
@@ -148,12 +151,15 @@
 }
 
 int HighPassFilterImpl::InitializeHandle(void* handle) const {
+  // TODO(peah): Remove dependency on apm for the
+  // capture side sample rate.
+  rtc::CritScope cs(crit_);
   return InitializeFilter(static_cast<Handle*>(handle),
                           apm_->proc_sample_rate_hz());
 }
 
 int HighPassFilterImpl::ConfigureHandle(void* /*handle*/) const {
-  return apm_->kNoError; // Not configurable.
+  return AudioProcessing::kNoError;  // Not configurable.
 }
 
 int HighPassFilterImpl::num_handles_required() const {
@@ -163,6 +169,6 @@
 int HighPassFilterImpl::GetHandleError(void* handle) const {
   // The component has no detailed errors.
   assert(handle != NULL);
-  return apm_->kUnspecifiedError;
+  return AudioProcessing::kUnspecifiedError;
 }
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_processing/high_pass_filter_impl.h b/webrtc/modules/audio_processing/high_pass_filter_impl.h
index 90b393e..6f8079e 100644
--- a/webrtc/modules/audio_processing/high_pass_filter_impl.h
+++ b/webrtc/modules/audio_processing/high_pass_filter_impl.h
@@ -11,18 +11,18 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_HIGH_PASS_FILTER_IMPL_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_HIGH_PASS_FILTER_IMPL_H_
 
+#include "webrtc/base/criticalsection.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/audio_processing/processing_component.h"
 
 namespace webrtc {
 
 class AudioBuffer;
-class CriticalSectionWrapper;
 
 class HighPassFilterImpl : public HighPassFilter,
                            public ProcessingComponent {
  public:
-  HighPassFilterImpl(const AudioProcessing* apm, CriticalSectionWrapper* crit);
+  HighPassFilterImpl(const AudioProcessing* apm, rtc::CriticalSection* crit);
   virtual ~HighPassFilterImpl();
 
   int ProcessCaptureAudio(AudioBuffer* audio);
@@ -43,7 +43,8 @@
   int GetHandleError(void* handle) const override;
 
   const AudioProcessing* apm_;
-  CriticalSectionWrapper* crit_;
+
+  rtc::CriticalSection* const crit_;
 };
 }  // namespace webrtc
 
diff --git a/webrtc/modules/audio_processing/level_estimator_impl.cc b/webrtc/modules/audio_processing/level_estimator_impl.cc
index 35fe697..52f6697 100644
--- a/webrtc/modules/audio_processing/level_estimator_impl.cc
+++ b/webrtc/modules/audio_processing/level_estimator_impl.cc
@@ -18,13 +18,17 @@
 namespace webrtc {
 
 LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessing* apm,
-                                       CriticalSectionWrapper* crit)
-    : ProcessingComponent(),
-      crit_(crit) {}
+                                       rtc::CriticalSection* crit)
+    : ProcessingComponent(), crit_(crit) {
+  RTC_DCHECK(apm);
+  RTC_DCHECK(crit);
+}
 
 LevelEstimatorImpl::~LevelEstimatorImpl() {}
 
 int LevelEstimatorImpl::ProcessStream(AudioBuffer* audio) {
+  rtc::CritScope cs(crit_);
+
   if (!is_component_enabled()) {
     return AudioProcessing::kNoError;
   }
@@ -39,15 +43,17 @@
 }
 
 int LevelEstimatorImpl::Enable(bool enable) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_);
   return EnableComponent(enable);
 }
 
 bool LevelEstimatorImpl::is_enabled() const {
+  rtc::CritScope cs(crit_);
   return is_component_enabled();
 }
 
 int LevelEstimatorImpl::RMS() {
+  rtc::CritScope cs(crit_);
   if (!is_component_enabled()) {
     return AudioProcessing::kNotEnabledError;
   }
@@ -67,6 +73,7 @@
 }
 
 int LevelEstimatorImpl::InitializeHandle(void* handle) const {
+  rtc::CritScope cs(crit_);
   static_cast<RMSLevel*>(handle)->Reset();
   return AudioProcessing::kNoError;
 }
diff --git a/webrtc/modules/audio_processing/level_estimator_impl.h b/webrtc/modules/audio_processing/level_estimator_impl.h
index 0d0050c..d560223 100644
--- a/webrtc/modules/audio_processing/level_estimator_impl.h
+++ b/webrtc/modules/audio_processing/level_estimator_impl.h
@@ -11,6 +11,7 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_IMPL_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_IMPL_H_
 
+#include "webrtc/base/criticalsection.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/audio_processing/processing_component.h"
 #include "webrtc/modules/audio_processing/rms_level.h"
@@ -18,13 +19,11 @@
 namespace webrtc {
 
 class AudioBuffer;
-class CriticalSectionWrapper;
 
 class LevelEstimatorImpl : public LevelEstimator,
                            public ProcessingComponent {
  public:
-  LevelEstimatorImpl(const AudioProcessing* apm,
-                     CriticalSectionWrapper* crit);
+  LevelEstimatorImpl(const AudioProcessing* apm, rtc::CriticalSection* crit);
   virtual ~LevelEstimatorImpl();
 
   int ProcessStream(AudioBuffer* audio);
@@ -45,7 +44,7 @@
   int num_handles_required() const override;
   int GetHandleError(void* handle) const override;
 
-  CriticalSectionWrapper* crit_;
+  rtc::CriticalSection* const crit_;
 };
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_processing/noise_suppression_impl.cc b/webrtc/modules/audio_processing/noise_suppression_impl.cc
index 84481f6..837585f 100644
--- a/webrtc/modules/audio_processing/noise_suppression_impl.cc
+++ b/webrtc/modules/audio_processing/noise_suppression_impl.cc
@@ -18,7 +18,6 @@
 #elif defined(WEBRTC_NS_FIXED)
 #include "webrtc/modules/audio_processing/ns/noise_suppression_x.h"
 #endif
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 
 
 namespace webrtc {
@@ -47,18 +46,18 @@
 }  // namespace
 
 NoiseSuppressionImpl::NoiseSuppressionImpl(const AudioProcessing* apm,
-                                           CriticalSectionWrapper* crit)
-  : ProcessingComponent(),
-    apm_(apm),
-    crit_(crit),
-    level_(kModerate) {}
+                                           rtc::CriticalSection* crit)
+    : ProcessingComponent(), apm_(apm), crit_(crit), level_(kModerate) {
+  RTC_DCHECK(apm);
+  RTC_DCHECK(crit);
+}
 
 NoiseSuppressionImpl::~NoiseSuppressionImpl() {}
 
 int NoiseSuppressionImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
 #if defined(WEBRTC_NS_FLOAT)
   if (!is_component_enabled()) {
-    return apm_->kNoError;
+    return AudioProcessing::kNoError;
   }
   assert(audio->num_frames_per_band() <= 160);
   assert(audio->num_channels() == num_handles());
@@ -69,12 +68,13 @@
     WebRtcNs_Analyze(my_handle, audio->split_bands_const_f(i)[kBand0To8kHz]);
   }
 #endif
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+  rtc::CritScope cs(crit_);
   if (!is_component_enabled()) {
-    return apm_->kNoError;
+    return AudioProcessing::kNoError;
   }
   assert(audio->num_frames_per_band() <= 160);
   assert(audio->num_channels() == num_handles());
@@ -93,22 +93,23 @@
                       audio->split_bands(i));
 #endif
   }
-  return apm_->kNoError;
+  return AudioProcessing::kNoError;
 }
 
 int NoiseSuppressionImpl::Enable(bool enable) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_);
   return EnableComponent(enable);
 }
 
 bool NoiseSuppressionImpl::is_enabled() const {
+  rtc::CritScope cs(crit_);
   return is_component_enabled();
 }
 
 int NoiseSuppressionImpl::set_level(Level level) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_);
   if (MapSetting(level) == -1) {
-    return apm_->kBadParameterError;
+    return AudioProcessing::kBadParameterError;
   }
 
   level_ = level;
@@ -116,10 +117,12 @@
 }
 
 NoiseSuppression::Level NoiseSuppressionImpl::level() const {
+  rtc::CritScope cs(crit_);
   return level_;
 }
 
 float NoiseSuppressionImpl::speech_probability() const {
+  rtc::CritScope cs(crit_);
 #if defined(WEBRTC_NS_FLOAT)
   float probability_average = 0.0f;
   for (int i = 0; i < num_handles(); i++) {
@@ -129,7 +132,7 @@
   return probability_average / num_handles();
 #elif defined(WEBRTC_NS_FIXED)
   // Currently not available for the fixed point implementation.
-  return apm_->kUnsupportedFunctionError;
+  return AudioProcessing::kUnsupportedFunctionError;
 #endif
 }
 
@@ -160,6 +163,7 @@
 }
 
 int NoiseSuppressionImpl::ConfigureHandle(void* handle) const {
+  rtc::CritScope cs(crit_);
 #if defined(WEBRTC_NS_FLOAT)
   return WebRtcNs_set_policy(static_cast<Handle*>(handle),
                              MapSetting(level_));
@@ -176,6 +180,6 @@
 int NoiseSuppressionImpl::GetHandleError(void* handle) const {
   // The NS has no get_error() function.
   assert(handle != NULL);
-  return apm_->kUnspecifiedError;
+  return AudioProcessing::kUnspecifiedError;
 }
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_processing/noise_suppression_impl.h b/webrtc/modules/audio_processing/noise_suppression_impl.h
index 76a39b8..1564fe5 100644
--- a/webrtc/modules/audio_processing/noise_suppression_impl.h
+++ b/webrtc/modules/audio_processing/noise_suppression_impl.h
@@ -11,19 +11,18 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NOISE_SUPPRESSION_IMPL_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_NOISE_SUPPRESSION_IMPL_H_
 
+#include "webrtc/base/criticalsection.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/audio_processing/processing_component.h"
 
 namespace webrtc {
 
 class AudioBuffer;
-class CriticalSectionWrapper;
 
 class NoiseSuppressionImpl : public NoiseSuppression,
                              public ProcessingComponent {
  public:
-  NoiseSuppressionImpl(const AudioProcessing* apm,
-                       CriticalSectionWrapper* crit);
+  NoiseSuppressionImpl(const AudioProcessing* apm, rtc::CriticalSection* crit);
   virtual ~NoiseSuppressionImpl();
 
   int AnalyzeCaptureAudio(AudioBuffer* audio);
@@ -47,9 +46,12 @@
   int num_handles_required() const override;
   int GetHandleError(void* handle) const override;
 
+  // Not guarded as its public API is thread safe.
   const AudioProcessing* apm_;
-  CriticalSectionWrapper* crit_;
-  Level level_;
+
+  rtc::CriticalSection* const crit_;
+
+  Level level_ GUARDED_BY(crit_);
 };
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_processing/voice_detection_impl.cc b/webrtc/modules/audio_processing/voice_detection_impl.cc
index 374189e..25c7269 100644
--- a/webrtc/modules/audio_processing/voice_detection_impl.cc
+++ b/webrtc/modules/audio_processing/voice_detection_impl.cc
@@ -12,9 +12,10 @@
 
 #include <assert.h>
 
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/thread_checker.h"
 #include "webrtc/common_audio/vad/include/webrtc_vad.h"
 #include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 
 namespace webrtc {
 
@@ -38,19 +39,23 @@
 }  // namespace
 
 VoiceDetectionImpl::VoiceDetectionImpl(const AudioProcessing* apm,
-                                       CriticalSectionWrapper* crit)
-  : ProcessingComponent(),
-    apm_(apm),
-    crit_(crit),
-    stream_has_voice_(false),
-    using_external_vad_(false),
-    likelihood_(kLowLikelihood),
-    frame_size_ms_(10),
-    frame_size_samples_(0) {}
+                                       rtc::CriticalSection* crit)
+    : ProcessingComponent(),
+      apm_(apm),
+      crit_(crit),
+      stream_has_voice_(false),
+      using_external_vad_(false),
+      likelihood_(kLowLikelihood),
+      frame_size_ms_(10),
+      frame_size_samples_(0) {
+  RTC_DCHECK(apm);
+  RTC_DCHECK(crit);
+}
 
 VoiceDetectionImpl::~VoiceDetectionImpl() {}
 
 int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+  rtc::CritScope cs(crit_);
   if (!is_component_enabled()) {
     return apm_->kNoError;
   }
@@ -81,28 +86,31 @@
 }
 
 int VoiceDetectionImpl::Enable(bool enable) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_);
   return EnableComponent(enable);
 }
 
 bool VoiceDetectionImpl::is_enabled() const {
+  rtc::CritScope cs(crit_);
   return is_component_enabled();
 }
 
 int VoiceDetectionImpl::set_stream_has_voice(bool has_voice) {
+  rtc::CritScope cs(crit_);
   using_external_vad_ = true;
   stream_has_voice_ = has_voice;
   return apm_->kNoError;
 }
 
 bool VoiceDetectionImpl::stream_has_voice() const {
+  rtc::CritScope cs(crit_);
   // TODO(ajm): enable this assertion?
   //assert(using_external_vad_ || is_component_enabled());
   return stream_has_voice_;
 }
 
 int VoiceDetectionImpl::set_likelihood(VoiceDetection::Likelihood likelihood) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_);
   if (MapSetting(likelihood) == -1) {
     return apm_->kBadParameterError;
   }
@@ -112,11 +120,12 @@
 }
 
 VoiceDetection::Likelihood VoiceDetectionImpl::likelihood() const {
+  rtc::CritScope cs(crit_);
   return likelihood_;
 }
 
 int VoiceDetectionImpl::set_frame_size_ms(int size) {
-  CriticalSectionScoped crit_scoped(crit_);
+  rtc::CritScope cs(crit_);
   assert(size == 10); // TODO(ajm): remove when supported.
   if (size != 10 &&
       size != 20 &&
@@ -130,11 +139,14 @@
 }
 
 int VoiceDetectionImpl::frame_size_ms() const {
+  rtc::CritScope cs(crit_);
   return frame_size_ms_;
 }
 
 int VoiceDetectionImpl::Initialize() {
   int err = ProcessingComponent::Initialize();
+
+  rtc::CritScope cs(crit_);
   if (err != apm_->kNoError || !is_component_enabled()) {
     return err;
   }
@@ -160,6 +172,7 @@
 }
 
 int VoiceDetectionImpl::ConfigureHandle(void* handle) const {
+  rtc::CritScope cs(crit_);
   return WebRtcVad_set_mode(static_cast<Handle*>(handle),
                             MapSetting(likelihood_));
 }
diff --git a/webrtc/modules/audio_processing/voice_detection_impl.h b/webrtc/modules/audio_processing/voice_detection_impl.h
index b188083..3a1193c 100644
--- a/webrtc/modules/audio_processing/voice_detection_impl.h
+++ b/webrtc/modules/audio_processing/voice_detection_impl.h
@@ -11,18 +11,18 @@
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_VOICE_DETECTION_IMPL_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_VOICE_DETECTION_IMPL_H_
 
+#include "webrtc/base/criticalsection.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/audio_processing/processing_component.h"
 
 namespace webrtc {
 
 class AudioBuffer;
-class CriticalSectionWrapper;
 
 class VoiceDetectionImpl : public VoiceDetection,
                            public ProcessingComponent {
  public:
-  VoiceDetectionImpl(const AudioProcessing* apm, CriticalSectionWrapper* crit);
+  VoiceDetectionImpl(const AudioProcessing* apm, rtc::CriticalSection* crit);
   virtual ~VoiceDetectionImpl();
 
   int ProcessCaptureAudio(AudioBuffer* audio);
@@ -51,13 +51,16 @@
   int num_handles_required() const override;
   int GetHandleError(void* handle) const override;
 
+  // Not guarded as its public API is thread safe.
   const AudioProcessing* apm_;
-  CriticalSectionWrapper* crit_;
-  bool stream_has_voice_;
-  bool using_external_vad_;
-  Likelihood likelihood_;
-  int frame_size_ms_;
-  size_t frame_size_samples_;
+
+  rtc::CriticalSection* const crit_;
+
+  bool stream_has_voice_ GUARDED_BY(crit_);
+  bool using_external_vad_ GUARDED_BY(crit_);
+  Likelihood likelihood_ GUARDED_BY(crit_);
+  int frame_size_ms_ GUARDED_BY(crit_);
+  size_t frame_size_samples_ GUARDED_BY(crit_);
 };
 }  // namespace webrtc
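
Taken together, the constructor changes imply one wiring pattern on the APM side (AudioProcessingImpl itself is outside this excerpt, so the sketch below is assumed, not quoted): APM owns a single render lock and a single capture lock, passes the same pair to every component that touches both paths, and passes just the capture lock to capture-only components such as the high-pass filter, level estimator, noise suppressor, and VAD. CritScopes in different submodules therefore contend on the same two underlying objects.

  struct ApmLockWiringSketch {
    explicit ApmLockWiringSketch(const AudioProcessing* apm)
        : gain_control_(
              new GainControlImpl(apm, &crit_render_, &crit_capture_)),
          voice_detection_(new VoiceDetectionImpl(apm, &crit_capture_)) {}

    rtc::CriticalSection crit_render_;   // APM-owned, shared by submodules.
    rtc::CriticalSection crit_capture_;  // APM-owned, shared by submodules.
    rtc::scoped_ptr<GainControlImpl> gain_control_;
    rtc::scoped_ptr<VoiceDetectionImpl> voice_detection_;
  };
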