Cleanup of iOS AudioDevice implementation

TBR=tkchin
BUG=webrtc:4789
TEST=modules_unittests --gtest_filter=AudioDeviceTest* and AppRTCDemo

Review URL: https://codereview.webrtc.org/1206783002 .

Cr-Commit-Position: refs/heads/master@{#9578}
diff --git a/webrtc/base/logging.cc b/webrtc/base/logging.cc
index 1ac1373..58f8f58 100644
--- a/webrtc/base/logging.cc
+++ b/webrtc/base/logging.cc
@@ -37,6 +37,7 @@
 #include <vector>
 
 #include "webrtc/base/logging.h"
+#include "webrtc/base/platform_thread.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/base/stringencode.h"
 #include "webrtc/base/stringutils.h"
@@ -111,10 +112,8 @@
   }
 
   if (thread_) {
-#if defined(WEBRTC_WIN)
-    DWORD id = GetCurrentThreadId();
-    print_stream_ << "[" << std::hex << id << std::dec << "] ";
-#endif  // WEBRTC_WIN
+    PlatformThreadId id = CurrentThreadId();
+    print_stream_ << "[" << std::dec << id << "] ";
   }
 
   if (err_ctx != ERRCTX_NONE) {
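
For context, the thread-id prefix is now emitted on every platform because CurrentThreadId() in webrtc/base/platform_thread.h wraps the native thread-id primitive for each OS. A minimal sketch of such a wrapper, assuming the usual per-platform calls (the real definitions live in platform_thread.{h,cc}):

  // Sketch only; needs <windows.h>, <pthread.h>, or <sys/syscall.h>
  // depending on the branch taken.
  #if defined(WEBRTC_WIN)
  typedef DWORD PlatformThreadId;
  #else
  typedef pid_t PlatformThreadId;
  #endif

  PlatformThreadId CurrentThreadId() {
  #if defined(WEBRTC_WIN)
    return GetCurrentThreadId();
  #elif defined(WEBRTC_MAC) || defined(WEBRTC_IOS)
    return pthread_mach_thread_np(pthread_self());
  #elif defined(WEBRTC_ANDROID)
    return gettid();
  #else
    return syscall(__NR_gettid);  // Linux.
  #endif
  }
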
diff --git a/webrtc/modules/audio_device/android/audio_manager.h b/webrtc/modules/audio_device/android/audio_manager.h
index 8d96d27..0bc8250 100644
--- a/webrtc/modules/audio_device/android/audio_manager.h
+++ b/webrtc/modules/audio_device/android/audio_manager.h
@@ -16,6 +16,7 @@
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/base/thread_checker.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
+#include "webrtc/modules/audio_device/audio_device_config.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
 #include "webrtc/modules/utility/interface/helpers_android.h"
@@ -23,60 +24,6 @@
 
 namespace webrtc {
 
-class AudioParameters {
- public:
-  enum { kBitsPerSample = 16 };
-  AudioParameters()
-      : sample_rate_(0),
-        channels_(0),
-        frames_per_buffer_(0),
-        frames_per_10ms_buffer_(0),
-        bits_per_sample_(kBitsPerSample) {}
-  AudioParameters(int sample_rate, int channels, int frames_per_buffer)
-      : sample_rate_(sample_rate),
-        channels_(channels),
-        frames_per_buffer_(frames_per_buffer),
-        frames_per_10ms_buffer_(sample_rate / 100),
-        bits_per_sample_(kBitsPerSample) {}
-  void reset(int sample_rate, int channels, int frames_per_buffer) {
-    sample_rate_ = sample_rate;
-    channels_ = channels;
-    frames_per_buffer_ = frames_per_buffer;
-    frames_per_10ms_buffer_ = (sample_rate / 100);
-  }
-  int sample_rate() const { return sample_rate_; }
-  int channels() const { return channels_; }
-  int frames_per_buffer() const { return frames_per_buffer_; }
-  int frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
-  int bits_per_sample() const { return bits_per_sample_; }
-  bool is_valid() const {
-    return ((sample_rate_ > 0) && (channels_ > 0) && (frames_per_buffer_ > 0));
-  }
-  int GetBytesPerFrame() const { return channels_ * bits_per_sample_ / 8; }
-  int GetBytesPerBuffer() const {
-    return frames_per_buffer_ * GetBytesPerFrame();
-  }
-  int GetBytesPer10msBuffer() const {
-    return frames_per_10ms_buffer_ * GetBytesPerFrame();
-  }
-  float GetBufferSizeInMilliseconds() const {
-    if (sample_rate_ == 0)
-      return 0.0f;
-    return frames_per_buffer_ / (sample_rate_ / 1000.0f);
-  }
-
- private:
-  int sample_rate_;
-  int channels_;
-  // Lowest possible size of native audio buffer. Measured in number of frames.
-  // This size is injected into the OpenSL ES output (since it does not "talk
-  // Java") implementation but is currently not utilized by the Java
-  // implementation since it aquires the same value internally.
-  int frames_per_buffer_;
-  int frames_per_10ms_buffer_;
-  int bits_per_sample_;
-};
-
 // Implements support for functions in the WebRTC audio stack for Android that
 // relies on the AudioManager in android.media. It also populates an
 // AudioParameter structure with native audio parameters detected at
diff --git a/webrtc/modules/audio_device/audio_device.gypi b/webrtc/modules/audio_device/audio_device.gypi
index 9c9380e..97d5eca 100644
--- a/webrtc/modules/audio_device/audio_device.gypi
+++ b/webrtc/modules/audio_device/audio_device.gypi
@@ -103,6 +103,7 @@
             'audio_device_impl.h',
             'ios/audio_device_ios.h',
             'ios/audio_device_ios.mm',
+            'ios/audio_device_not_implemented_ios.mm',
             'linux/alsasymboltable_linux.cc',
             'linux/alsasymboltable_linux.h',
             'linux/audio_device_alsa_linux.cc',
@@ -177,6 +178,7 @@
                     '-framework AudioToolbox',
                     '-framework AVFoundation',
                     '-framework Foundation',
+                    '-framework UIKit',
                   ],
                 },
               },
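
The added OTHER_LDFLAGS entry is forwarded verbatim to the link step, so the resulting invocation looks roughly like the following (tool name and output target are illustrative only):

  clang++ ... -framework AudioToolbox -framework AVFoundation \
      -framework Foundation -framework UIKit \
      -o modules_unittests
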
diff --git a/webrtc/modules/audio_device/audio_device_buffer.cc b/webrtc/modules/audio_device/audio_device_buffer.cc
index 12b28b3..3cfbc7d 100644
--- a/webrtc/modules/audio_device/audio_device_buffer.cc
+++ b/webrtc/modules/audio_device/audio_device_buffer.cc
@@ -130,8 +130,6 @@
 
 int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz)
 {
-    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "AudioDeviceBuffer::SetRecordingSampleRate(fsHz=%u)", fsHz);
-
     CriticalSectionScoped lock(&_critSect);
     _recSampleRate = fsHz;
     return 0;
@@ -143,8 +141,6 @@
 
 int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz)
 {
-    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "AudioDeviceBuffer::SetPlayoutSampleRate(fsHz=%u)", fsHz);
-
     CriticalSectionScoped lock(&_critSect);
     _playSampleRate = fsHz;
     return 0;
@@ -174,8 +170,6 @@
 
 int32_t AudioDeviceBuffer::SetRecordingChannels(uint8_t channels)
 {
-    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "AudioDeviceBuffer::SetRecordingChannels(channels=%u)", channels);
-
     CriticalSectionScoped lock(&_critSect);
     _recChannels = channels;
     _recBytesPerSample = 2*channels;  // 16 bits per sample in mono, 32 bits in stereo
@@ -188,8 +182,6 @@
 
 int32_t AudioDeviceBuffer::SetPlayoutChannels(uint8_t channels)
 {
-    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "AudioDeviceBuffer::SetPlayoutChannels(channels=%u)", channels);
-
     CriticalSectionScoped lock(&_critSect);
     _playChannels = channels;
     // 16 bits per sample in mono, 32 bits in stereo
diff --git a/webrtc/modules/audio_device/audio_device_generic.cc b/webrtc/modules/audio_device/audio_device_generic.cc
index 958abbf..4576f0e 100644
--- a/webrtc/modules/audio_device/audio_device_generic.cc
+++ b/webrtc/modules/audio_device/audio_device_generic.cc
@@ -9,73 +9,68 @@
  */
 
 #include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/system_wrappers/interface/trace.h"
+#include "webrtc/base/logging.h"
 
 namespace webrtc {
 
 int32_t AudioDeviceGeneric::SetRecordingSampleRate(
-    const uint32_t samplesPerSec)
-{
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-        "Set recording sample rate not supported on this platform");
-    return -1;
+    const uint32_t samplesPerSec) {
+  LOG_F(LS_ERROR) << "Not supported on this platform";
+  return -1;
 }
 
-int32_t AudioDeviceGeneric::SetPlayoutSampleRate(
-    const uint32_t samplesPerSec)
-{
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-        "Set playout sample rate not supported on this platform");
-    return -1;
+int32_t AudioDeviceGeneric::SetPlayoutSampleRate(const uint32_t samplesPerSec) {
+  LOG_F(LS_ERROR) << "Not supported on this platform";
+  return -1;
 }
 
-int32_t AudioDeviceGeneric::SetLoudspeakerStatus(bool enable)
-{
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-        "Set loudspeaker status not supported on this platform");
-    return -1;
+int32_t AudioDeviceGeneric::SetLoudspeakerStatus(bool enable) {
+  LOG_F(LS_ERROR) << "Not supported on this platform";
+  return -1;
 }
 
-int32_t AudioDeviceGeneric::GetLoudspeakerStatus(bool& enable) const
-{
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-        "Get loudspeaker status not supported on this platform");
-    return -1;
+int32_t AudioDeviceGeneric::GetLoudspeakerStatus(bool& enable) const {
+  LOG_F(LS_ERROR) << "Not supported on this platform";
+  return -1;
 }
 
-int32_t AudioDeviceGeneric::ResetAudioDevice()
-{
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-        "Reset audio device not supported on this platform");
-    return -1;
+int32_t AudioDeviceGeneric::ResetAudioDevice() {
+  LOG_F(LS_ERROR) << "Not supported on this platform";
+  return -1;
 }
 
 int32_t AudioDeviceGeneric::SoundDeviceControl(unsigned int par1,
-    unsigned int par2, unsigned int par3, unsigned int par4)
-{
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-        "Sound device control not supported on this platform");
-    return -1;
+                                               unsigned int par2,
+                                               unsigned int par3,
+                                               unsigned int par4) {
+  LOG_F(LS_ERROR) << "Not supported on this platform";
+  return -1;
 }
 
 bool AudioDeviceGeneric::BuiltInAECIsAvailable() const {
-  WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-      "Built-in AEC not supported on this platform");
+  LOG_F(LS_ERROR) << "Not supported on this platform";
   return false;
 }
 
-int32_t AudioDeviceGeneric::EnableBuiltInAEC(bool enable)
-{
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-        "Built-in AEC not supported on this platform");
-    return -1;
+int32_t AudioDeviceGeneric::EnableBuiltInAEC(bool enable) {
+  LOG_F(LS_ERROR) << "Not supported on this platform";
+  return -1;
 }
 
-bool AudioDeviceGeneric::BuiltInAECIsEnabled() const
-{
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-        "Windows AEC not supported on this platform");
-    return false;
+bool AudioDeviceGeneric::BuiltInAECIsEnabled() const {
+  LOG_F(LS_ERROR) << "Not supported on this platform";
+  return false;
+}
+
+int AudioDeviceGeneric::GetPlayoutAudioParameters(
+    AudioParameters* params) const {
+  LOG_F(LS_ERROR) << "Not supported on this platform";
+  return -1;
+}
+
+int AudioDeviceGeneric::GetRecordAudioParameters(
+    AudioParameters* params) const {
+  LOG_F(LS_ERROR) << "Not supported on this platform";
+  return -1;
 }
 
 }  // namespace webrtc
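
LOG_F comes from webrtc/base/logging.h and prepends the calling function's name, which is why the per-method message strings above can collapse into one generic text. A hedged usage sketch with a hypothetical function (not part of this patch):

  #include "webrtc/base/logging.h"

  int32_t QueryUnsupportedFeature() {
    // LOG_F(sev) expands to roughly LOG(sev) << __FUNCTION__ << ": ", so this
    // logs "QueryUnsupportedFeature: Not supported on this platform".
    LOG_F(LS_ERROR) << "Not supported on this platform";
    return -1;
  }
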
diff --git a/webrtc/modules/audio_device/audio_device_generic.h b/webrtc/modules/audio_device/audio_device_generic.h
index 800cc39..2da3246 100644
--- a/webrtc/modules/audio_device/audio_device_generic.h
+++ b/webrtc/modules/audio_device/audio_device_generic.h
@@ -16,168 +16,160 @@
 
 namespace webrtc {
 
-class AudioDeviceGeneric
-{
+class AudioDeviceGeneric {
  public:
+  // Retrieve the currently utilized audio layer
+  virtual int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer& audioLayer) const = 0;
 
-	// Retrieve the currently utilized audio layer
-	virtual int32_t ActiveAudioLayer(
-        AudioDeviceModule::AudioLayer& audioLayer) const = 0;
+  // Main initialization and termination
+  virtual int32_t Init() = 0;
+  virtual int32_t Terminate() = 0;
+  virtual bool Initialized() const = 0;
 
-	// Main initializaton and termination
-    virtual int32_t Init() = 0;
-    virtual int32_t Terminate() = 0;
-	virtual bool Initialized() const = 0;
+  // Device enumeration
+  virtual int16_t PlayoutDevices() = 0;
+  virtual int16_t RecordingDevices() = 0;
+  virtual int32_t PlayoutDeviceName(uint16_t index,
+                                    char name[kAdmMaxDeviceNameSize],
+                                    char guid[kAdmMaxGuidSize]) = 0;
+  virtual int32_t RecordingDeviceName(uint16_t index,
+                                      char name[kAdmMaxDeviceNameSize],
+                                      char guid[kAdmMaxGuidSize]) = 0;
 
-	// Device enumeration
-	virtual int16_t PlayoutDevices() = 0;
-	virtual int16_t RecordingDevices() = 0;
-	virtual int32_t PlayoutDeviceName(
-        uint16_t index,
-        char name[kAdmMaxDeviceNameSize],
-        char guid[kAdmMaxGuidSize]) = 0;
-    virtual int32_t RecordingDeviceName(
-        uint16_t index,
-        char name[kAdmMaxDeviceNameSize],
-        char guid[kAdmMaxGuidSize]) = 0;
+  // Device selection
+  virtual int32_t SetPlayoutDevice(uint16_t index) = 0;
+  virtual int32_t SetPlayoutDevice(
+      AudioDeviceModule::WindowsDeviceType device) = 0;
+  virtual int32_t SetRecordingDevice(uint16_t index) = 0;
+  virtual int32_t SetRecordingDevice(
+      AudioDeviceModule::WindowsDeviceType device) = 0;
 
-	// Device selection
-	virtual int32_t SetPlayoutDevice(uint16_t index) = 0;
-	virtual int32_t SetPlayoutDevice(
-        AudioDeviceModule::WindowsDeviceType device) = 0;
-    virtual int32_t SetRecordingDevice(uint16_t index) = 0;
-	virtual int32_t SetRecordingDevice(
-        AudioDeviceModule::WindowsDeviceType device) = 0;
+  // Audio transport initialization
+  virtual int32_t PlayoutIsAvailable(bool& available) = 0;
+  virtual int32_t InitPlayout() = 0;
+  virtual bool PlayoutIsInitialized() const = 0;
+  virtual int32_t RecordingIsAvailable(bool& available) = 0;
+  virtual int32_t InitRecording() = 0;
+  virtual bool RecordingIsInitialized() const = 0;
 
-	// Audio transport initialization
-    virtual int32_t PlayoutIsAvailable(bool& available) = 0;
-    virtual int32_t InitPlayout() = 0;
-    virtual bool PlayoutIsInitialized() const = 0;
-    virtual int32_t RecordingIsAvailable(bool& available) = 0;
-    virtual int32_t InitRecording() = 0;
-    virtual bool RecordingIsInitialized() const = 0;
+  // Audio transport control
+  virtual int32_t StartPlayout() = 0;
+  virtual int32_t StopPlayout() = 0;
+  virtual bool Playing() const = 0;
+  virtual int32_t StartRecording() = 0;
+  virtual int32_t StopRecording() = 0;
+  virtual bool Recording() const = 0;
 
-	// Audio transport control
-    virtual int32_t StartPlayout() = 0;
-    virtual int32_t StopPlayout() = 0;
-    virtual bool Playing() const = 0;
-	virtual int32_t StartRecording() = 0;
-    virtual int32_t StopRecording() = 0;
-    virtual bool Recording() const = 0;
+  // Microphone Automatic Gain Control (AGC)
+  virtual int32_t SetAGC(bool enable) = 0;
+  virtual bool AGC() const = 0;
 
-    // Microphone Automatic Gain Control (AGC)
-    virtual int32_t SetAGC(bool enable) = 0;
-    virtual bool AGC() const = 0;
+  // Volume control based on the Windows Wave API (Windows only)
+  virtual int32_t SetWaveOutVolume(uint16_t volumeLeft,
+                                   uint16_t volumeRight) = 0;
+  virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
+                                uint16_t& volumeRight) const = 0;
 
-    // Volume control based on the Windows Wave API (Windows only)
-    virtual int32_t SetWaveOutVolume(uint16_t volumeLeft,
-                                     uint16_t volumeRight) = 0;
-    virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
-                                  uint16_t& volumeRight) const = 0;
+  // Audio mixer initialization
+  virtual int32_t InitSpeaker() = 0;
+  virtual bool SpeakerIsInitialized() const = 0;
+  virtual int32_t InitMicrophone() = 0;
+  virtual bool MicrophoneIsInitialized() const = 0;
 
-	// Audio mixer initialization
-    virtual int32_t InitSpeaker() = 0;
-    virtual bool SpeakerIsInitialized() const = 0;
-    virtual int32_t InitMicrophone() = 0;
-    virtual bool MicrophoneIsInitialized() const = 0;
+  // Speaker volume controls
+  virtual int32_t SpeakerVolumeIsAvailable(bool& available) = 0;
+  virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
+  virtual int32_t SpeakerVolume(uint32_t& volume) const = 0;
+  virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const = 0;
+  virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const = 0;
+  virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const = 0;
 
-    // Speaker volume controls
-	virtual int32_t SpeakerVolumeIsAvailable(bool& available) = 0;
-    virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
-    virtual int32_t SpeakerVolume(uint32_t& volume) const = 0;
-    virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const = 0;
-    virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const = 0;
-    virtual int32_t SpeakerVolumeStepSize(
-        uint16_t& stepSize) const = 0;
+  // Microphone volume controls
+  virtual int32_t MicrophoneVolumeIsAvailable(bool& available) = 0;
+  virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
+  virtual int32_t MicrophoneVolume(uint32_t& volume) const = 0;
+  virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const = 0;
+  virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const = 0;
+  virtual int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const = 0;
 
-    // Microphone volume controls
-	virtual int32_t MicrophoneVolumeIsAvailable(bool& available) = 0;
-    virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
-    virtual int32_t MicrophoneVolume(uint32_t& volume) const = 0;
-    virtual int32_t MaxMicrophoneVolume(
-        uint32_t& maxVolume) const = 0;
-    virtual int32_t MinMicrophoneVolume(
-        uint32_t& minVolume) const = 0;
-    virtual int32_t MicrophoneVolumeStepSize(
-        uint16_t& stepSize) const = 0;
+  // Speaker mute control
+  virtual int32_t SpeakerMuteIsAvailable(bool& available) = 0;
+  virtual int32_t SetSpeakerMute(bool enable) = 0;
+  virtual int32_t SpeakerMute(bool& enabled) const = 0;
 
-    // Speaker mute control
-    virtual int32_t SpeakerMuteIsAvailable(bool& available) = 0;
-    virtual int32_t SetSpeakerMute(bool enable) = 0;
-    virtual int32_t SpeakerMute(bool& enabled) const = 0;
+  // Microphone mute control
+  virtual int32_t MicrophoneMuteIsAvailable(bool& available) = 0;
+  virtual int32_t SetMicrophoneMute(bool enable) = 0;
+  virtual int32_t MicrophoneMute(bool& enabled) const = 0;
 
-	// Microphone mute control
-    virtual int32_t MicrophoneMuteIsAvailable(bool& available) = 0;
-    virtual int32_t SetMicrophoneMute(bool enable) = 0;
-    virtual int32_t MicrophoneMute(bool& enabled) const = 0;
+  // Microphone boost control
+  virtual int32_t MicrophoneBoostIsAvailable(bool& available) = 0;
+  virtual int32_t SetMicrophoneBoost(bool enable) = 0;
+  virtual int32_t MicrophoneBoost(bool& enabled) const = 0;
 
-    // Microphone boost control
-    virtual int32_t MicrophoneBoostIsAvailable(bool& available) = 0;
-	virtual int32_t SetMicrophoneBoost(bool enable) = 0;
-    virtual int32_t MicrophoneBoost(bool& enabled) const = 0;
+  // Stereo support
+  virtual int32_t StereoPlayoutIsAvailable(bool& available) = 0;
+  virtual int32_t SetStereoPlayout(bool enable) = 0;
+  virtual int32_t StereoPlayout(bool& enabled) const = 0;
+  virtual int32_t StereoRecordingIsAvailable(bool& available) = 0;
+  virtual int32_t SetStereoRecording(bool enable) = 0;
+  virtual int32_t StereoRecording(bool& enabled) const = 0;
 
-    // Stereo support
-    virtual int32_t StereoPlayoutIsAvailable(bool& available) = 0;
-	virtual int32_t SetStereoPlayout(bool enable) = 0;
-    virtual int32_t StereoPlayout(bool& enabled) const = 0;
-    virtual int32_t StereoRecordingIsAvailable(bool& available) = 0;
-    virtual int32_t SetStereoRecording(bool enable) = 0;
-    virtual int32_t StereoRecording(bool& enabled) const = 0;
+  // Delay information and control
+  virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+                                   uint16_t sizeMS = 0) = 0;
+  virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
+                                uint16_t& sizeMS) const = 0;
+  virtual int32_t PlayoutDelay(uint16_t& delayMS) const = 0;
+  virtual int32_t RecordingDelay(uint16_t& delayMS) const = 0;
 
-    // Delay information and control
-	virtual int32_t SetPlayoutBuffer(
-        const AudioDeviceModule::BufferType type,
-        uint16_t sizeMS = 0) = 0;
-    virtual int32_t PlayoutBuffer(
-        AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const = 0;
-    virtual int32_t PlayoutDelay(uint16_t& delayMS) const = 0;
-	virtual int32_t RecordingDelay(uint16_t& delayMS) const = 0;
+  // CPU load
+  virtual int32_t CPULoad(uint16_t& load) const = 0;
 
-    // CPU load
-    virtual int32_t CPULoad(uint16_t& load) const = 0;
+  // Native sample rate controls (samples/sec)
+  virtual int32_t SetRecordingSampleRate(const uint32_t samplesPerSec);
+  virtual int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec);
 
-    // Native sample rate controls (samples/sec)
-	virtual int32_t SetRecordingSampleRate(
-        const uint32_t samplesPerSec);
-	virtual int32_t SetPlayoutSampleRate(
-        const uint32_t samplesPerSec);
+  // Speaker audio routing (for mobile devices)
+  virtual int32_t SetLoudspeakerStatus(bool enable);
+  virtual int32_t GetLoudspeakerStatus(bool& enable) const;
 
-    // Speaker audio routing (for mobile devices)
-    virtual int32_t SetLoudspeakerStatus(bool enable);
-    virtual int32_t GetLoudspeakerStatus(bool& enable) const;
+  // Reset Audio Device (for mobile devices)
+  virtual int32_t ResetAudioDevice();
 
-    // Reset Audio Device (for mobile devices)
-    virtual int32_t ResetAudioDevice();
+  // Sound Audio Device control (for WinCE only)
+  virtual int32_t SoundDeviceControl(unsigned int par1 = 0,
+                                     unsigned int par2 = 0,
+                                     unsigned int par3 = 0,
+                                     unsigned int par4 = 0);
 
-    // Sound Audio Device control (for WinCE only)
-    virtual int32_t SoundDeviceControl(unsigned int par1 = 0,
-                                       unsigned int par2 = 0,
-                                       unsigned int par3 = 0,
-                                       unsigned int par4 = 0);
+  // Android only
+  virtual bool BuiltInAECIsAvailable() const;
 
-    // Android only
-    virtual bool BuiltInAECIsAvailable() const;
+  // Windows Core Audio and Android only.
+  virtual int32_t EnableBuiltInAEC(bool enable);
 
-    // Windows Core Audio and Android only.
-    virtual int32_t EnableBuiltInAEC(bool enable);
+  // Windows Core Audio only.
+  virtual bool BuiltInAECIsEnabled() const;
 
-    // Windows Core Audio only.
-    virtual bool BuiltInAECIsEnabled() const;
+  // iOS only.
+  // TODO(henrika): add Android support.
+  virtual int GetPlayoutAudioParameters(AudioParameters* params) const;
+  virtual int GetRecordAudioParameters(AudioParameters* params) const;
 
-public:
-    virtual bool PlayoutWarning() const = 0;
-    virtual bool PlayoutError() const = 0;
-    virtual bool RecordingWarning() const = 0;
-    virtual bool RecordingError() const = 0;
-    virtual void ClearPlayoutWarning() = 0;
-    virtual void ClearPlayoutError() = 0;
-    virtual void ClearRecordingWarning() = 0;
-    virtual void ClearRecordingError() = 0;
+  virtual bool PlayoutWarning() const = 0;
+  virtual bool PlayoutError() const = 0;
+  virtual bool RecordingWarning() const = 0;
+  virtual bool RecordingError() const = 0;
+  virtual void ClearPlayoutWarning() = 0;
+  virtual void ClearPlayoutError() = 0;
+  virtual void ClearRecordingWarning() = 0;
+  virtual void ClearRecordingError() = 0;
 
-public:
-    virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;
+  virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;
 
-    virtual ~AudioDeviceGeneric() {}
+  virtual ~AudioDeviceGeneric() {}
 };
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_device/audio_device_impl.cc b/webrtc/modules/audio_device/audio_device_impl.cc
index c29ac62..01c8cee 100644
--- a/webrtc/modules/audio_device/audio_device_impl.cc
+++ b/webrtc/modules/audio_device/audio_device_impl.cc
@@ -325,7 +325,7 @@
     if (audioLayer == kPlatformDefaultAudio)
     {
         // Create iOS Audio Device implementation.
-        ptrAudioDevice = new AudioDeviceIOS(Id());
+        ptrAudioDevice = new AudioDeviceIOS();
         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "iPhone Audio APIs will be utilized");
     }
     // END #if defined(WEBRTC_IOS)
@@ -1899,6 +1899,16 @@
   return _ptrAudioDevice->BuiltInAECIsAvailable();
 }
 
+int AudioDeviceModuleImpl::GetPlayoutAudioParameters(
+    AudioParameters* params) const {
+  return _ptrAudioDevice->GetPlayoutAudioParameters(params);
+}
+
+int AudioDeviceModuleImpl::GetRecordAudioParameters(
+    AudioParameters* params) const {
+  return _ptrAudioDevice->GetRecordAudioParameters(params);
+}
+
 // ============================================================================
 //                                 Private Methods
 // ============================================================================
diff --git a/webrtc/modules/audio_device/audio_device_impl.h b/webrtc/modules/audio_device/audio_device_impl.h
index efc305b..24b5982 100644
--- a/webrtc/modules/audio_device/audio_device_impl.h
+++ b/webrtc/modules/audio_device/audio_device_impl.h
@@ -18,217 +18,209 @@
 #include "webrtc/modules/audio_device/audio_device_buffer.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
 class AudioDeviceGeneric;
 class AudioManager;
 class CriticalSectionWrapper;
 
-class AudioDeviceModuleImpl : public AudioDeviceModule
-{
-public:
-    enum PlatformType
-    {
-        kPlatformNotSupported = 0,
-        kPlatformWin32 = 1,
-        kPlatformWinCe = 2,
-        kPlatformLinux = 3,
-        kPlatformMac = 4,
-        kPlatformAndroid = 5,
-        kPlatformIOS = 6
-    };
+class AudioDeviceModuleImpl : public AudioDeviceModule {
+ public:
+  enum PlatformType {
+    kPlatformNotSupported = 0,
+    kPlatformWin32 = 1,
+    kPlatformWinCe = 2,
+    kPlatformLinux = 3,
+    kPlatformMac = 4,
+    kPlatformAndroid = 5,
+    kPlatformIOS = 6
+  };
 
-    int32_t CheckPlatform();
-    int32_t CreatePlatformSpecificObjects();
-    int32_t AttachAudioBuffer();
+  int32_t CheckPlatform();
+  int32_t CreatePlatformSpecificObjects();
+  int32_t AttachAudioBuffer();
 
-    AudioDeviceModuleImpl(const int32_t id, const AudioLayer audioLayer);
-    virtual ~AudioDeviceModuleImpl();
+  AudioDeviceModuleImpl(const int32_t id, const AudioLayer audioLayer);
+  virtual ~AudioDeviceModuleImpl();
 
-public: // RefCountedModule
- int64_t TimeUntilNextProcess() override;
- int32_t Process() override;
+  int64_t TimeUntilNextProcess() override;
+  int32_t Process() override;
 
-public:
-    // Factory methods (resource allocation/deallocation)
-    static AudioDeviceModule* Create(
-        const int32_t id,
-        const AudioLayer audioLayer = kPlatformDefaultAudio);
+  // Factory methods (resource allocation/deallocation)
+  static AudioDeviceModule* Create(
+      const int32_t id,
+      const AudioLayer audioLayer = kPlatformDefaultAudio);
 
-    // Retrieve the currently utilized audio layer
-    int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
+  // Retrieve the currently utilized audio layer
+  int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
 
-    // Error handling
-    ErrorCode LastError() const override;
-    int32_t RegisterEventObserver(AudioDeviceObserver* eventCallback) override;
+  // Error handling
+  ErrorCode LastError() const override;
+  int32_t RegisterEventObserver(AudioDeviceObserver* eventCallback) override;
 
-    // Full-duplex transportation of PCM audio
-    int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
+  // Full-duplex transportation of PCM audio
+  int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
 
-    // Main initializaton and termination
-    int32_t Init() override;
-    int32_t Terminate() override;
-    bool Initialized() const override;
+  // Main initialization and termination
+  int32_t Init() override;
+  int32_t Terminate() override;
+  bool Initialized() const override;
 
-    // Device enumeration
-    int16_t PlayoutDevices() override;
-    int16_t RecordingDevices() override;
-    int32_t PlayoutDeviceName(uint16_t index,
+  // Device enumeration
+  int16_t PlayoutDevices() override;
+  int16_t RecordingDevices() override;
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override;
+  int32_t RecordingDeviceName(uint16_t index,
                               char name[kAdmMaxDeviceNameSize],
                               char guid[kAdmMaxGuidSize]) override;
-    int32_t RecordingDeviceName(uint16_t index,
-                                char name[kAdmMaxDeviceNameSize],
-                                char guid[kAdmMaxGuidSize]) override;
 
-    // Device selection
-    int32_t SetPlayoutDevice(uint16_t index) override;
-    int32_t SetPlayoutDevice(WindowsDeviceType device) override;
-    int32_t SetRecordingDevice(uint16_t index) override;
-    int32_t SetRecordingDevice(WindowsDeviceType device) override;
+  // Device selection
+  int32_t SetPlayoutDevice(uint16_t index) override;
+  int32_t SetPlayoutDevice(WindowsDeviceType device) override;
+  int32_t SetRecordingDevice(uint16_t index) override;
+  int32_t SetRecordingDevice(WindowsDeviceType device) override;
 
-    // Audio transport initialization
-    int32_t PlayoutIsAvailable(bool* available) override;
-    int32_t InitPlayout() override;
-    bool PlayoutIsInitialized() const override;
-    int32_t RecordingIsAvailable(bool* available) override;
-    int32_t InitRecording() override;
-    bool RecordingIsInitialized() const override;
+  // Audio transport initialization
+  int32_t PlayoutIsAvailable(bool* available) override;
+  int32_t InitPlayout() override;
+  bool PlayoutIsInitialized() const override;
+  int32_t RecordingIsAvailable(bool* available) override;
+  int32_t InitRecording() override;
+  bool RecordingIsInitialized() const override;
 
-    // Audio transport control
-    int32_t StartPlayout() override;
-    int32_t StopPlayout() override;
-    bool Playing() const override;
-    int32_t StartRecording() override;
-    int32_t StopRecording() override;
-    bool Recording() const override;
+  // Audio transport control
+  int32_t StartPlayout() override;
+  int32_t StopPlayout() override;
+  bool Playing() const override;
+  int32_t StartRecording() override;
+  int32_t StopRecording() override;
+  bool Recording() const override;
 
-    // Microphone Automatic Gain Control (AGC)
-    int32_t SetAGC(bool enable) override;
-    bool AGC() const override;
+  // Microphone Automatic Gain Control (AGC)
+  int32_t SetAGC(bool enable) override;
+  bool AGC() const override;
 
-    // Volume control based on the Windows Wave API (Windows only)
-    int32_t SetWaveOutVolume(uint16_t volumeLeft,
-                             uint16_t volumeRight) override;
-    int32_t WaveOutVolume(uint16_t* volumeLeft,
-                          uint16_t* volumeRight) const override;
+  // Volume control based on the Windows Wave API (Windows only)
+  int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight) override;
+  int32_t WaveOutVolume(uint16_t* volumeLeft,
+                        uint16_t* volumeRight) const override;
 
-    // Audio mixer initialization
-    int32_t InitSpeaker() override;
-    bool SpeakerIsInitialized() const override;
-    int32_t InitMicrophone() override;
-    bool MicrophoneIsInitialized() const override;
+  // Audio mixer initialization
+  int32_t InitSpeaker() override;
+  bool SpeakerIsInitialized() const override;
+  int32_t InitMicrophone() override;
+  bool MicrophoneIsInitialized() const override;
 
-    // Speaker volume controls
-    int32_t SpeakerVolumeIsAvailable(bool* available) override;
-    int32_t SetSpeakerVolume(uint32_t volume) override;
-    int32_t SpeakerVolume(uint32_t* volume) const override;
-    int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
-    int32_t MinSpeakerVolume(uint32_t* minVolume) const override;
-    int32_t SpeakerVolumeStepSize(uint16_t* stepSize) const override;
+  // Speaker volume controls
+  int32_t SpeakerVolumeIsAvailable(bool* available) override;
+  int32_t SetSpeakerVolume(uint32_t volume) override;
+  int32_t SpeakerVolume(uint32_t* volume) const override;
+  int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
+  int32_t MinSpeakerVolume(uint32_t* minVolume) const override;
+  int32_t SpeakerVolumeStepSize(uint16_t* stepSize) const override;
 
-    // Microphone volume controls
-    int32_t MicrophoneVolumeIsAvailable(bool* available) override;
-    int32_t SetMicrophoneVolume(uint32_t volume) override;
-    int32_t MicrophoneVolume(uint32_t* volume) const override;
-    int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
-    int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;
-    int32_t MicrophoneVolumeStepSize(uint16_t* stepSize) const override;
+  // Microphone volume controls
+  int32_t MicrophoneVolumeIsAvailable(bool* available) override;
+  int32_t SetMicrophoneVolume(uint32_t volume) override;
+  int32_t MicrophoneVolume(uint32_t* volume) const override;
+  int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
+  int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;
+  int32_t MicrophoneVolumeStepSize(uint16_t* stepSize) const override;
 
-    // Speaker mute control
-    int32_t SpeakerMuteIsAvailable(bool* available) override;
-    int32_t SetSpeakerMute(bool enable) override;
-    int32_t SpeakerMute(bool* enabled) const override;
+  // Speaker mute control
+  int32_t SpeakerMuteIsAvailable(bool* available) override;
+  int32_t SetSpeakerMute(bool enable) override;
+  int32_t SpeakerMute(bool* enabled) const override;
 
-    // Microphone mute control
-    int32_t MicrophoneMuteIsAvailable(bool* available) override;
-    int32_t SetMicrophoneMute(bool enable) override;
-    int32_t MicrophoneMute(bool* enabled) const override;
+  // Microphone mute control
+  int32_t MicrophoneMuteIsAvailable(bool* available) override;
+  int32_t SetMicrophoneMute(bool enable) override;
+  int32_t MicrophoneMute(bool* enabled) const override;
 
-    // Microphone boost control
-    int32_t MicrophoneBoostIsAvailable(bool* available) override;
-    int32_t SetMicrophoneBoost(bool enable) override;
-    int32_t MicrophoneBoost(bool* enabled) const override;
+  // Microphone boost control
+  int32_t MicrophoneBoostIsAvailable(bool* available) override;
+  int32_t SetMicrophoneBoost(bool enable) override;
+  int32_t MicrophoneBoost(bool* enabled) const override;
 
-    // Stereo support
-    int32_t StereoPlayoutIsAvailable(bool* available) const override;
-    int32_t SetStereoPlayout(bool enable) override;
-    int32_t StereoPlayout(bool* enabled) const override;
-    int32_t StereoRecordingIsAvailable(bool* available) const override;
-    int32_t SetStereoRecording(bool enable) override;
-    int32_t StereoRecording(bool* enabled) const override;
-    int32_t SetRecordingChannel(const ChannelType channel) override;
-    int32_t RecordingChannel(ChannelType* channel) const override;
+  // Stereo support
+  int32_t StereoPlayoutIsAvailable(bool* available) const override;
+  int32_t SetStereoPlayout(bool enable) override;
+  int32_t StereoPlayout(bool* enabled) const override;
+  int32_t StereoRecordingIsAvailable(bool* available) const override;
+  int32_t SetStereoRecording(bool enable) override;
+  int32_t StereoRecording(bool* enabled) const override;
+  int32_t SetRecordingChannel(const ChannelType channel) override;
+  int32_t RecordingChannel(ChannelType* channel) const override;
 
-    // Delay information and control
-    int32_t SetPlayoutBuffer(const BufferType type,
-                             uint16_t sizeMS = 0) override;
-    int32_t PlayoutBuffer(BufferType* type, uint16_t* sizeMS) const override;
-    int32_t PlayoutDelay(uint16_t* delayMS) const override;
-    int32_t RecordingDelay(uint16_t* delayMS) const override;
+  // Delay information and control
+  int32_t SetPlayoutBuffer(const BufferType type, uint16_t sizeMS = 0) override;
+  int32_t PlayoutBuffer(BufferType* type, uint16_t* sizeMS) const override;
+  int32_t PlayoutDelay(uint16_t* delayMS) const override;
+  int32_t RecordingDelay(uint16_t* delayMS) const override;
 
-    // CPU load
-    int32_t CPULoad(uint16_t* load) const override;
+  // CPU load
+  int32_t CPULoad(uint16_t* load) const override;
 
-    // Recording of raw PCM data
-    int32_t StartRawOutputFileRecording(
-        const char pcmFileNameUTF8[kAdmMaxFileNameSize]) override;
-    int32_t StopRawOutputFileRecording() override;
-    int32_t StartRawInputFileRecording(
-        const char pcmFileNameUTF8[kAdmMaxFileNameSize]) override;
-    int32_t StopRawInputFileRecording() override;
+  // Recording of raw PCM data
+  int32_t StartRawOutputFileRecording(
+      const char pcmFileNameUTF8[kAdmMaxFileNameSize]) override;
+  int32_t StopRawOutputFileRecording() override;
+  int32_t StartRawInputFileRecording(
+      const char pcmFileNameUTF8[kAdmMaxFileNameSize]) override;
+  int32_t StopRawInputFileRecording() override;
 
-    // Native sample rate controls (samples/sec)
-    int32_t SetRecordingSampleRate(const uint32_t samplesPerSec) override;
-    int32_t RecordingSampleRate(uint32_t* samplesPerSec) const override;
-    int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec) override;
-    int32_t PlayoutSampleRate(uint32_t* samplesPerSec) const override;
+  // Native sample rate controls (samples/sec)
+  int32_t SetRecordingSampleRate(const uint32_t samplesPerSec) override;
+  int32_t RecordingSampleRate(uint32_t* samplesPerSec) const override;
+  int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec) override;
+  int32_t PlayoutSampleRate(uint32_t* samplesPerSec) const override;
 
-    // Mobile device specific functions
-    int32_t ResetAudioDevice() override;
-    int32_t SetLoudspeakerStatus(bool enable) override;
-    int32_t GetLoudspeakerStatus(bool* enabled) const override;
+  // Mobile device specific functions
+  int32_t ResetAudioDevice() override;
+  int32_t SetLoudspeakerStatus(bool enable) override;
+  int32_t GetLoudspeakerStatus(bool* enabled) const override;
 
-    bool BuiltInAECIsAvailable() const override;
+  bool BuiltInAECIsAvailable() const override;
 
-    int32_t EnableBuiltInAEC(bool enable) override;
-    bool BuiltInAECIsEnabled() const override;
+  int32_t EnableBuiltInAEC(bool enable) override;
+  bool BuiltInAECIsEnabled() const override;
 
-public:
-    int32_t Id() {return _id;}
+  int GetPlayoutAudioParameters(AudioParameters* params) const override;
+  int GetRecordAudioParameters(AudioParameters* params) const override;
+
+  int32_t Id() { return _id; }
 #if defined(WEBRTC_ANDROID)
-    // Only use this acccessor for test purposes on Android.
-    AudioManager* GetAndroidAudioManagerForTest() {
-      return _audioManagerAndroid.get();
-    }
+  // Only use this accessor for test purposes on Android.
+  AudioManager* GetAndroidAudioManagerForTest() {
+    return _audioManagerAndroid.get();
+  }
 #endif
-    AudioDeviceBuffer* GetAudioDeviceBuffer() {
-      return &_audioDeviceBuffer;
-    }
+  AudioDeviceBuffer* GetAudioDeviceBuffer() { return &_audioDeviceBuffer; }
 
-private:
-    PlatformType Platform() const;
-    AudioLayer PlatformAudioLayer() const;
+ private:
+  PlatformType Platform() const;
+  AudioLayer PlatformAudioLayer() const;
 
-private:
-    CriticalSectionWrapper&      _critSect;
-    CriticalSectionWrapper&      _critSectEventCb;
-    CriticalSectionWrapper&      _critSectAudioCb;
+  CriticalSectionWrapper& _critSect;
+  CriticalSectionWrapper& _critSectEventCb;
+  CriticalSectionWrapper& _critSectAudioCb;
 
-    AudioDeviceObserver*         _ptrCbAudioDeviceObserver;
+  AudioDeviceObserver* _ptrCbAudioDeviceObserver;
 
-    AudioDeviceGeneric*          _ptrAudioDevice;
+  AudioDeviceGeneric* _ptrAudioDevice;
 
-    AudioDeviceBuffer            _audioDeviceBuffer;
+  AudioDeviceBuffer _audioDeviceBuffer;
 #if defined(WEBRTC_ANDROID)
-    rtc::scoped_ptr<AudioManager> _audioManagerAndroid;
+  rtc::scoped_ptr<AudioManager> _audioManagerAndroid;
 #endif
-    int32_t                      _id;
-    AudioLayer                   _platformAudioLayer;
-    int64_t                      _lastProcessTime;
-    PlatformType                 _platformType;
-    bool                         _initialized;
-    mutable ErrorCode            _lastError;
+  int32_t _id;
+  AudioLayer _platformAudioLayer;
+  int64_t _lastProcessTime;
+  PlatformType _platformType;
+  bool _initialized;
+  mutable ErrorCode _lastError;
 };
 
 }  // namespace webrtc
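
For reference, the factory declared above is used roughly as follows; module lifetime management and error handling are elided in this sketch:

  #include "webrtc/modules/audio_device/audio_device_impl.h"

  webrtc::AudioDeviceModule* adm =
      webrtc::AudioDeviceModuleImpl::Create(0);  // kPlatformDefaultAudio.
  if (adm && adm->Init() == 0) {
    adm->InitPlayout();
    adm->StartPlayout();
    // ... audio is rendered via the registered AudioTransport ...
    adm->StopPlayout();
    adm->Terminate();
  }
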
diff --git a/webrtc/modules/audio_device/include/audio_device.h b/webrtc/modules/audio_device/include/audio_device.h
index 2f0c6b5..1826e0e 100644
--- a/webrtc/modules/audio_device/include/audio_device.h
+++ b/webrtc/modules/audio_device/include/audio_device.h
@@ -202,8 +202,17 @@
   // Don't use.
   virtual bool BuiltInAECIsEnabled() const { return false; }
 
+  // Only supported on iOS.
+  // TODO(henrika): Make pure virtual after updating Chromium.
+  virtual int GetPlayoutAudioParameters(AudioParameters* params) const {
+    return -1;
+  }
+  virtual int GetRecordAudioParameters(AudioParameters* params) const {
+    return -1;
+  }
+
  protected:
-  virtual ~AudioDeviceModule() {};
+  virtual ~AudioDeviceModule() {}
 };
 
 AudioDeviceModule* CreateAudioDeviceModule(
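
Because the two new accessors default to returning -1 instead of being pure virtual, existing AudioDeviceModule implementations keep compiling, and callers must treat a non-zero result as "not supported" rather than as a hard error. A short caller sketch (adm is assumed to be a valid AudioDeviceModule pointer):

  webrtc::AudioParameters params;
  if (adm->GetPlayoutAudioParameters(&params) == 0 && params.is_valid()) {
    // Only the iOS device fills this in for now; e.g. 44100 Hz mono gives
    // params.GetBytesPer10msBuffer() == 441 * 2 == 882 bytes.
  }
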
diff --git a/webrtc/modules/audio_device/include/audio_device_defines.h b/webrtc/modules/audio_device/include/audio_device_defines.h
index 56a584e..106edcb 100644
--- a/webrtc/modules/audio_device/include/audio_device_defines.h
+++ b/webrtc/modules/audio_device/include/audio_device_defines.h
@@ -26,113 +26,164 @@
 //  AudioDeviceObserver
 // ----------------------------------------------------------------------------
 
-class AudioDeviceObserver
-{
-public:
-    enum ErrorCode
-    {
-        kRecordingError = 0,
-        kPlayoutError = 1
-    };
-    enum WarningCode
-    {
-        kRecordingWarning = 0,
-        kPlayoutWarning = 1
-    };
+class AudioDeviceObserver {
+ public:
+  enum ErrorCode { kRecordingError = 0, kPlayoutError = 1 };
+  enum WarningCode { kRecordingWarning = 0, kPlayoutWarning = 1 };
 
-    virtual void OnErrorIsReported(const ErrorCode error) = 0;
-    virtual void OnWarningIsReported(const WarningCode warning) = 0;
+  virtual void OnErrorIsReported(const ErrorCode error) = 0;
+  virtual void OnWarningIsReported(const WarningCode warning) = 0;
 
-protected:
-    virtual ~AudioDeviceObserver() {}
+ protected:
+  virtual ~AudioDeviceObserver() {}
 };
 
 // ----------------------------------------------------------------------------
 //  AudioTransport
 // ----------------------------------------------------------------------------
 
-class AudioTransport
-{
-public:
-    virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
-                                            const uint32_t nSamples,
-                                            const uint8_t nBytesPerSample,
-                                            const uint8_t nChannels,
-                                            const uint32_t samplesPerSec,
-                                            const uint32_t totalDelayMS,
-                                            const int32_t clockDrift,
-                                            const uint32_t currentMicLevel,
-                                            const bool keyPressed,
-                                            uint32_t& newMicLevel) = 0;
+class AudioTransport {
+ public:
+  virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
+                                          const uint32_t nSamples,
+                                          const uint8_t nBytesPerSample,
+                                          const uint8_t nChannels,
+                                          const uint32_t samplesPerSec,
+                                          const uint32_t totalDelayMS,
+                                          const int32_t clockDrift,
+                                          const uint32_t currentMicLevel,
+                                          const bool keyPressed,
+                                          uint32_t& newMicLevel) = 0;
 
-    virtual int32_t NeedMorePlayData(const uint32_t nSamples,
-                                     const uint8_t nBytesPerSample,
-                                     const uint8_t nChannels,
-                                     const uint32_t samplesPerSec,
-                                     void* audioSamples,
-                                     uint32_t& nSamplesOut,
-                                     int64_t* elapsed_time_ms,
-                                     int64_t* ntp_time_ms) = 0;
+  virtual int32_t NeedMorePlayData(const uint32_t nSamples,
+                                   const uint8_t nBytesPerSample,
+                                   const uint8_t nChannels,
+                                   const uint32_t samplesPerSec,
+                                   void* audioSamples,
+                                   uint32_t& nSamplesOut,
+                                   int64_t* elapsed_time_ms,
+                                   int64_t* ntp_time_ms) = 0;
 
-    // Method to pass captured data directly and unmixed to network channels.
-    // |channel_ids| contains a list of VoE channels which are the
-    // sinks to the capture data. |audio_delay_milliseconds| is the sum of
-    // recording delay and playout delay of the hardware. |current_volume| is
-    // in the range of [0, 255], representing the current microphone analog
-    // volume. |key_pressed| is used by the typing detection.
-    // |need_audio_processing| specify if the data needs to be processed by APM.
-    // Currently WebRtc supports only one APM, and Chrome will make sure only
-    // one stream goes through APM. When |need_audio_processing| is false, the
-    // values of |audio_delay_milliseconds|, |current_volume| and |key_pressed|
-    // will be ignored.
-    // The return value is the new microphone volume, in the range of |0, 255].
-    // When the volume does not need to be updated, it returns 0.
-    // TODO(xians): Remove this interface after Chrome and Libjingle switches
-    // to OnData().
-    virtual int OnDataAvailable(const int voe_channels[],
-                                int number_of_voe_channels,
-                                const int16_t* audio_data,
-                                int sample_rate,
-                                int number_of_channels,
-                                int number_of_frames,
-                                int audio_delay_milliseconds,
-                                int current_volume,
-                                bool key_pressed,
-                                bool need_audio_processing) { return 0; }
+  // Method to pass captured data directly and unmixed to network channels.
+  // |channel_ids| contains a list of VoE channels which are the
+  // sinks to the capture data. |audio_delay_milliseconds| is the sum of
+  // recording delay and playout delay of the hardware. |current_volume| is
+  // in the range of [0, 255], representing the current microphone analog
+  // volume. |key_pressed| is used by the typing detection.
+  // |need_audio_processing| specifies if the data needs processing by APM.
+  // Currently WebRtc supports only one APM, and Chrome will make sure only
+  // one stream goes through APM. When |need_audio_processing| is false, the
+  // values of |audio_delay_milliseconds|, |current_volume| and |key_pressed|
+  // will be ignored.
+  // The return value is the new microphone volume, in the range of [0, 255].
+  // When the volume does not need to be updated, it returns 0.
+  // TODO(xians): Remove this interface after Chrome and Libjingle switches
+  // to OnData().
+  virtual int OnDataAvailable(const int voe_channels[],
+                              int number_of_voe_channels,
+                              const int16_t* audio_data,
+                              int sample_rate,
+                              int number_of_channels,
+                              int number_of_frames,
+                              int audio_delay_milliseconds,
+                              int current_volume,
+                              bool key_pressed,
+                              bool need_audio_processing) {
+    return 0;
+  }
 
-    // Method to pass the captured audio data to the specific VoE channel.
-    // |voe_channel| is the id of the VoE channel which is the sink to the
-    // capture data.
-    // TODO(xians): Remove this interface after Libjingle switches to
-    // PushCaptureData().
-    virtual void OnData(int voe_channel, const void* audio_data,
-                        int bits_per_sample, int sample_rate,
-                        int number_of_channels,
-                        int number_of_frames) {}
+  // Method to pass the captured audio data to the specific VoE channel.
+  // |voe_channel| is the id of the VoE channel which is the sink to the
+  // capture data.
+  // TODO(xians): Remove this interface after Libjingle switches to
+  // PushCaptureData().
+  virtual void OnData(int voe_channel,
+                      const void* audio_data,
+                      int bits_per_sample,
+                      int sample_rate,
+                      int number_of_channels,
+                      int number_of_frames) {}
 
-    // Method to push the captured audio data to the specific VoE channel.
-    // The data will not undergo audio processing.
-    // |voe_channel| is the id of the VoE channel which is the sink to the
-    // capture data.
-    // TODO(xians): Make the interface pure virtual after Libjingle
-    // has its implementation.
-    virtual void PushCaptureData(int voe_channel, const void* audio_data,
-                                 int bits_per_sample, int sample_rate,
-                                 int number_of_channels,
-                                 int number_of_frames) {}
+  // Method to push the captured audio data to the specific VoE channel.
+  // The data will not undergo audio processing.
+  // |voe_channel| is the id of the VoE channel which is the sink to the
+  // capture data.
+  // TODO(xians): Make the interface pure virtual after Libjingle
+  // has its implementation.
+  virtual void PushCaptureData(int voe_channel,
+                               const void* audio_data,
+                               int bits_per_sample,
+                               int sample_rate,
+                               int number_of_channels,
+                               int number_of_frames) {}
 
-    // Method to pull mixed render audio data from all active VoE channels.
-    // The data will not be passed as reference for audio processing internally.
-    // TODO(xians): Support getting the unmixed render data from specific VoE
-    // channel.
-    virtual void PullRenderData(int bits_per_sample, int sample_rate,
-                                int number_of_channels, int number_of_frames,
-                                void* audio_data,
-                                int64_t* elapsed_time_ms,
-                                int64_t* ntp_time_ms) {}
+  // Method to pull mixed render audio data from all active VoE channels.
+  // The data will not be passed as reference for audio processing internally.
+  // TODO(xians): Support getting the unmixed render data from specific VoE
+  // channel.
+  virtual void PullRenderData(int bits_per_sample,
+                              int sample_rate,
+                              int number_of_channels,
+                              int number_of_frames,
+                              void* audio_data,
+                              int64_t* elapsed_time_ms,
+                              int64_t* ntp_time_ms) {}
 
-protected:
-    virtual ~AudioTransport() {}
+ protected:
+  virtual ~AudioTransport() {}
+};
+
+// Helper class for storage of fundamental audio parameters such as sample rate,
+// number of channels, native buffer size, etc.
+// Note that one audio frame can contain more than one channel sample and each
+// sample is assumed to be a 16-bit PCM sample. Hence, one audio frame in
+// stereo contains 2 * (16/8) = 4 bytes of data.
+class AudioParameters {
+ public:
+  // This implementation only supports 16-bit PCM samples.
+  enum { kBitsPerSample = 16 };
+  AudioParameters()
+      : sample_rate_(0),
+        channels_(0),
+        frames_per_buffer_(0),
+        frames_per_10ms_buffer_(0) {}
+  AudioParameters(int sample_rate, int channels, int frames_per_buffer)
+      : sample_rate_(sample_rate),
+        channels_(channels),
+        frames_per_buffer_(frames_per_buffer),
+        frames_per_10ms_buffer_(sample_rate / 100) {}
+  void reset(int sample_rate, int channels, int frames_per_buffer) {
+    sample_rate_ = sample_rate;
+    channels_ = channels;
+    frames_per_buffer_ = frames_per_buffer;
+    frames_per_10ms_buffer_ = (sample_rate / 100);
+  }
+  int bits_per_sample() const { return kBitsPerSample; }
+  int sample_rate() const { return sample_rate_; }
+  int channels() const { return channels_; }
+  int frames_per_buffer() const { return frames_per_buffer_; }
+  int frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
+  bool is_valid() const {
+    return ((sample_rate_ > 0) && (channels_ > 0) && (frames_per_buffer_ > 0));
+  }
+  int GetBytesPerFrame() const { return channels_ * kBitsPerSample / 8; }
+  int GetBytesPerBuffer() const {
+    return frames_per_buffer_ * GetBytesPerFrame();
+  }
+  int GetBytesPer10msBuffer() const {
+    return frames_per_10ms_buffer_ * GetBytesPerFrame();
+  }
+  float GetBufferSizeInMilliseconds() const {
+    if (sample_rate_ == 0)
+      return 0.0f;
+    return frames_per_buffer_ / (sample_rate_ / 1000.0f);
+  }
+
+ private:
+  int sample_rate_;
+  int channels_;
+  int frames_per_buffer_;
+  int frames_per_10ms_buffer_;
 };
 
 }  // namespace webrtc
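
A short worked example of the byte arithmetic implemented by AudioParameters above; every value follows directly from the accessors shown:

  webrtc::AudioParameters params(48000, 2, 480);  // 48 kHz stereo, 480 frames.
  // GetBytesPerFrame():            2 * 16 / 8             == 4 bytes.
  // frames_per_10ms_buffer():      48000 / 100            == 480 frames.
  // GetBytesPerBuffer():           480 * 4                == 1920 bytes.
  // GetBytesPer10msBuffer():       480 * 4                == 1920 bytes.
  // GetBufferSizeInMilliseconds(): 480 / (48000 / 1000.f) == 10.0 ms.
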
diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.h b/webrtc/modules/audio_device/ios/audio_device_ios.h
index a367450..8b21132 100644
--- a/webrtc/modules/audio_device/ios/audio_device_ios.h
+++ b/webrtc/modules/audio_device/ios/audio_device_ios.h
@@ -13,17 +13,14 @@
 
 #include <AudioUnit/AudioUnit.h>
 
+#include "webrtc/base/thread_checker.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 
 namespace webrtc {
-const uint32_t N_REC_SAMPLES_PER_SEC = 44000;
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 44000;
-
-const uint32_t N_REC_CHANNELS = 1;  // default is mono recording
-const uint32_t N_PLAY_CHANNELS = 1;  // default is mono playout
-const uint32_t N_DEVICE_CHANNELS = 8;
+const uint32_t N_REC_SAMPLES_PER_SEC = 44100;
+const uint32_t N_PLAY_SAMPLES_PER_SEC = 44100;
 
 const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC / 100);
 const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC / 100);
@@ -33,137 +30,117 @@
 
 class AudioDeviceIOS : public AudioDeviceGeneric {
  public:
-  AudioDeviceIOS(const int32_t id);
+  AudioDeviceIOS();
   ~AudioDeviceIOS();
 
-  // Retrieve the currently utilized audio layer
-  virtual int32_t ActiveAudioLayer(
-      AudioDeviceModule::AudioLayer& audioLayer) const;
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
 
-  // Main initializaton and termination
-  virtual int32_t Init();
-  virtual int32_t Terminate();
-  virtual bool Initialized() const;
+  int32_t Init() override;
+  int32_t Terminate() override;
+  bool Initialized() const override { return _initialized; }
 
-  // Device enumeration
-  virtual int16_t PlayoutDevices();
-  virtual int16_t RecordingDevices();
-  virtual int32_t PlayoutDeviceName(uint16_t index,
-                                    char name[kAdmMaxDeviceNameSize],
-                                    char guid[kAdmMaxGuidSize]);
-  virtual int32_t RecordingDeviceName(uint16_t index,
-                                      char name[kAdmMaxDeviceNameSize],
-                                      char guid[kAdmMaxGuidSize]);
+  int32_t InitPlayout() override;
+  bool PlayoutIsInitialized() const override { return _playIsInitialized; }
 
-  // Device selection
-  virtual int32_t SetPlayoutDevice(uint16_t index);
-  virtual int32_t SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
-  virtual int32_t SetRecordingDevice(uint16_t index);
-  virtual int32_t SetRecordingDevice(
-      AudioDeviceModule::WindowsDeviceType device);
+  int32_t InitRecording() override;
+  bool RecordingIsInitialized() const override { return _recIsInitialized; }
 
-  // Audio transport initialization
-  virtual int32_t PlayoutIsAvailable(bool& available);
-  virtual int32_t InitPlayout();
-  virtual bool PlayoutIsInitialized() const;
-  virtual int32_t RecordingIsAvailable(bool& available);
-  virtual int32_t InitRecording();
-  virtual bool RecordingIsInitialized() const;
+  int32_t StartPlayout() override;
+  int32_t StopPlayout() override;
+  bool Playing() const override { return _playing; }
 
-  // Audio transport control
-  virtual int32_t StartPlayout();
-  virtual int32_t StopPlayout();
-  virtual bool Playing() const;
-  virtual int32_t StartRecording();
-  virtual int32_t StopRecording();
-  virtual bool Recording() const;
+  int32_t StartRecording() override;
+  int32_t StopRecording() override;
+  bool Recording() const override { return _recording; }
 
-  // Microphone Automatic Gain Control (AGC)
-  virtual int32_t SetAGC(bool enable);
-  virtual bool AGC() const;
+  int32_t SetLoudspeakerStatus(bool enable) override;
+  int32_t GetLoudspeakerStatus(bool& enabled) const override;
 
-  // Volume control based on the Windows Wave API (Windows only)
-  virtual int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight);
-  virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
-                                uint16_t& volumeRight) const;
+  // TODO(henrika): investigate if we can reduce the complexity here.
+  // Do we even need delay estimates?
+  int32_t PlayoutDelay(uint16_t& delayMS) const override;
+  int32_t RecordingDelay(uint16_t& delayMS) const override;
 
-  // Audio mixer initialization
-  virtual int32_t InitSpeaker();
-  virtual bool SpeakerIsInitialized() const;
-  virtual int32_t InitMicrophone();
-  virtual bool MicrophoneIsInitialized() const;
+  int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
+                        uint16_t& sizeMS) const override;
 
-  // Speaker volume controls
-  virtual int32_t SpeakerVolumeIsAvailable(bool& available);
-  virtual int32_t SetSpeakerVolume(uint32_t volume);
-  virtual int32_t SpeakerVolume(uint32_t& volume) const;
-  virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
-  virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
-  virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;
+  // These methods are unique to the iOS implementation.
 
-  // Microphone volume controls
-  virtual int32_t MicrophoneVolumeIsAvailable(bool& available);
-  virtual int32_t SetMicrophoneVolume(uint32_t volume);
-  virtual int32_t MicrophoneVolume(uint32_t& volume) const;
-  virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
-  virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
-  virtual int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const;
+  // Native audio parameters stored during construction.
+  int GetPlayoutAudioParameters(AudioParameters* params) const override;
+  int GetRecordAudioParameters(AudioParameters* params) const override;
 
-  // Microphone mute control
-  virtual int32_t MicrophoneMuteIsAvailable(bool& available);
-  virtual int32_t SetMicrophoneMute(bool enable);
-  virtual int32_t MicrophoneMute(bool& enabled) const;
+  // These methods are currently not implemented on iOS.
+  // See audio_device_not_implemented_ios.mm for dummy implementations.
 
-  // Speaker mute control
-  virtual int32_t SpeakerMuteIsAvailable(bool& available);
-  virtual int32_t SetSpeakerMute(bool enable);
-  virtual int32_t SpeakerMute(bool& enabled) const;
-
-  // Microphone boost control
-  virtual int32_t MicrophoneBoostIsAvailable(bool& available);
-  virtual int32_t SetMicrophoneBoost(bool enable);
-  virtual int32_t MicrophoneBoost(bool& enabled) const;
-
-  // Stereo support
-  virtual int32_t StereoPlayoutIsAvailable(bool& available);
-  virtual int32_t SetStereoPlayout(bool enable);
-  virtual int32_t StereoPlayout(bool& enabled) const;
-  virtual int32_t StereoRecordingIsAvailable(bool& available);
-  virtual int32_t SetStereoRecording(bool enable);
-  virtual int32_t StereoRecording(bool& enabled) const;
-
-  // Delay information and control
-  virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
-                                   uint16_t sizeMS);
-  virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
-                                uint16_t& sizeMS) const;
-  virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
-  virtual int32_t RecordingDelay(uint16_t& delayMS) const;
-
-  // CPU load
-  virtual int32_t CPULoad(uint16_t& load) const;
-
- public:
-  virtual bool PlayoutWarning() const;
-  virtual bool PlayoutError() const;
-  virtual bool RecordingWarning() const;
-  virtual bool RecordingError() const;
-  virtual void ClearPlayoutWarning();
-  virtual void ClearPlayoutError();
-  virtual void ClearRecordingWarning();
-  virtual void ClearRecordingError();
-
- public:
-  virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
-
-  // Reset Audio Device (for mobile devices only)
-  virtual int32_t ResetAudioDevice();
-
-  // enable or disable loud speaker (for iphone only)
-  virtual int32_t SetLoudspeakerStatus(bool enable);
-  virtual int32_t GetLoudspeakerStatus(bool& enabled) const;
+  int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer& audioLayer) const override;
+  int32_t ResetAudioDevice() override;
+  int32_t PlayoutIsAvailable(bool& available) override;
+  int32_t RecordingIsAvailable(bool& available) override;
+  int32_t SetAGC(bool enable) override;
+  bool AGC() const override;
+  int16_t PlayoutDevices() override;
+  int16_t RecordingDevices() override;
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override;
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override;
+  int32_t SetPlayoutDevice(uint16_t index) override;
+  int32_t SetPlayoutDevice(
+      AudioDeviceModule::WindowsDeviceType device) override;
+  int32_t SetRecordingDevice(uint16_t index) override;
+  int32_t SetRecordingDevice(
+      AudioDeviceModule::WindowsDeviceType device) override;
+  int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight) override;
+  int32_t WaveOutVolume(uint16_t& volumeLeft,
+                        uint16_t& volumeRight) const override;
+  int32_t InitSpeaker() override;
+  bool SpeakerIsInitialized() const override;
+  int32_t InitMicrophone() override;
+  bool MicrophoneIsInitialized() const override;
+  int32_t SpeakerVolumeIsAvailable(bool& available) override;
+  int32_t SetSpeakerVolume(uint32_t volume) override;
+  int32_t SpeakerVolume(uint32_t& volume) const override;
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+  int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const override;
+  int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+  int32_t SetMicrophoneVolume(uint32_t volume) override;
+  int32_t MicrophoneVolume(uint32_t& volume) const override;
+  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+  int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const override;
+  int32_t MicrophoneMuteIsAvailable(bool& available) override;
+  int32_t SetMicrophoneMute(bool enable) override;
+  int32_t MicrophoneMute(bool& enabled) const override;
+  int32_t SpeakerMuteIsAvailable(bool& available) override;
+  int32_t SetSpeakerMute(bool enable) override;
+  int32_t SpeakerMute(bool& enabled) const override;
+  int32_t MicrophoneBoostIsAvailable(bool& available) override;
+  int32_t SetMicrophoneBoost(bool enable) override;
+  int32_t MicrophoneBoost(bool& enabled) const override;
+  int32_t StereoPlayoutIsAvailable(bool& available) override;
+  int32_t SetStereoPlayout(bool enable) override;
+  int32_t StereoPlayout(bool& enabled) const override;
+  int32_t StereoRecordingIsAvailable(bool& available) override;
+  int32_t SetStereoRecording(bool enable) override;
+  int32_t StereoRecording(bool& enabled) const override;
+  int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+                           uint16_t sizeMS) override;
+  int32_t CPULoad(uint16_t& load) const override;
+  bool PlayoutWarning() const override;
+  bool PlayoutError() const override;
+  bool RecordingWarning() const override;
+  bool RecordingError() const override;
+  void ClearPlayoutWarning() override {}
+  void ClearPlayoutError() override {}
+  void ClearRecordingWarning() override {}
+  void ClearRecordingError() override {}
 
  private:
+  // TODO(henrika): try to remove these.
   void Lock() {
     _critSect.Enter();
   }
@@ -172,10 +149,6 @@
     _critSect.Leave();
   }
 
-  int32_t Id() {
-    return _id;
-  }
-
   // Init and shutdown
   int32_t InitPlayOrRecord();
   int32_t ShutdownPlayOrRecord();
@@ -209,18 +182,24 @@
   bool CaptureWorkerThread();
 
  private:
-  AudioDeviceBuffer* _ptrAudioBuffer;
+  rtc::ThreadChecker thread_checker_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(), which is called
+  // by AudioDeviceModuleImpl::Create(). The AudioDeviceBuffer is a member of
+  // the AudioDeviceModuleImpl instance and is owned by it, and therefore
+  // outlives this object.
+  AudioDeviceBuffer* audio_device_buffer_;
 
   CriticalSectionWrapper& _critSect;
 
-  rtc::scoped_ptr<ThreadWrapper> _captureWorkerThread;
+  AudioParameters playout_parameters_;
+  AudioParameters record_parameters_;
 
-  int32_t _id;
+  rtc::scoped_ptr<ThreadWrapper> _captureWorkerThread;
 
   AudioUnit _auVoiceProcessing;
   void* _audioInterruptionObserver;
 
- private:
   bool _initialized;
   bool _isShutDown;
   bool _recording;
@@ -228,15 +207,8 @@
   bool _recIsInitialized;
   bool _playIsInitialized;
 
-  bool _recordingDeviceIsSpecified;
-  bool _playoutDeviceIsSpecified;
-  bool _micIsInitialized;
-  bool _speakerIsInitialized;
-
-  bool _AGC;
-
   // The sampling rate to use with Audio Device Buffer
-  uint32_t _adbSampFreq;
+  int _adbSampFreq;
 
   // Delay calculation
   uint32_t _recordingDelay;
@@ -245,12 +217,6 @@
   uint32_t _recordingDelayHWAndOS;
   uint32_t _recordingDelayMeasurementCounter;
 
-  // Errors and warnings count
-  uint16_t _playWarning;
-  uint16_t _playError;
-  uint16_t _recWarning;
-  uint16_t _recError;
-
   // Playout buffer, needed for 44.0 / 44.1 kHz mismatch
   int16_t _playoutBuffer[ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
   uint32_t  _playoutBufferUsed;  // How much is filled
diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.mm b/webrtc/modules/audio_device/ios/audio_device_ios.mm
index 47503a9..cb15032 100644
--- a/webrtc/modules/audio_device/ios/audio_device_ios.mm
+++ b/webrtc/modules/audio_device/ios/audio_device_ios.mm
@@ -8,1129 +8,417 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
 #import <AVFoundation/AVFoundation.h>
 #import <Foundation/Foundation.h>
 
 #include "webrtc/modules/audio_device/ios/audio_device_ios.h"
+#include "webrtc/modules/utility/interface/helpers_ios.h"
 
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
 namespace webrtc {
-AudioDeviceIOS::AudioDeviceIOS(const int32_t id)
-    :
-    _ptrAudioBuffer(NULL),
-    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
-    _id(id),
-    _auVoiceProcessing(NULL),
-    _audioInterruptionObserver(NULL),
-    _initialized(false),
-    _isShutDown(false),
-    _recording(false),
-    _playing(false),
-    _recIsInitialized(false),
-    _playIsInitialized(false),
-    _recordingDeviceIsSpecified(false),
-    _playoutDeviceIsSpecified(false),
-    _micIsInitialized(false),
-    _speakerIsInitialized(false),
-    _AGC(false),
-    _adbSampFreq(0),
-    _recordingDelay(0),
-    _playoutDelay(0),
-    _playoutDelayMeasurementCounter(9999),
-    _recordingDelayHWAndOS(0),
-    _recordingDelayMeasurementCounter(9999),
-    _playWarning(0),
-    _playError(0),
-    _recWarning(0),
-    _recError(0),
-    _playoutBufferUsed(0),
-    _recordingCurrentSeq(0),
-    _recordingBufferTotalSize(0) {
-    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
-                 "%s created", __FUNCTION__);
 
-    memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
-    memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
-    memset(_recordingLength, 0, sizeof(_recordingLength));
-    memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
+#define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"
+
+using ios::CheckAndLogError;
+
+#if !defined(NDEBUG)
+static void LogDeviceInfo() {
+  LOG(LS_INFO) << "LogDeviceInfo";
+  @autoreleasepool {
+    LOG(LS_INFO) << " system name: " << ios::GetSystemName();
+    LOG(LS_INFO) << " system version: " << ios::GetSystemVersion();
+    LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
+    LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
+  }
+}
+#endif
+
+static void ActivateAudioSession(AVAudioSession* session, bool activate) {
+  LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")";
+  @autoreleasepool {
+    NSError* error = nil;
+    BOOL success = NO;
+    if (!activate) {
+      // Deactivate the audio session.
+      success = [session setActive:NO error:&error];
+      DCHECK(CheckAndLogError(success, error));
+      return;
+    }
+    // Activate an audio session and set category and mode. Only make changes
+    // if needed since setting them to the values they already have will clear
+    // transient properties (such as PortOverride) that some other component
+    // has set up.
+    if (session.category != AVAudioSessionCategoryPlayAndRecord) {
+      error = nil;
+      success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
+                               error:&error];
+      DCHECK(CheckAndLogError(success, error));
+    }
+    if (session.mode != AVAudioSessionModeVoiceChat) {
+      error = nil;
+      success = [session setMode:AVAudioSessionModeVoiceChat error:&error];
+      DCHECK(CheckAndLogError(success, error));
+    }
+    error = nil;
+    success = [session setActive:YES error:&error];
+    DCHECK(CheckAndLogError(success, error));
+    // Ensure that category and mode are actually activated.
+    DCHECK(
+        [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]);
+    DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]);
+  }
+}
+
+// Query hardware characteristics, such as input and output latency, input and
+// output channel count, hardware sample rate, hardware volume setting, and
+// whether audio input is available. To obtain meaningful values for hardware
+// characteristics, the audio session must be initialized and active before we
+// query the values.
+// TODO(henrika): Note that these characteristics can change at runtime. For
+// instance, the input sample rate may change when a user plugs in a headset;
+// see the sketch after this function for one way to observe such changes.
+static void GetHardwareAudioParameters(AudioParameters* playout_parameters,
+                                       AudioParameters* record_parameters) {
+  LOG(LS_INFO) << "GetHardwareAudioParameters";
+  @autoreleasepool {
+    // Implicit initialization happens when we obtain a reference to the
+    // AVAudioSession object.
+    AVAudioSession* session = [AVAudioSession sharedInstance];
+    // Always get values when the audio session is active.
+    ActivateAudioSession(session, true);
+    CHECK(session.isInputAvailable) << "No input path is available!";
+    // Get current hardware parameters.
+    double sample_rate = (double)session.sampleRate;
+    double io_buffer_duration = (double)session.IOBufferDuration;
+    int output_channels = (int)session.outputNumberOfChannels;
+    int input_channels = (int)session.inputNumberOfChannels;
+    int frames_per_buffer =
+        static_cast<int>(sample_rate * io_buffer_duration + 0.5);
+    // Copy hardware parameters to output parameters.
+    playout_parameters->reset(sample_rate, output_channels, frames_per_buffer);
+    record_parameters->reset(sample_rate, input_channels, frames_per_buffer);
+    // Add logging for debugging purposes.
+    LOG(LS_INFO) << " sample rate: " << sample_rate;
+    LOG(LS_INFO) << " IO buffer duration: " << io_buffer_duration;
+    LOG(LS_INFO) << " frames_per_buffer: " << frames_per_buffer;
+    LOG(LS_INFO) << " output channels: " << output_channels;
+    LOG(LS_INFO) << " input channels: " << input_channels;
+    LOG(LS_INFO) << " output latency: " << (double)session.outputLatency;
+    LOG(LS_INFO) << " input latency: " << (double)session.inputLatency;
+    // Don't keep the audio session active. Instead, deactivate when needed.
+    ActivateAudioSession(session, false);
+    // TODO(henrika): to be extra safe, we can do more here. E.g., set
+    // preferred values for sample rate, channels etc., re-activate an audio
+    // session and verify the actual values again. Then we know for sure that
+    // the current values will in fact be correct. Or, we can skip all this
+    // and check setting when audio is started. Probably better.
+  }
+}
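+
+// One possible way to address the TODO above (illustrative sketch only; this
+// observer is not added by this CL and |playout_params|/|record_params| are
+// placeholder pointers):
+//   [[NSNotificationCenter defaultCenter]
+//       addObserverForName:AVAudioSessionRouteChangeNotification
+//                   object:nil
+//                    queue:nil
+//               usingBlock:^(NSNotification* notification) {
+//                 GetHardwareAudioParameters(playout_params, record_params);
+//               }];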
+
+AudioDeviceIOS::AudioDeviceIOS()
+    : audio_device_buffer_(nullptr),
+      _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
+      _auVoiceProcessing(nullptr),
+      _audioInterruptionObserver(nullptr),
+      _initialized(false),
+      _isShutDown(false),
+      _recording(false),
+      _playing(false),
+      _recIsInitialized(false),
+      _playIsInitialized(false),
+      _adbSampFreq(0),
+      _recordingDelay(0),
+      _playoutDelay(0),
+      _playoutDelayMeasurementCounter(9999),
+      _recordingDelayHWAndOS(0),
+      _recordingDelayMeasurementCounter(9999),
+      _playoutBufferUsed(0),
+      _recordingCurrentSeq(0),
+      _recordingBufferTotalSize(0) {
+  LOGI() << "ctor" << ios::GetCurrentThreadDescription();
+  memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
+  memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
+  memset(_recordingLength, 0, sizeof(_recordingLength));
+  memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
 }
 
 AudioDeviceIOS::~AudioDeviceIOS() {
-    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
-                 "%s destroyed", __FUNCTION__);
-
-    Terminate();
-
-    delete &_critSect;
+  LOGI() << "~dtor";
+  DCHECK(thread_checker_.CalledOnValidThread());
+  Terminate();
+  delete &_critSect;
 }
 
-
-// ============================================================================
-//                                     API
-// ============================================================================
-
 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    CriticalSectionScoped lock(&_critSect);
-
-    _ptrAudioBuffer = audioBuffer;
-
-    // inform the AudioBuffer about default settings for this implementation
-    _ptrAudioBuffer->SetRecordingSampleRate(ENGINE_REC_BUF_SIZE_IN_SAMPLES);
-    _ptrAudioBuffer->SetPlayoutSampleRate(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
-    _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
-    _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
-}
-
-int32_t AudioDeviceIOS::ActiveAudioLayer(
-    AudioDeviceModule::AudioLayer& audioLayer) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-    audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
-    return 0;
+  LOGI() << "AttachAudioBuffer";
+  DCHECK(audioBuffer);
+  DCHECK(thread_checker_.CalledOnValidThread());
+  audio_device_buffer_ = audioBuffer;
 }
 
 int32_t AudioDeviceIOS::Init() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    CriticalSectionScoped lock(&_critSect);
-
-    if (_initialized) {
-        return 0;
-    }
-
-    _isShutDown = false;
-
-    // Create and start capture thread
-    if (!_captureWorkerThread) {
-        _captureWorkerThread = ThreadWrapper::CreateThread(
-            RunCapture, this, "CaptureWorkerThread");
-        bool res = _captureWorkerThread->Start();
-        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
-                     _id, "CaptureWorkerThread started (res=%d)", res);
-        _captureWorkerThread->SetPriority(kRealtimePriority);
-    } else {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
-                     _id, "Thread already created");
-    }
-    _playWarning = 0;
-    _playError = 0;
-    _recWarning = 0;
-    _recError = 0;
-
-    _initialized = true;
-
+  LOGI() << "Init";
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (_initialized) {
     return 0;
+  }
+#if !defined(NDEBUG)
+  LogDeviceInfo();
+#endif
+  // Query hardware audio parameters and cache the results. These parameters
+  // will be used as preferred values later when streaming starts.
+  // Note that I override these "optimal" values below since I don't want to
+  // modify the existing behavior yet.
+  GetHardwareAudioParameters(&playout_parameters_, &record_parameters_);
+  // TODO(henrika): these parameters are currently hard-coded to match the
+  // existing implementation, which always uses 16kHz as the preferred sample
+  // rate and mono only. The goal is to improve this scheme and make it more
+  // flexible. In addition, a better native buffer size shall be derived.
+  // Using 10ms as the default here (so far only used by unit tests).
+  // We should also implement observers for notification of any change in
+  // these parameters.
+  playout_parameters_.reset(16000, 1, 160);
+  record_parameters_.reset(16000, 1, 160);
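+  // With these values each buffer holds 160 frames / 16000 Hz = 10 ms of
+  // mono 16-bit PCM, i.e. 320 bytes per buffer.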
+
+  // AttachAudioBuffer() is called at construction by the main class but check
+  // just in case.
+  DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
+  // Inform the audio device buffer (ADB) about the new audio format.
+  // TODO(henrika): try to improve this section.
+  audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
+  audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
+  audio_device_buffer_->SetRecordingSampleRate(
+      record_parameters_.sample_rate());
+  audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
+
+  DCHECK(!_captureWorkerThread);
+  // Create and start the capture thread.
+  // TODO(henrika): do we need this thread?
+  _isShutDown = false;
+  _captureWorkerThread =
+      ThreadWrapper::CreateThread(RunCapture, this, "CaptureWorkerThread");
+  if (!_captureWorkerThread->Start()) {
+    LOG_F(LS_ERROR) << "Failed to start CaptureWorkerThread!";
+    return -1;
+  }
+  _captureWorkerThread->SetPriority(kRealtimePriority);
+  _initialized = true;
+  return 0;
 }
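+
+// Expected call order (sketch inferred from the DCHECKs in the methods below;
+// |adb| is an illustrative handle, and AttachAudioBuffer() is invoked by
+// AudioDeviceModuleImpl rather than by clients directly):
+//   AttachAudioBuffer(adb);
+//   Init();
+//   InitPlayout();   // and/or InitRecording()
+//   StartPlayout();  // and/or StartRecording()
+//   StopPlayout();   // and/or StopRecording()
+//   Terminate();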
 
 int32_t AudioDeviceIOS::Terminate() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    if (!_initialized) {
-        return 0;
-    }
-
-
-    // Stop capture thread
-    if (_captureWorkerThread) {
-        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
-                     _id, "Stopping CaptureWorkerThread");
-        bool res = _captureWorkerThread->Stop();
-        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
-                     _id, "CaptureWorkerThread stopped (res=%d)", res);
-        _captureWorkerThread.reset();
-    }
-
-    // Shut down Audio Unit
-    ShutdownPlayOrRecord();
-
-    _isShutDown = true;
-    _initialized = false;
-    _speakerIsInitialized = false;
-    _micIsInitialized = false;
-    _playoutDeviceIsSpecified = false;
-    _recordingDeviceIsSpecified = false;
+  LOGI() << "Terminate";
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (!_initialized) {
     return 0;
-}
-
-bool AudioDeviceIOS::Initialized() const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-    return (_initialized);
-}
-
-int32_t AudioDeviceIOS::InitSpeaker() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    CriticalSectionScoped lock(&_critSect);
-
-    if (!_initialized) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
-                     _id, "  Not initialized");
-        return -1;
-    }
-
-    if (_playing) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
-                     _id, "  Cannot init speaker when playing");
-        return -1;
-    }
-
-    if (!_playoutDeviceIsSpecified) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
-                     _id, "  Playout device is not specified");
-        return -1;
-    }
-
-    // Do nothing
-    _speakerIsInitialized = true;
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::InitMicrophone() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    CriticalSectionScoped lock(&_critSect);
-
-    if (!_initialized) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
-                     _id, "  Not initialized");
-        return -1;
-    }
-
-    if (_recording) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
-                     _id, "  Cannot init mic when recording");
-        return -1;
-    }
-
-    if (!_recordingDeviceIsSpecified) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
-                     _id, "  Recording device is not specified");
-        return -1;
-    }
-
-    // Do nothing
-
-    _micIsInitialized = true;
-
-    return 0;
-}
-
-bool AudioDeviceIOS::SpeakerIsInitialized() const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-    return _speakerIsInitialized;
-}
-
-bool AudioDeviceIOS::MicrophoneIsInitialized() const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-    return _micIsInitialized;
-}
-
-int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    available = false;  // Speaker volume not supported on iOS
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::SetSpeakerVolume(volume=%u)", volume);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t
-    AudioDeviceIOS::SetWaveOutVolume(uint16_t volumeLeft,
-                                     uint16_t volumeRight) {
-    WEBRTC_TRACE(
-        kTraceModuleCall,
-        kTraceAudioDevice,
-        _id,
-        "AudioDeviceIOS::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)",
-        volumeLeft, volumeRight);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-
-    return -1;
-}
-
-int32_t
-AudioDeviceIOS::WaveOutVolume(uint16_t& /*volumeLeft*/,
-                                 uint16_t& /*volumeRight*/) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t
-    AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t AudioDeviceIOS::MinSpeakerVolume(
-    uint32_t& minVolume) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t
-    AudioDeviceIOS::SpeakerVolumeStepSize(uint16_t& stepSize) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    available = false;  // Speaker mute not supported on iOS
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    available = false;  // Mic mute not supported on iOS
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t AudioDeviceIOS::MicrophoneBoostIsAvailable(bool& available) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    available = false;  // Mic boost not supported on iOS
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::SetMicrophoneBoost(bool enable) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::SetMicrophoneBoost(enable=%u)", enable);
-
-    if (!_micIsInitialized) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Microphone not initialized");
-        return -1;
-    }
-
-    if (enable) {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     "  SetMicrophoneBoost cannot be enabled on this platform");
-        return -1;
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::MicrophoneBoost(bool& enabled) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-    if (!_micIsInitialized) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Microphone not initialized");
-        return -1;
-    }
-
-    enabled = false;
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    available = false;  // Stereo recording not supported on iOS
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::SetStereoRecording(enable=%u)", enable);
-
-    if (enable) {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     " Stereo recording is not supported on this platform");
-        return -1;
-    }
-    return 0;
-}
-
-int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    enabled = false;
-    return 0;
-}
-
-int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    available = false;  // Stereo playout not supported on iOS
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::SetStereoPlayout(enable=%u)", enable);
-
-    if (enable) {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     " Stereo playout is not supported on this platform");
-        return -1;
-    }
-    return 0;
-}
-
-int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    enabled = false;
-    return 0;
-}
-
-int32_t AudioDeviceIOS::SetAGC(bool enable) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::SetAGC(enable=%d)", enable);
-
-    _AGC = enable;
-
-    return 0;
-}
-
-bool AudioDeviceIOS::AGC() const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    return _AGC;
-}
-
-int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    available = false;  // Mic volume not supported on IOS
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::SetMicrophoneVolume(volume=%u)", volume);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t
-    AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t
-    AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t
-    AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int32_t
-    AudioDeviceIOS::MicrophoneVolumeStepSize(
-                                            uint16_t& stepSize) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-int16_t AudioDeviceIOS::PlayoutDevices() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-
-    return (int16_t)1;
-}
-
-int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::SetPlayoutDevice(index=%u)", index);
-
-    if (_playIsInitialized) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Playout already initialized");
-        return -1;
-    }
-
-    if (index !=0) {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     "  SetPlayoutDevice invalid index");
-        return -1;
-    }
-    _playoutDeviceIsSpecified = true;
-
-    return 0;
-}
-
-int32_t
-    AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "WindowsDeviceType not supported");
-    return -1;
-}
-
-int32_t
-    AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
-                                         char name[kAdmMaxDeviceNameSize],
-                                         char guid[kAdmMaxGuidSize]) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::PlayoutDeviceName(index=%u)", index);
-
-    if (index != 0) {
-        return -1;
-    }
-    // return empty strings
-    memset(name, 0, kAdmMaxDeviceNameSize);
-    if (guid != NULL) {
-        memset(guid, 0, kAdmMaxGuidSize);
-    }
-
-    return 0;
-}
-
-int32_t
-    AudioDeviceIOS::RecordingDeviceName(uint16_t index,
-                                           char name[kAdmMaxDeviceNameSize],
-                                           char guid[kAdmMaxGuidSize]) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::RecordingDeviceName(index=%u)", index);
-
-    if (index != 0) {
-        return -1;
-    }
-    // return empty strings
-    memset(name, 0, kAdmMaxDeviceNameSize);
-    if (guid != NULL) {
-        memset(guid, 0, kAdmMaxGuidSize);
-    }
-
-    return 0;
-}
-
-int16_t AudioDeviceIOS::RecordingDevices() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
-    return (int16_t)1;
-}
-
-int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::SetRecordingDevice(index=%u)", index);
-
-    if (_recIsInitialized) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Recording already initialized");
-        return -1;
-    }
-
-    if (index !=0) {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     "  SetRecordingDevice invalid index");
-        return -1;
-    }
-
-    _recordingDeviceIsSpecified = true;
-
-    return 0;
-}
-
-int32_t
-    AudioDeviceIOS::SetRecordingDevice(
-                                        AudioDeviceModule::WindowsDeviceType) {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                 "WindowsDeviceType not supported");
-    return -1;
-}
-
-// ----------------------------------------------------------------------------
-//  SetLoudspeakerStatus
-//
-//  Change the default receiver playout route to speaker.
-//
-// ----------------------------------------------------------------------------
-
-int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
-    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::SetLoudspeakerStatus(enable=%d)", enable);
-
-    AVAudioSession* session = [AVAudioSession sharedInstance];
-    NSString* category = session.category;
-    AVAudioSessionCategoryOptions options = session.categoryOptions;
-    // Respect old category options if category is
-    // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
-    // might not be valid for this category.
-    if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
-      if (enable) {
-        options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
-      } else {
-        options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
-      }
-    } else {
-      options = AVAudioSessionCategoryOptionDefaultToSpeaker;
-    }
-
-    NSError* error = nil;
-    [session setCategory:AVAudioSessionCategoryPlayAndRecord
-             withOptions:options
-                   error:&error];
-    if (error != nil) {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                   "Error changing default output route ");
+  }
+  // Stop the capture thread.
+  if (_captureWorkerThread) {
+    if (!_captureWorkerThread->Stop()) {
+      LOG_F(LS_ERROR) << "Failed to stop CaptureWorkerThread!";
       return -1;
     }
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool &enabled) const {
-    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::SetLoudspeakerStatus(enabled=?)");
-
-    AVAudioSession* session = [AVAudioSession sharedInstance];
-    AVAudioSessionCategoryOptions options = session.categoryOptions;
-    enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
-    available = false;
-
-    // Try to initialize the playout side
-    int32_t res = InitPlayout();
-
-    // Cancel effect of initialization
-    StopPlayout();
-
-    if (res != -1) {
-        available = true;
-    }
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
-    available = false;
-
-    // Try to initialize the recording side
-    int32_t res = InitRecording();
-
-    // Cancel effect of initialization
-    StopRecording();
-
-    if (res != -1) {
-        available = true;
-    }
-
-    return 0;
+    _captureWorkerThread.reset();
+  }
+  ShutdownPlayOrRecord();
+  _isShutDown = true;
+  _initialized = false;
+  return 0;
 }
 
 int32_t AudioDeviceIOS::InitPlayout() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
-    CriticalSectionScoped lock(&_critSect);
-
-    if (!_initialized) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "  Not initialized");
-        return -1;
+  LOGI() << "InitPlayout";
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(_initialized);
+  DCHECK(!_playIsInitialized);
+  DCHECK(!_playing);
+  if (!_recIsInitialized) {
+    if (InitPlayOrRecord() == -1) {
+      LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
+      return -1;
     }
-
-    if (_playing) {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     "  Playout already started");
-        return -1;
-    }
-
-    if (_playIsInitialized) {
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "  Playout already initialized");
-        return 0;
-    }
-
-    if (!_playoutDeviceIsSpecified) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Playout device is not specified");
-        return -1;
-    }
-
-    // Initialize the speaker
-    if (InitSpeaker() == -1) {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     "  InitSpeaker() failed");
-    }
-
-    _playIsInitialized = true;
-
-    if (!_recIsInitialized) {
-        // Audio init
-        if (InitPlayOrRecord() == -1) {
-            // todo: Handle error
-            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                         "  InitPlayOrRecord() failed");
-        }
-    } else {
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-        "  Recording already initialized - InitPlayOrRecord() not called");
-    }
-
-    return 0;
-}
-
-bool AudioDeviceIOS::PlayoutIsInitialized() const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-    return (_playIsInitialized);
+  }
+  _playIsInitialized = true;
+  return 0;
 }
 
 int32_t AudioDeviceIOS::InitRecording() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
-    CriticalSectionScoped lock(&_critSect);
-
-    if (!_initialized) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Not initialized");
-        return -1;
+  LOGI() << "InitPlayout";
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(_initialized);
+  DCHECK(!_recIsInitialized);
+  DCHECK(!_recording);
+  if (!_playIsInitialized) {
+    if (InitPlayOrRecord() == -1) {
+      LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
+      return -1;
     }
-
-    if (_recording) {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     "  Recording already started");
-        return -1;
-    }
-
-    if (_recIsInitialized) {
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "  Recording already initialized");
-        return 0;
-    }
-
-    if (!_recordingDeviceIsSpecified) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Recording device is not specified");
-        return -1;
-    }
-
-    // Initialize the microphone
-    if (InitMicrophone() == -1) {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     "  InitMicrophone() failed");
-    }
-
-    _recIsInitialized = true;
-
-    if (!_playIsInitialized) {
-        // Audio init
-        if (InitPlayOrRecord() == -1) {
-            // todo: Handle error
-            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                         "  InitPlayOrRecord() failed");
-        }
-    } else {
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "  Playout already initialized - InitPlayOrRecord() " \
-                     "not called");
-    }
-
-    return 0;
-}
-
-bool AudioDeviceIOS::RecordingIsInitialized() const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-    return (_recIsInitialized);
-}
-
-int32_t AudioDeviceIOS::StartRecording() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
-    CriticalSectionScoped lock(&_critSect);
-
-    if (!_recIsInitialized) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Recording not initialized");
-        return -1;
-    }
-
-    if (_recording) {
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "  Recording already started");
-        return 0;
-    }
-
-    // Reset recording buffer
-    memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
-    memset(_recordingLength, 0, sizeof(_recordingLength));
-    memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
-    _recordingCurrentSeq = 0;
-    _recordingBufferTotalSize = 0;
-    _recordingDelay = 0;
-    _recordingDelayHWAndOS = 0;
-    // Make sure first call to update delay function will update delay
-    _recordingDelayMeasurementCounter = 9999;
-    _recWarning = 0;
-    _recError = 0;
-
-    if (!_playing) {
-        // Start Audio Unit
-        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
-                     "  Starting Audio Unit");
-        OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
-        if (0 != result) {
-            WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
-                         "  Error starting Audio Unit (result=%d)", result);
-            return -1;
-        }
-    }
-
-    _recording = true;
-
-    return 0;
-}
-
-int32_t AudioDeviceIOS::StopRecording() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
-    CriticalSectionScoped lock(&_critSect);
-
-    if (!_recIsInitialized) {
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "  Recording is not initialized");
-        return 0;
-    }
-
-    _recording = false;
-
-    if (!_playing) {
-        // Both playout and recording has stopped, shutdown the device
-        ShutdownPlayOrRecord();
-    }
-
-    _recIsInitialized = false;
-    _micIsInitialized = false;
-
-    return 0;
-}
-
-bool AudioDeviceIOS::Recording() const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-    return (_recording);
+  }
+  _recIsInitialized = true;
+  return 0;
 }
 
 int32_t AudioDeviceIOS::StartPlayout() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
+  LOGI() << "StartPlayout";
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(_playIsInitialized);
+  DCHECK(!_playing);
 
-    // This lock is (among other things) needed to avoid concurrency issues
-    // with capture thread
-    // shutting down Audio Unit
-    CriticalSectionScoped lock(&_critSect);
+  CriticalSectionScoped lock(&_critSect);
 
-    if (!_playIsInitialized) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Playout not initialized");
-        return -1;
+  memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
+  _playoutBufferUsed = 0;
+  _playoutDelay = 0;
+  // Make sure the first call to the delay update function updates the delay.
+  _playoutDelayMeasurementCounter = 9999;
+
+  if (!_recording) {
+    OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
+    if (result != noErr) {
+      LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
+      return -1;
     }
-
-    if (_playing) {
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "  Playing already started");
-        return 0;
-    }
-
-    // Reset playout buffer
-    memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
-    _playoutBufferUsed = 0;
-    _playoutDelay = 0;
-    // Make sure first call to update delay function will update delay
-    _playoutDelayMeasurementCounter = 9999;
-    _playWarning = 0;
-    _playError = 0;
-
-    if (!_recording) {
-        // Start Audio Unit
-        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
-                     "  Starting Audio Unit");
-        OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
-        if (0 != result) {
-            WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
-                         "  Error starting Audio Unit (result=%d)", result);
-            return -1;
-        }
-    }
-
-    _playing = true;
-
-    return 0;
+  }
+  _playing = true;
+  return 0;
 }
 
 int32_t AudioDeviceIOS::StopPlayout() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
-    CriticalSectionScoped lock(&_critSect);
-
-    if (!_playIsInitialized) {
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "  Playout is not initialized");
-        return 0;
-    }
-
-    _playing = false;
-
-    if (!_recording) {
-        // Both playout and recording has stopped, signal shutdown the device
-        ShutdownPlayOrRecord();
-    }
-
-    _playIsInitialized = false;
-    _speakerIsInitialized = false;
-
+  LOGI() << "StopPlayout";
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (!_playIsInitialized || !_playing) {
     return 0;
+  }
+
+  CriticalSectionScoped lock(&_critSect);
+
+  if (!_recording) {
+    // Both playout and recording have stopped, so shut down the device.
+    ShutdownPlayOrRecord();
+  }
+  _playIsInitialized = false;
+  _playing = false;
+  return 0;
 }
 
-bool AudioDeviceIOS::Playing() const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "%s", __FUNCTION__);
-    return (_playing);
+int32_t AudioDeviceIOS::StartRecording() {
+  LOGI() << "StartRecording";
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(_recIsInitialized);
+  DCHECK(!_recording);
+
+  CriticalSectionScoped lock(&_critSect);
+
+  memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
+  memset(_recordingLength, 0, sizeof(_recordingLength));
+  memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
+
+  _recordingCurrentSeq = 0;
+  _recordingBufferTotalSize = 0;
+  _recordingDelay = 0;
+  _recordingDelayHWAndOS = 0;
+  // Make sure the first call to the delay update function updates the delay.
+  _recordingDelayMeasurementCounter = 9999;
+
+  if (!_playing) {
+    OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
+    if (result != noErr) {
+      LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
+      return -1;
+    }
+  }
+  _recording = true;
+  return 0;
 }
 
-// ----------------------------------------------------------------------------
-//  ResetAudioDevice
-//
-//  Disable playout and recording, signal to capture thread to shutdown,
-//  and set enable states after shutdown to same as current.
-//  In capture thread audio device will be shutdown, then started again.
-// ----------------------------------------------------------------------------
-int32_t AudioDeviceIOS::ResetAudioDevice() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
-    CriticalSectionScoped lock(&_critSect);
-
-    if (!_playIsInitialized && !_recIsInitialized) {
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "  Playout or recording not initialized, doing nothing");
-        return 0;  // Nothing to reset
-    }
-
-    // Store the states we have before stopping to restart below
-    bool initPlay = _playIsInitialized;
-    bool play = _playing;
-    bool initRec = _recIsInitialized;
-    bool rec = _recording;
-
-    int res(0);
-
-    // Stop playout and recording
-    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
-                 "  Stopping playout and recording");
-    res += StopPlayout();
-    res += StopRecording();
-
-    // Restart
-    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
-                 "  Restarting playout and recording (%d, %d, %d, %d)",
-                 initPlay, play, initRec, rec);
-    if (initPlay) res += InitPlayout();
-    if (initRec)  res += InitRecording();
-    if (play)     res += StartPlayout();
-    if (rec)      res += StartRecording();
-
-    if (0 != res) {
-        // Logging is done in init/start/stop calls above
-        return -1;
-    }
-
+int32_t AudioDeviceIOS::StopRecording() {
+  LOGI() << "StopRecording";
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (!_recIsInitialized || !_recording) {
     return 0;
+  }
+
+  CriticalSectionScoped lock(&_critSect);
+
+  if (!_playing) {
+    // Both playout and recording have stopped, so shut down the device.
+    ShutdownPlayOrRecord();
+  }
+  _recIsInitialized = false;
+  _recording = false;
+  return 0;
+}
+
+// Change the default receiver playout route to speaker.
+int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
+  LOGI() << "SetLoudspeakerStatus(" << enable << ")";
+
+  AVAudioSession* session = [AVAudioSession sharedInstance];
+  NSString* category = session.category;
+  AVAudioSessionCategoryOptions options = session.categoryOptions;
+  // Respect old category options if category is
+  // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
+  // might not be valid for this category.
+  if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
+    if (enable) {
+      options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
+    } else {
+      options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
+    }
+  } else {
+    options = AVAudioSessionCategoryOptionDefaultToSpeaker;
+  }
+  NSError* error = nil;
+  BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
+                          withOptions:options
+                                error:&error];
+  return CheckAndLogError(success, error) ? 0 : -1;
+}
+
+int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool& enabled) const {
+  LOGI() << "GetLoudspeakerStatus";
+  AVAudioSession* session = [AVAudioSession sharedInstance];
+  AVAudioSessionCategoryOptions options = session.categoryOptions;
+  enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
+  return 0;
 }
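+
+// Usage sketch (illustrative, not part of this CL): route playout to the
+// built-in speaker and verify that the change took effect:
+//   SetLoudspeakerStatus(true);
+//   bool enabled = false;
+//   GetLoudspeakerStatus(enabled);
+//   DCHECK(enabled);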
 
 int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
-    delayMS = _playoutDelay;
-    return 0;
+  delayMS = _playoutDelay;
+  return 0;
 }
 
 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
-    delayMS = _recordingDelay;
-    return 0;
+  delayMS = _recordingDelay;
+  return 0;
 }
 
-int32_t
-    AudioDeviceIOS::SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
-                                     uint16_t sizeMS) {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
-                 "AudioDeviceIOS::SetPlayoutBuffer(type=%u, sizeMS=%u)",
-                 type, sizeMS);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
+int32_t AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
+                                      uint16_t& sizeMS) const {
+  type = AudioDeviceModule::kAdaptiveBufferSize;
+  sizeMS = _playoutDelay;
+  return 0;
 }
 
-int32_t
-    AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
-                                     uint16_t& sizeMS) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
-    type = AudioDeviceModule::kAdaptiveBufferSize;
-
-    sizeMS = _playoutDelay;
-
-    return 0;
+int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
+  CHECK(playout_parameters_.is_valid());
+  DCHECK(thread_checker_.CalledOnValidThread());
+  *params = playout_parameters_;
+  return 0;
 }
 
-int32_t AudioDeviceIOS::CPULoad(uint16_t& /*load*/) const {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
-    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                 "  API call not supported on this platform");
-    return -1;
-}
-
-bool AudioDeviceIOS::PlayoutWarning() const {
-    return (_playWarning > 0);
-}
-
-bool AudioDeviceIOS::PlayoutError() const {
-    return (_playError > 0);
-}
-
-bool AudioDeviceIOS::RecordingWarning() const {
-    return (_recWarning > 0);
-}
-
-bool AudioDeviceIOS::RecordingError() const {
-    return (_recError > 0);
-}
-
-void AudioDeviceIOS::ClearPlayoutWarning() {
-    _playWarning = 0;
-}
-
-void AudioDeviceIOS::ClearPlayoutError() {
-    _playError = 0;
-}
-
-void AudioDeviceIOS::ClearRecordingWarning() {
-    _recWarning = 0;
-}
-
-void AudioDeviceIOS::ClearRecordingError() {
-    _recError = 0;
+int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
+  CHECK(record_parameters_.is_valid());
+  DCHECK(thread_checker_.CalledOnValidThread());
+  *params = record_parameters_;
+  return 0;
 }
 
 // ============================================================================
@@ -1138,775 +426,628 @@
 // ============================================================================
 
 int32_t AudioDeviceIOS::InitPlayOrRecord() {
-    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
+  LOGI() << "AudioDeviceIOS::InitPlayOrRecord";
+  DCHECK(!_auVoiceProcessing);
 
-    OSStatus result = -1;
+  OSStatus result = -1;
 
-    // Check if already initialized
-    if (NULL != _auVoiceProcessing) {
-        // We already have initialized before and created any of the audio unit,
-        // check that all exist
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "  Already initialized");
-        // todo: Call AudioUnitReset() here and empty all buffers?
-        return 0;
-    }
+  // Create Voice Processing Audio Unit
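+  // The VoiceProcessingIO subtype provides Apple's built-in acoustic echo
+  // cancellation and automatic gain control on top of the basic remote I/O
+  // unit.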
+  AudioComponentDescription desc;
+  AudioComponent comp;
 
-    // Create Voice Processing Audio Unit
-    AudioComponentDescription desc;
-    AudioComponent comp;
+  desc.componentType = kAudioUnitType_Output;
+  desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
+  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+  desc.componentFlags = 0;
+  desc.componentFlagsMask = 0;
 
-    desc.componentType = kAudioUnitType_Output;
-    desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
-    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
-    desc.componentFlags = 0;
-    desc.componentFlagsMask = 0;
+  comp = AudioComponentFindNext(nullptr, &desc);
+  if (nullptr == comp) {
+    LOG_F(LS_ERROR) << "Could not find audio component for Audio Unit";
+    return -1;
+  }
 
-    comp = AudioComponentFindNext(NULL, &desc);
-    if (NULL == comp) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Could not find audio component for Audio Unit");
-        return -1;
-    }
+  result = AudioComponentInstanceNew(comp, &_auVoiceProcessing);
+  if (0 != result) {
+    LOG_F(LS_ERROR) << "Failed to create Audio Unit instance: " << result;
+    return -1;
+  }
 
-    result = AudioComponentInstanceNew(comp, &_auVoiceProcessing);
-    if (0 != result) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Could not create Audio Unit instance (result=%d)",
-                     result);
-        return -1;
-    }
+  // TODO(henrika): I think we should set the preferred channel configuration
+  // in both directions as well to be safe.
 
-    // Set preferred hardware sample rate to 16 kHz
-    NSError* error = nil;
-    AVAudioSession* session = [AVAudioSession sharedInstance];
-    Float64 preferredSampleRate(16000.0);
-    [session setPreferredSampleRate:preferredSampleRate
-                              error:&error];
-    if (error != nil) {
-        const char* errorString = [[error localizedDescription] UTF8String];
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "Could not set preferred sample rate: %s", errorString);
-    }
-    error = nil;
-    // Make the setMode:error: and setCategory:error: calls only if necessary.
-    // Non-obviously, setting them to the value they already have will clear
-    // transient properties (such as PortOverride) that some other component may
-    // have set up.
-    if (session.mode != AVAudioSessionModeVoiceChat) {
-      [session setMode:AVAudioSessionModeVoiceChat error:&error];
-      if (error != nil) {
-        const char* errorString = [[error localizedDescription] UTF8String];
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "Could not set mode: %s", errorString);
-      }
-    }
-    error = nil;
-    if (session.category != AVAudioSessionCategoryPlayAndRecord) {
-      [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
-      if (error != nil) {
-        const char* errorString = [[error localizedDescription] UTF8String];
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "Could not set category: %s", errorString);
-      }
-    }
+  // Set the preferred hardware sample rate to the configured playout rate.
+  // TODO(henrika): improve this selection of sample rate. Why do we currently
+  // use a hard coded value? How can we fail and still continue?
+  NSError* error = nil;
+  AVAudioSession* session = [AVAudioSession sharedInstance];
+  Float64 preferredSampleRate(playout_parameters_.sample_rate());
+  [session setPreferredSampleRate:preferredSampleRate error:&error];
+  if (error != nil) {
+    const char* errorString = [[error localizedDescription] UTF8String];
+    LOG_F(LS_ERROR) << "setPreferredSampleRate failed: " << errorString;
+  }
 
-    //////////////////////
-    // Setup Voice Processing Audio Unit
+  // TODO(henrika): we can reduce latency by setting the IOBufferDuration
+  // here. Default size for 16kHz is 0.016 sec or 16 msec on an iPhone 6.
 
-    // Note: For Signal Processing AU element 0 is output bus, element 1 is
-    //       input bus for global scope element is irrelevant (always use
-    //       element 0)
+  // Activate the audio session.
+  ActivateAudioSession(session, true);
 
-    // Enable IO on both elements
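+  // Enable IO on both elements. By Audio Unit convention, element 1 is the
+  // input bus (mic) and element 0 is the output bus (speaker); for the
+  // global scope the element number is irrelevant.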
+  UInt32 enableIO = 1;
+  result = AudioUnitSetProperty(_auVoiceProcessing,
+                                kAudioOutputUnitProperty_EnableIO,
+                                kAudioUnitScope_Input,
+                                1,  // input bus
+                                &enableIO, sizeof(enableIO));
+  if (0 != result) {
+    LOG_F(LS_ERROR) << "Failed to enable IO on input: " << result;
+  }
 
-    // todo: Below we just log and continue upon error. We might want
-    //       to close AU and return error for some cases.
-    // todo: Log info about setup.
+  result = AudioUnitSetProperty(_auVoiceProcessing,
+                                kAudioOutputUnitProperty_EnableIO,
+                                kAudioUnitScope_Output,
+                                0,  // output bus
+                                &enableIO, sizeof(enableIO));
+  if (0 != result) {
+    LOG_F(LS_ERROR) << "Failed to enable IO on output: " << result;
+  }
 
-    UInt32 enableIO = 1;
-    result = AudioUnitSetProperty(_auVoiceProcessing,
-                                  kAudioOutputUnitProperty_EnableIO,
-                                  kAudioUnitScope_Input,
-                                  1,  // input bus
-                                  &enableIO,
-                                  sizeof(enableIO));
-    if (0 != result) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Could not enable IO on input (result=%d)", result);
-    }
+  // Disable AU buffer allocation for the recorder; we allocate our own.
+  // TODO(henrika): understand this part better.
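+  // Clearing kAudioUnitProperty_ShouldAllocateBuffer on the output scope of
+  // the input element means that the caller must supply AudioUnitRender()
+  // with a buffer list whose mData pointers are valid, which is what
+  // RecordProcessImpl() does.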
+  UInt32 flag = 0;
+  result = AudioUnitSetProperty(_auVoiceProcessing,
+                                kAudioUnitProperty_ShouldAllocateBuffer,
+                                kAudioUnitScope_Output, 1, &flag, sizeof(flag));
+  if (0 != result) {
+    LOG_F(LS_WARNING) << "Failed to disable AU buffer allocation: " << result;
+    // Should work anyway
+  }
 
-    result = AudioUnitSetProperty(_auVoiceProcessing,
-                                  kAudioOutputUnitProperty_EnableIO,
-                                  kAudioUnitScope_Output,
-                                  0,   // output bus
-                                  &enableIO,
-                                  sizeof(enableIO));
-    if (0 != result) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Could not enable IO on output (result=%d)", result);
-    }
+  // Set recording callback.
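+  // RecordProcess() is invoked on a real-time CoreAudio thread each time a
+  // new buffer of mic samples is available; it pulls the samples out of the
+  // unit with AudioUnitRender().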
+  AURenderCallbackStruct auCbS;
+  memset(&auCbS, 0, sizeof(auCbS));
+  auCbS.inputProc = RecordProcess;
+  auCbS.inputProcRefCon = this;
+  result = AudioUnitSetProperty(
+      _auVoiceProcessing, kAudioOutputUnitProperty_SetInputCallback,
+      kAudioUnitScope_Global, 1, &auCbS, sizeof(auCbS));
+  if (0 != result) {
+    LOG_F(LS_ERROR) << "Failed to set AU record callback: " << result;
+  }
 
-    // Disable AU buffer allocation for the recorder, we allocate our own
-    UInt32 flag = 0;
-    result = AudioUnitSetProperty(
-        _auVoiceProcessing, kAudioUnitProperty_ShouldAllocateBuffer,
-        kAudioUnitScope_Output,  1, &flag, sizeof(flag));
-    if (0 != result) {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     "  Could not disable AU buffer allocation (result=%d)",
-                     result);
-        // Should work anyway
-    }
+  // Set playout callback.
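+  // PlayoutProcess() is the render callback for the output element; it
+  // fills each requested buffer with samples fetched from the
+  // AudioDeviceBuffer in 10 ms chunks.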
+  memset(&auCbS, 0, sizeof(auCbS));
+  auCbS.inputProc = PlayoutProcess;
+  auCbS.inputProcRefCon = this;
+  result = AudioUnitSetProperty(
+      _auVoiceProcessing, kAudioUnitProperty_SetRenderCallback,
+      kAudioUnitScope_Global, 0, &auCbS, sizeof(auCbS));
+  if (0 != result) {
+    LOG_F(LS_ERROR) << "Failed to set AU output callback: " << result;
+  }
 
-    // Set recording callback
-    AURenderCallbackStruct auCbS;
-    memset(&auCbS, 0, sizeof(auCbS));
-    auCbS.inputProc = RecordProcess;
-    auCbS.inputProcRefCon = this;
-    result = AudioUnitSetProperty(_auVoiceProcessing,
-                                  kAudioOutputUnitProperty_SetInputCallback,
-                                  kAudioUnitScope_Global, 1,
-                                  &auCbS, sizeof(auCbS));
-    if (0 != result) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-            "  Could not set record callback for Audio Unit (result=%d)",
-            result);
-    }
+  // Get stream format for out/0.
+  AudioStreamBasicDescription playoutDesc;
+  UInt32 size = sizeof(playoutDesc);
+  result =
+      AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
+                           kAudioUnitScope_Output, 0, &playoutDesc, &size);
+  if (0 != result) {
+    LOG_F(LS_ERROR) << "Failed to get AU output stream format: " << result;
+  }
 
-    // Set playout callback
-    memset(&auCbS, 0, sizeof(auCbS));
-    auCbS.inputProc = PlayoutProcess;
-    auCbS.inputProcRefCon = this;
-    result = AudioUnitSetProperty(_auVoiceProcessing,
-                                  kAudioUnitProperty_SetRenderCallback,
-                                  kAudioUnitScope_Global, 0,
-                                  &auCbS, sizeof(auCbS));
-    if (0 != result) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-            "  Could not set play callback for Audio Unit (result=%d)",
-            result);
-    }
+  LOG(LS_INFO) << "Audio Unit playout opened in sampling rate: "
+               << playoutDesc.mSampleRate;
+  playoutDesc.mSampleRate = preferredSampleRate;
 
-    // Get stream format for out/0
-    AudioStreamBasicDescription playoutDesc;
-    UInt32 size = sizeof(playoutDesc);
-    result = AudioUnitGetProperty(_auVoiceProcessing,
-                                  kAudioUnitProperty_StreamFormat,
-                                  kAudioUnitScope_Output, 0, &playoutDesc,
-                                  &size);
-    if (0 != result) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-            "  Could not get stream format Audio Unit out/0 (result=%d)",
-            result);
-    }
-    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                 "  Audio Unit playout opened in sampling rate %f",
-                 playoutDesc.mSampleRate);
+  // Store the sample rate that the AudioDeviceBuffer should use.
+  // TODO(henrika): add 48 kHz (increase buffer sizes). Other rates?
+  // TODO(henrika): Figure out if we really need this complex handling.
+  if ((playoutDesc.mSampleRate > 44090.0) &&
+      (playoutDesc.mSampleRate < 44110.0)) {
+    _adbSampFreq = 44100;
+  } else if ((playoutDesc.mSampleRate > 15990.0) &&
+             (playoutDesc.mSampleRate < 16010.0)) {
+    _adbSampFreq = 16000;
+  } else if ((playoutDesc.mSampleRate > 7990.0) &&
+             (playoutDesc.mSampleRate < 8010.0)) {
+    _adbSampFreq = 8000;
+  } else {
+    _adbSampFreq = 0;
+    FATAL() << "Invalid sample rate";
+  }
 
-    playoutDesc.mSampleRate = preferredSampleRate;
+  // Set the audio device buffer sampling rates (use same for play and record).
+  // TODO(henrika): this is not a good place to set these things up.
+  DCHECK(audio_device_buffer_);
+  DCHECK_EQ(_adbSampFreq, playout_parameters_.sample_rate());
+  audio_device_buffer_->SetRecordingSampleRate(_adbSampFreq);
+  audio_device_buffer_->SetPlayoutSampleRate(_adbSampFreq);
 
-    // Store the sampling frequency to use towards the Audio Device Buffer
-    // todo: Add 48 kHz (increase buffer sizes). Other fs?
-    if ((playoutDesc.mSampleRate > 44090.0)
-        && (playoutDesc.mSampleRate < 44110.0)) {
-        _adbSampFreq = 44100;
-    } else if ((playoutDesc.mSampleRate > 15990.0)
-               && (playoutDesc.mSampleRate < 16010.0)) {
-        _adbSampFreq = 16000;
-    } else if ((playoutDesc.mSampleRate > 7990.0)
-               && (playoutDesc.mSampleRate < 8010.0)) {
-        _adbSampFreq = 8000;
-    } else {
-        _adbSampFreq = 0;
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-            "  Audio Unit out/0 opened in unknown sampling rate (%f)",
-            playoutDesc.mSampleRate);
-        // todo: We should bail out here.
-    }
+  // Set stream format for in/0 (use same sample rate as for out/0).
+  playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
+                             kLinearPCMFormatFlagIsPacked |
+                             kLinearPCMFormatFlagIsNonInterleaved;
+  playoutDesc.mBytesPerPacket = 2;
+  playoutDesc.mFramesPerPacket = 1;
+  playoutDesc.mBytesPerFrame = 2;
+  playoutDesc.mChannelsPerFrame = 1;
+  playoutDesc.mBitsPerChannel = 16;
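+  // I.e. packed, non-interleaved, signed 16-bit mono PCM: one 2-byte sample
+  // per frame and one frame per packet.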
+  result =
+      AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
+                           kAudioUnitScope_Input, 0, &playoutDesc, size);
+  if (0 != result) {
+    LOG_F(LS_ERROR) << "Failed to set AU stream format for out/0";
+  }
 
-    // Set the audio device buffer sampling rate,
-    // we assume we get the same for play and record
-    if (_ptrAudioBuffer->SetRecordingSampleRate(_adbSampFreq) < 0) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-            "  Could not set audio device buffer recording sampling rate (%d)",
-            _adbSampFreq);
-    }
+  // Get stream format for in/1.
+  AudioStreamBasicDescription recordingDesc;
+  size = sizeof(recordingDesc);
+  result =
+      AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
+                           kAudioUnitScope_Input, 1, &recordingDesc, &size);
+  if (0 != result) {
+    LOG_F(LS_ERROR) << "Failed to get AU stream format for in/1";
+  }
 
-    if (_ptrAudioBuffer->SetPlayoutSampleRate(_adbSampFreq) < 0) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-            "  Could not set audio device buffer playout sampling rate (%d)",
-            _adbSampFreq);
-    }
+  LOG(LS_INFO) << "Audio Unit recording opened in sampling rate: "
+               << recordingDesc.mSampleRate;
+  recordingDesc.mSampleRate = preferredSampleRate;
 
-    // Set stream format for in/0  (use same sampling frequency as for out/0)
-    playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
-                               | kLinearPCMFormatFlagIsPacked
-                               | kLinearPCMFormatFlagIsNonInterleaved;
-    playoutDesc.mBytesPerPacket = 2;
-    playoutDesc.mFramesPerPacket = 1;
-    playoutDesc.mBytesPerFrame = 2;
-    playoutDesc.mChannelsPerFrame = 1;
-    playoutDesc.mBitsPerChannel = 16;
-    result = AudioUnitSetProperty(_auVoiceProcessing,
-                                  kAudioUnitProperty_StreamFormat,
-                                  kAudioUnitScope_Input, 0, &playoutDesc, size);
-    if (0 != result) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-            "  Could not set stream format Audio Unit in/0 (result=%d)",
-            result);
-    }
+  // Set stream format for out/1 (use same sampling frequency as for in/1).
+  recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
+                               kLinearPCMFormatFlagIsPacked |
+                               kLinearPCMFormatFlagIsNonInterleaved;
+  recordingDesc.mBytesPerPacket = 2;
+  recordingDesc.mFramesPerPacket = 1;
+  recordingDesc.mBytesPerFrame = 2;
+  recordingDesc.mChannelsPerFrame = 1;
+  recordingDesc.mBitsPerChannel = 16;
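+  // Same packed 16-bit mono PCM layout as on the playout side.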
+  result =
+      AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
+                           kAudioUnitScope_Output, 1, &recordingDesc, size);
+  if (0 != result) {
+    LOG_F(LS_ERROR) << "Failed to set AU stream format for out/1";
+  }
 
-    // Get stream format for in/1
-    AudioStreamBasicDescription recordingDesc;
-    size = sizeof(recordingDesc);
-    result = AudioUnitGetProperty(_auVoiceProcessing,
-                                  kAudioUnitProperty_StreamFormat,
-                                  kAudioUnitScope_Input, 1, &recordingDesc,
-                                  &size);
-    if (0 != result) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-            "  Could not get stream format Audio Unit in/1 (result=%d)",
-            result);
-    }
-    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                 "  Audio Unit recording opened in sampling rate %f",
-                 recordingDesc.mSampleRate);
+  // Initialize here already to be able to get/set stream properties.
+  result = AudioUnitInitialize(_auVoiceProcessing);
+  if (0 != result) {
+    LOG_F(LS_ERROR) << "AudioUnitInitialize failed: " << result;
+  }
 
-    recordingDesc.mSampleRate = preferredSampleRate;
+  // Get hardware sample rate for logging (see if we get what we asked for).
+  // TODO(henrika): what if we don't get what we ask for?
+  double sampleRate = session.sampleRate;
+  LOG(LS_INFO) << "Current HW sample rate is: " << sampleRate
+               << ", ADB sample rate is: " << _adbSampFreq;
+  LOG(LS_INFO) << "Current HW IO buffer size is: " <<
+      [session IOBufferDuration];
 
-    // Set stream format for out/1 (use same sampling frequency as for in/1)
-    recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
-                                 | kLinearPCMFormatFlagIsPacked
-                                 | kLinearPCMFormatFlagIsNonInterleaved;
+  // Listen to audio interruptions.
+  // TODO(henrika): learn this area better.
+  NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
+  id observer = [center
+      addObserverForName:AVAudioSessionInterruptionNotification
+                  object:nil
+                   queue:[NSOperationQueue mainQueue]
+              usingBlock:^(NSNotification* notification) {
+                NSNumber* typeNumber =
+                    [notification userInfo][AVAudioSessionInterruptionTypeKey];
+                AVAudioSessionInterruptionType type =
+                    (AVAudioSessionInterruptionType)[typeNumber
+                                                         unsignedIntegerValue];
+                switch (type) {
+                  case AVAudioSessionInterruptionTypeBegan:
+                    // At this point our audio session has been deactivated
+                    // and the audio unit render callbacks no longer occur.
+                    // Nothing to do.
+                    break;
+                  case AVAudioSessionInterruptionTypeEnded: {
+                    NSError* error = nil;
+                    AVAudioSession* session = [AVAudioSession sharedInstance];
+                    [session setActive:YES error:&error];
+                    if (error != nil) {
+                      LOG_F(LS_ERROR) << "Failed to active audio session";
+                    }
+                    // After an interruption the audio unit render callbacks
+                    // do not resume automatically, so we restart the unit
+                    // manually here.
+                    AudioOutputUnitStop(_auVoiceProcessing);
+                    AudioOutputUnitStart(_auVoiceProcessing);
+                    break;
+                  }
+                }
+              }];
+  // Increment refcount on observer using ARC bridge. Instance variable is a
+  // void* instead of an id because header is included in other pure C++
+  // files.
+  _audioInterruptionObserver = (__bridge_retained void*)observer;
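+  // The matching __bridge_transfer, which releases the observer again, is
+  // done in ShutdownPlayOrRecord().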
 
-    recordingDesc.mBytesPerPacket = 2;
-    recordingDesc.mFramesPerPacket = 1;
-    recordingDesc.mBytesPerFrame = 2;
-    recordingDesc.mChannelsPerFrame = 1;
-    recordingDesc.mBitsPerChannel = 16;
-    result = AudioUnitSetProperty(_auVoiceProcessing,
-                                  kAudioUnitProperty_StreamFormat,
-                                  kAudioUnitScope_Output, 1, &recordingDesc,
-                                  size);
-    if (0 != result) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-            "  Could not set stream format Audio Unit out/1 (result=%d)",
-            result);
-    }
+  // Deactivate the audio session.
+  ActivateAudioSession(session, false);
 
-    // Initialize here already to be able to get/set stream properties.
-    result = AudioUnitInitialize(_auVoiceProcessing);
-    if (0 != result) {
-        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                     "  Could not init Audio Unit (result=%d)", result);
-    }
-
-    // Get hardware sample rate for logging (see if we get what we asked for)
-    double sampleRate = session.sampleRate;
-    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
-                 "  Current HW sample rate is %f, ADB sample rate is %d",
-                 sampleRate, _adbSampFreq);
-
-    // Listen to audio interruptions.
-    NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
-    id observer =
-        [center addObserverForName:AVAudioSessionInterruptionNotification
-                            object:nil
-                             queue:[NSOperationQueue mainQueue]
-                        usingBlock:^(NSNotification* notification) {
-          NSNumber* typeNumber =
-              [notification userInfo][AVAudioSessionInterruptionTypeKey];
-          AVAudioSessionInterruptionType type =
-              (AVAudioSessionInterruptionType)[typeNumber unsignedIntegerValue];
-          switch (type) {
-            case AVAudioSessionInterruptionTypeBegan:
-              // At this point our audio session has been deactivated and the
-              // audio unit render callbacks no longer occur. Nothing to do.
-              break;
-            case AVAudioSessionInterruptionTypeEnded: {
-              NSError* error = nil;
-              AVAudioSession* session = [AVAudioSession sharedInstance];
-              [session setActive:YES
-                           error:&error];
-              if (error != nil) {
-                  WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                               "Error activating audio session");
-              }
-              // Post interruption the audio unit render callbacks don't
-              // automatically continue, so we restart the unit manually here.
-              AudioOutputUnitStop(_auVoiceProcessing);
-              AudioOutputUnitStart(_auVoiceProcessing);
-              break;
-            }
-          }
-        }];
-    // Increment refcount on observer using ARC bridge. Instance variable is a
-    // void* instead of an id because header is included in other pure C++
-    // files.
-    _audioInterruptionObserver = (__bridge_retained void*)observer;
-
-    // Activate audio session.
-    error = nil;
-    [session setActive:YES
-                 error:&error];
-    if (error != nil) {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     "Error activating audio session");
-    }
-
-    return 0;
+  return 0;
 }
 
 int32_t AudioDeviceIOS::ShutdownPlayOrRecord() {
-    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
+  LOGI() << "ShutdownPlayOrRecord";
 
-    if (_audioInterruptionObserver != NULL) {
-        NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
-        // Transfer ownership of observer back to ARC, which will dealloc the
-        // observer once it exits this scope.
-        id observer = (__bridge_transfer id)_audioInterruptionObserver;
-        [center removeObserver:observer];
-        _audioInterruptionObserver = NULL;
+  if (_audioInterruptionObserver != nullptr) {
+    NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
+    // Transfer ownership of observer back to ARC, which will dealloc the
+    // observer once it exits this scope.
+    id observer = (__bridge_transfer id)_audioInterruptionObserver;
+    [center removeObserver:observer];
+    _audioInterruptionObserver = nullptr;
+  }
+
+  // Close and delete AU.
+  OSStatus result = -1;
+  if (nullptr != _auVoiceProcessing) {
+    result = AudioOutputUnitStop(_auVoiceProcessing);
+    if (0 != result) {
+      LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
     }
-
-    // Close and delete AU
-    OSStatus result = -1;
-    if (NULL != _auVoiceProcessing) {
-        result = AudioOutputUnitStop(_auVoiceProcessing);
-        if (0 != result) {
-            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                "  Error stopping Audio Unit (result=%d)", result);
-        }
-        result = AudioComponentInstanceDispose(_auVoiceProcessing);
-        if (0 != result) {
-            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                "  Error disposing Audio Unit (result=%d)", result);
-        }
-        _auVoiceProcessing = NULL;
+    result = AudioComponentInstanceDispose(_auVoiceProcessing);
+    if (0 != result) {
+      LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
     }
+    _auVoiceProcessing = nullptr;
+  }
 
-    return 0;
+  return 0;
 }
 
 // ============================================================================
 //                                  Thread Methods
 // ============================================================================
 
-OSStatus
-    AudioDeviceIOS::RecordProcess(void *inRefCon,
-                                  AudioUnitRenderActionFlags *ioActionFlags,
-                                  const AudioTimeStamp *inTimeStamp,
-                                  UInt32 inBusNumber,
-                                  UInt32 inNumberFrames,
-                                  AudioBufferList *ioData) {
-    AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
-
-    return ptrThis->RecordProcessImpl(ioActionFlags,
-                                      inTimeStamp,
-                                      inBusNumber,
-                                      inNumberFrames);
+OSStatus AudioDeviceIOS::RecordProcess(
+    void* inRefCon,
+    AudioUnitRenderActionFlags* ioActionFlags,
+    const AudioTimeStamp* inTimeStamp,
+    UInt32 inBusNumber,
+    UInt32 inNumberFrames,
+    AudioBufferList* ioData) {
+  AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
+  return ptrThis->RecordProcessImpl(ioActionFlags, inTimeStamp, inBusNumber,
+                                    inNumberFrames);
 }
 
+OSStatus AudioDeviceIOS::RecordProcessImpl(
+    AudioUnitRenderActionFlags* ioActionFlags,
+    const AudioTimeStamp* inTimeStamp,
+    uint32_t inBusNumber,
+    uint32_t inNumberFrames) {
+  // Use a temporary buffer so that the recording buffers are not locked
+  // longer than necessary.
+  // todo: Make dataTmp a member variable with static size that holds
+  //       max possible frames?
+  int16_t* dataTmp = new int16_t[inNumberFrames];
+  memset(dataTmp, 0, 2 * inNumberFrames);
 
-OSStatus
-    AudioDeviceIOS::RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
-                                      const AudioTimeStamp *inTimeStamp,
-                                      uint32_t inBusNumber,
-                                      uint32_t inNumberFrames) {
-    // Setup some basic stuff
-    // Use temp buffer not to lock up recording buffer more than necessary
-    // todo: Make dataTmp a member variable with static size that holds
-    //       max possible frames?
-    int16_t* dataTmp = new int16_t[inNumberFrames];
-    memset(dataTmp, 0, 2*inNumberFrames);
+  AudioBufferList abList;
+  abList.mNumberBuffers = 1;
+  abList.mBuffers[0].mData = dataTmp;
+  abList.mBuffers[0].mDataByteSize = 2 * inNumberFrames;  // 2 bytes/sample
+  abList.mBuffers[0].mNumberChannels = 1;
 
-    AudioBufferList abList;
-    abList.mNumberBuffers = 1;
-    abList.mBuffers[0].mData = dataTmp;
-    abList.mBuffers[0].mDataByteSize = 2*inNumberFrames;  // 2 bytes/sample
-    abList.mBuffers[0].mNumberChannels = 1;
-
-    // Get data from mic
-    OSStatus res = AudioUnitRender(_auVoiceProcessing,
-                                   ioActionFlags, inTimeStamp,
-                                   inBusNumber, inNumberFrames, &abList);
-    if (res != 0) {
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     "  Error getting rec data, error = %d", res);
-
-        if (_recWarning > 0) {
-            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                         "  Pending rec warning exists");
-        }
-        _recWarning = 1;
-
-        delete [] dataTmp;
-        return 0;
-    }
-
-    if (_recording) {
-        // Insert all data in temp buffer into recording buffers
-        // There is zero or one buffer partially full at any given time,
-        // all others are full or empty
-        // Full means filled with noSamp10ms samples.
-
-        const unsigned int noSamp10ms = _adbSampFreq / 100;
-        unsigned int dataPos = 0;
-        uint16_t bufPos = 0;
-        int16_t insertPos = -1;
-        unsigned int nCopy = 0;  // Number of samples to copy
-
-        while (dataPos < inNumberFrames) {
-            // Loop over all recording buffers or
-            // until we find the partially full buffer
-            // First choice is to insert into partially full buffer,
-            // second choice is to insert into empty buffer
-            bufPos = 0;
-            insertPos = -1;
-            nCopy = 0;
-            while (bufPos < N_REC_BUFFERS) {
-                if ((_recordingLength[bufPos] > 0)
-                    && (_recordingLength[bufPos] < noSamp10ms)) {
-                    // Found the partially full buffer
-                    insertPos = static_cast<int16_t>(bufPos);
-                    // Don't need to search more, quit loop
-                    bufPos = N_REC_BUFFERS;
-                } else if ((-1 == insertPos)
-                           && (0 == _recordingLength[bufPos])) {
-                    // Found an empty buffer
-                    insertPos = static_cast<int16_t>(bufPos);
-                }
-                ++bufPos;
-            }
-
-            // Insert data into buffer
-            if (insertPos > -1) {
-                // We found a non-full buffer, copy data to it
-                unsigned int dataToCopy = inNumberFrames - dataPos;
-                unsigned int currentRecLen = _recordingLength[insertPos];
-                unsigned int roomInBuffer = noSamp10ms - currentRecLen;
-                nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer);
-
-                memcpy(&_recordingBuffer[insertPos][currentRecLen],
-                       &dataTmp[dataPos], nCopy*sizeof(int16_t));
-                if (0 == currentRecLen) {
-                    _recordingSeqNumber[insertPos] = _recordingCurrentSeq;
-                    ++_recordingCurrentSeq;
-                }
-                _recordingBufferTotalSize += nCopy;
-                // Has to be done last to avoid interrupt problems
-                // between threads
-                _recordingLength[insertPos] += nCopy;
-                dataPos += nCopy;
-            } else {
-                // Didn't find a non-full buffer
-                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                             "  Could not insert into recording buffer");
-                if (_recWarning > 0) {
-                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                                 "  Pending rec warning exists");
-                }
-                _recWarning = 1;
-                dataPos = inNumberFrames;  // Don't try to insert more
-            }
-        }
-    }
-
-    delete [] dataTmp;
-
+  // Get data from mic
+  OSStatus res = AudioUnitRender(_auVoiceProcessing, ioActionFlags, inTimeStamp,
+                                 inBusNumber, inNumberFrames, &abList);
+  if (res != 0) {
+    // TODO(henrika): improve error handling.
+    delete[] dataTmp;
     return 0;
-}
+  }
 
-OSStatus
-    AudioDeviceIOS::PlayoutProcess(void *inRefCon,
-                                   AudioUnitRenderActionFlags *ioActionFlags,
-                                   const AudioTimeStamp *inTimeStamp,
-                                   UInt32 inBusNumber,
-                                   UInt32 inNumberFrames,
-                                   AudioBufferList *ioData) {
-    AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
+  if (_recording) {
+    // Insert all data from the temp buffer into the recording buffers.
+    // At any given time, at most one buffer is partially full; all others
+    // are either full or empty. Full means filled with noSamp10ms samples.
 
-    return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
-}
+    const unsigned int noSamp10ms = _adbSampFreq / 100;
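+    // 10 ms of audio at fs Hz corresponds to fs / 100 samples, e.g. 160
+    // samples per buffer at 16 kHz.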
+    unsigned int dataPos = 0;
+    uint16_t bufPos = 0;
+    int16_t insertPos = -1;
+    unsigned int nCopy = 0;  // Number of samples to copy
 
-OSStatus
-    AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames,
-                                       AudioBufferList *ioData) {
-    // Setup some basic stuff
-//    assert(sizeof(short) == 2); // Assumption for implementation
-
-    int16_t* data =
-        static_cast<int16_t*>(ioData->mBuffers[0].mData);
-    unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize;
-    unsigned int dataSize = dataSizeBytes/2;  // Number of samples
-        if (dataSize != inNumberFrames) {  // Should always be the same
-        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                     "dataSize (%u) != inNumberFrames (%u)",
-                     dataSize, (unsigned int)inNumberFrames);
-        if (_playWarning > 0) {
-            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                         "  Pending play warning exists");
+    while (dataPos < inNumberFrames) {
+      // Loop over all recording buffers, or stop early once the partially
+      // full buffer is found. First choice is to insert into the partially
+      // full buffer; second choice is an empty one.
+      bufPos = 0;
+      insertPos = -1;
+      nCopy = 0;
+      while (bufPos < N_REC_BUFFERS) {
+        if ((_recordingLength[bufPos] > 0) &&
+            (_recordingLength[bufPos] < noSamp10ms)) {
+          // Found the partially full buffer
+          insertPos = static_cast<int16_t>(bufPos);
+          // Don't need to search more, quit loop
+          bufPos = N_REC_BUFFERS;
+        } else if ((-1 == insertPos) && (0 == _recordingLength[bufPos])) {
+          // Found an empty buffer
+          insertPos = static_cast<int16_t>(bufPos);
         }
-        _playWarning = 1;
+        ++bufPos;
+      }
+
+      // Insert data into buffer
+      if (insertPos > -1) {
+        // We found a non-full buffer, copy data to it
+        unsigned int dataToCopy = inNumberFrames - dataPos;
+        unsigned int currentRecLen = _recordingLength[insertPos];
+        unsigned int roomInBuffer = noSamp10ms - currentRecLen;
+        nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer);
+
+        memcpy(&_recordingBuffer[insertPos][currentRecLen], &dataTmp[dataPos],
+               nCopy * sizeof(int16_t));
+        if (0 == currentRecLen) {
+          _recordingSeqNumber[insertPos] = _recordingCurrentSeq;
+          ++_recordingCurrentSeq;
+        }
+        _recordingBufferTotalSize += nCopy;
+        // Has to be done last to avoid interrupt problems between threads.
+        _recordingLength[insertPos] += nCopy;
+        dataPos += nCopy;
+      } else {
+        // Didn't find a non-full buffer
+        // TODO(henrika): improve error handling
+        dataPos = inNumberFrames;  // Don't try to insert more
+      }
     }
-    memset(data, 0, dataSizeBytes);  // Start with empty buffer
+  }
+  delete[] dataTmp;
+  return 0;
+}
 
+OSStatus AudioDeviceIOS::PlayoutProcess(
+    void* inRefCon,
+    AudioUnitRenderActionFlags* ioActionFlags,
+    const AudioTimeStamp* inTimeStamp,
+    UInt32 inBusNumber,
+    UInt32 inNumberFrames,
+    AudioBufferList* ioData) {
+  AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
+  return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
+}
 
-    // Get playout data from Audio Device Buffer
+OSStatus AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames,
+                                            AudioBufferList* ioData) {
+  int16_t* data = static_cast<int16_t*>(ioData->mBuffers[0].mData);
+  unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize;
+  unsigned int dataSize = dataSizeBytes / 2;  // Number of samples
+  CHECK_EQ(dataSize, inNumberFrames);
+  memset(data, 0, dataSizeBytes);  // Start with empty buffer
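+  // If playout is not active, the buffer is left zeroed and the unit
+  // renders silence.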
 
-    if (_playing) {
-        unsigned int noSamp10ms = _adbSampFreq / 100;
-        // todo: Member variable and allocate when samp freq is determined
-        int16_t* dataTmp = new int16_t[noSamp10ms];
-        memset(dataTmp, 0, 2*noSamp10ms);
-        unsigned int dataPos = 0;
-        int noSamplesOut = 0;
-        unsigned int nCopy = 0;
+  // Get playout data from Audio Device Buffer
 
-        // First insert data from playout buffer if any
-        if (_playoutBufferUsed > 0) {
-            nCopy = (dataSize < _playoutBufferUsed) ?
-                    dataSize : _playoutBufferUsed;
-            if (nCopy != _playoutBufferUsed) {
-                // todo: If dataSize < _playoutBufferUsed
-                //       (should normally never be)
-                //       we must move the remaining data
-                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                             "nCopy (%u) != _playoutBufferUsed (%u)",
-                             nCopy, _playoutBufferUsed);
-                if (_playWarning > 0) {
-                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                                 "  Pending play warning exists");
-                }
-                _playWarning = 1;
-            }
-            memcpy(data, _playoutBuffer, 2*nCopy);
-            dataPos = nCopy;
-            memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
-            _playoutBufferUsed = 0;
-        }
+  if (_playing) {
+    unsigned int noSamp10ms = _adbSampFreq / 100;
+    // todo: Member variable and allocate when samp freq is determined
+    int16_t* dataTmp = new int16_t[noSamp10ms];
+    memset(dataTmp, 0, 2 * noSamp10ms);
+    unsigned int dataPos = 0;
+    int noSamplesOut = 0;
+    unsigned int nCopy = 0;
 
-        // Now get the rest from Audio Device Buffer
-        while (dataPos < dataSize) {
-            // Update playout delay
-            UpdatePlayoutDelay();
-
-            // Ask for new PCM data to be played out using the AudioDeviceBuffer
-            noSamplesOut = _ptrAudioBuffer->RequestPlayoutData(noSamp10ms);
-
-            // Get data from Audio Device Buffer
-            noSamplesOut =
-                _ptrAudioBuffer->GetPlayoutData(
-                    reinterpret_cast<int8_t*>(dataTmp));
-            // Cast OK since only equality comparison
-            if (noSamp10ms != (unsigned int)noSamplesOut) {
-                // Should never happen
-                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                             "noSamp10ms (%u) != noSamplesOut (%d)",
-                             noSamp10ms, noSamplesOut);
-
-                if (_playWarning > 0) {
-                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
-                                 "  Pending play warning exists");
-                }
-                _playWarning = 1;
-            }
-
-            // Insert as much as fits in data buffer
-            nCopy = (dataSize-dataPos) > noSamp10ms ?
-                    noSamp10ms : (dataSize-dataPos);
-            memcpy(&data[dataPos], dataTmp, 2*nCopy);
-
-            // Save rest in playout buffer if any
-            if (nCopy < noSamp10ms) {
-                memcpy(_playoutBuffer, &dataTmp[nCopy], 2*(noSamp10ms-nCopy));
-                _playoutBufferUsed = noSamp10ms - nCopy;
-            }
-
-            // Update loop/index counter, if we copied less than noSamp10ms
-            // samples we shall quit loop anyway
-            dataPos += noSamp10ms;
-        }
-
-        delete [] dataTmp;
+    // First insert data from playout buffer if any
+    if (_playoutBufferUsed > 0) {
+      nCopy = (dataSize < _playoutBufferUsed) ? dataSize : _playoutBufferUsed;
+      DCHECK_EQ(nCopy, _playoutBufferUsed);
+      memcpy(data, _playoutBuffer, 2 * nCopy);
+      dataPos = nCopy;
+      memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
+      _playoutBufferUsed = 0;
     }
 
-    return 0;
+    // Now get the rest from Audio Device Buffer.
+    while (dataPos < dataSize) {
+      // Update playout delay
+      UpdatePlayoutDelay();
+
+      // Ask for new PCM data to be played out using the AudioDeviceBuffer
+      noSamplesOut = audio_device_buffer_->RequestPlayoutData(noSamp10ms);
+
+      // Get data from Audio Device Buffer
+      noSamplesOut = audio_device_buffer_->GetPlayoutData(
+          reinterpret_cast<int8_t*>(dataTmp));
+      CHECK_EQ(noSamp10ms, (unsigned int)noSamplesOut);
+
+      // Insert as much as fits in data buffer
+      nCopy =
+          (dataSize - dataPos) > noSamp10ms ? noSamp10ms : (dataSize - dataPos);
+      memcpy(&data[dataPos], dataTmp, 2 * nCopy);
+
+      // Save rest in playout buffer if any
+      if (nCopy < noSamp10ms) {
+        memcpy(_playoutBuffer, &dataTmp[nCopy], 2 * (noSamp10ms - nCopy));
+        _playoutBufferUsed = noSamp10ms - nCopy;
+      }
+
+      // Update the loop/index counter; if we copied fewer than noSamp10ms
+      // samples we will exit the loop anyway.
+      dataPos += noSamp10ms;
+    }
+    delete[] dataTmp;
+  }
+  return 0;
 }
 
+// TODO(henrika): can either be removed or simplified.
 void AudioDeviceIOS::UpdatePlayoutDelay() {
-    ++_playoutDelayMeasurementCounter;
+  ++_playoutDelayMeasurementCounter;
 
-    if (_playoutDelayMeasurementCounter >= 100) {
-        // Update HW and OS delay every second, unlikely to change
+  if (_playoutDelayMeasurementCounter >= 100) {
+    // Update the HW and OS delay every second; they are unlikely to change.
 
-        // Since this is eventually rounded to integral ms, add 0.5ms
-        // here to get round-to-nearest-int behavior instead of
-        // truncation.
-        double totalDelaySeconds = 0.0005;
+    // Since this is eventually rounded to integral ms, add 0.5ms
+    // here to get round-to-nearest-int behavior instead of
+    // truncation.
+    double totalDelaySeconds = 0.0005;
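+    // Example: a true delay of 12.5 ms accumulates to 0.0130 seconds here
+    // and becomes 13 ms after the conversion below.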
 
-        // HW output latency
-        AVAudioSession* session = [AVAudioSession sharedInstance];
-        double latency = session.outputLatency;
-        assert(latency >= 0);
-        totalDelaySeconds += latency;
+    // HW output latency
+    AVAudioSession* session = [AVAudioSession sharedInstance];
+    double latency = session.outputLatency;
+    assert(latency >= 0);
+    totalDelaySeconds += latency;
 
-        // HW buffer duration
-        double ioBufferDuration = session.IOBufferDuration;
-        assert(ioBufferDuration >= 0);
-        totalDelaySeconds += ioBufferDuration;
+    // HW buffer duration
+    double ioBufferDuration = session.IOBufferDuration;
+    assert(ioBufferDuration >= 0);
+    totalDelaySeconds += ioBufferDuration;
 
-        // AU latency
-        Float64 f64(0);
-        UInt32 size = sizeof(f64);
-        OSStatus result = AudioUnitGetProperty(
-            _auVoiceProcessing, kAudioUnitProperty_Latency,
-            kAudioUnitScope_Global, 0, &f64, &size);
-        if (0 != result) {
-            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                         "error AU latency (result=%d)", result);
-        }
-        assert(f64 >= 0);
-        totalDelaySeconds += f64;
-
-        // To ms
-        _playoutDelay = static_cast<uint32_t>(totalDelaySeconds / 1000);
-
-        // Reset counter
-        _playoutDelayMeasurementCounter = 0;
+    // AU latency
+    Float64 f64(0);
+    UInt32 size = sizeof(f64);
+    OSStatus result =
+        AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency,
+                             kAudioUnitScope_Global, 0, &f64, &size);
+    if (0 != result) {
+      LOG_F(LS_ERROR) << "AU latency error: " << result;
     }
+    assert(f64 >= 0);
+    totalDelaySeconds += f64;
 
-    // todo: Add playout buffer?
+    // Convert seconds to milliseconds.
+    _playoutDelay = static_cast<uint32_t>(totalDelaySeconds * 1000);
+
+    // Reset counter
+    _playoutDelayMeasurementCounter = 0;
+  }
+
+  // todo: Add playout buffer?
 }
 
 void AudioDeviceIOS::UpdateRecordingDelay() {
-    ++_recordingDelayMeasurementCounter;
+  ++_recordingDelayMeasurementCounter;
 
-    if (_recordingDelayMeasurementCounter >= 100) {
-        // Update HW and OS delay every second, unlikely to change
+  if (_recordingDelayMeasurementCounter >= 100) {
+    // Update the HW and OS delay every second; they are unlikely to change.
 
-        // Since this is eventually rounded to integral ms, add 0.5ms
-        // here to get round-to-nearest-int behavior instead of
-        // truncation.
-        double totalDelaySeconds = 0.0005;
+    // Since this is eventually rounded to integral ms, add 0.5ms
+    // here to get round-to-nearest-int behavior instead of
+    // truncation.
+    double totalDelaySeconds = 0.0005;
 
-        // HW input latency
-        AVAudioSession* session = [AVAudioSession sharedInstance];
-        double latency = session.inputLatency;
-        assert(latency >= 0);
-        totalDelaySeconds += latency;
+    // HW input latency
+    AVAudioSession* session = [AVAudioSession sharedInstance];
+    double latency = session.inputLatency;
+    assert(latency >= 0);
+    totalDelaySeconds += latency;
 
-        // HW buffer duration
-        double ioBufferDuration = session.IOBufferDuration;
-        assert(ioBufferDuration >= 0);
-        totalDelaySeconds += ioBufferDuration;
+    // HW buffer duration
+    double ioBufferDuration = session.IOBufferDuration;
+    assert(ioBufferDuration >= 0);
+    totalDelaySeconds += ioBufferDuration;
 
-        // AU latency
-        Float64 f64(0);
-        UInt32 size = sizeof(f64);
-        OSStatus result = AudioUnitGetProperty(
-             _auVoiceProcessing, kAudioUnitProperty_Latency,
-             kAudioUnitScope_Global, 0, &f64, &size);
-        if (0 != result) {
-            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
-                         "error AU latency (result=%d)", result);
-        }
-        assert(f64 >= 0);
-        totalDelaySeconds += f64;
-
-        // To ms
-        _recordingDelayHWAndOS =
-            static_cast<uint32_t>(totalDelaySeconds / 1000);
-
-        // Reset counter
-        _recordingDelayMeasurementCounter = 0;
+    // AU latency
+    Float64 f64(0);
+    UInt32 size = sizeof(f64);
+    OSStatus result =
+        AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency,
+                             kAudioUnitScope_Global, 0, &f64, &size);
+    if (0 != result) {
+      LOG_F(LS_ERROR) << "AU latency error: " << result;
     }
+    assert(f64 >= 0);
+    totalDelaySeconds += f64;
 
-    _recordingDelay = _recordingDelayHWAndOS;
+    // Convert seconds to milliseconds.
+    _recordingDelayHWAndOS = static_cast<uint32_t>(totalDelaySeconds * 1000);
 
-    // ADB recording buffer size, update every time
-    // Don't count the one next 10 ms to be sent, then convert samples => ms
-    const uint32_t noSamp10ms = _adbSampFreq / 100;
-    if (_recordingBufferTotalSize > noSamp10ms) {
-        _recordingDelay +=
-            (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000);
-    }
+    // Reset counter
+    _recordingDelayMeasurementCounter = 0;
+  }
+
+  _recordingDelay = _recordingDelayHWAndOS;
+
+  // Add the ADB recording buffer size; this part is updated on every call.
+  // Don't count the next 10 ms block about to be sent, then convert
+  // samples => ms.
+  const uint32_t noSamp10ms = _adbSampFreq / 100;
+  if (_recordingBufferTotalSize > noSamp10ms) {
+    _recordingDelay +=
+        (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000);
+  }
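+  // E.g. at 16 kHz: (480 buffered samples - 160 samples about to be sent) /
+  // 16 samples per ms adds 20 ms to the reported delay.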
 }
 
 bool AudioDeviceIOS::RunCapture(void* ptrThis) {
-    return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread();
+  return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread();
 }
 
 bool AudioDeviceIOS::CaptureWorkerThread() {
-    if (_recording) {
-        int bufPos = 0;
-        unsigned int lowestSeq = 0;
-        int lowestSeqBufPos = 0;
-        bool foundBuf = true;
-        const unsigned int noSamp10ms = _adbSampFreq / 100;
+  if (_recording) {
+    int bufPos = 0;
+    unsigned int lowestSeq = 0;
+    int lowestSeqBufPos = 0;
+    bool foundBuf = true;
+    const unsigned int noSamp10ms = _adbSampFreq / 100;
 
-        while (foundBuf) {
-            // Check if we have any buffer with data to insert
-            // into the Audio Device Buffer,
-            // and find the one with the lowest seq number
-            foundBuf = false;
-            for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) {
-                if (noSamp10ms == _recordingLength[bufPos]) {
-                    if (!foundBuf) {
-                        lowestSeq = _recordingSeqNumber[bufPos];
-                        lowestSeqBufPos = bufPos;
-                        foundBuf = true;
-                    } else if (_recordingSeqNumber[bufPos] < lowestSeq) {
-                        lowestSeq = _recordingSeqNumber[bufPos];
-                        lowestSeqBufPos = bufPos;
-                    }
-                }
-            }  // for
+    while (foundBuf) {
+      // Check if any buffer holds data ready to insert into the Audio
+      // Device Buffer, and find the one with the lowest sequence number.
+      foundBuf = false;
+      for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) {
+        if (noSamp10ms == _recordingLength[bufPos]) {
+          if (!foundBuf) {
+            lowestSeq = _recordingSeqNumber[bufPos];
+            lowestSeqBufPos = bufPos;
+            foundBuf = true;
+          } else if (_recordingSeqNumber[bufPos] < lowestSeq) {
+            lowestSeq = _recordingSeqNumber[bufPos];
+            lowestSeqBufPos = bufPos;
+          }
+        }
+      }
 
-            // Insert data into the Audio Device Buffer if found any
-            if (foundBuf) {
-                // Update recording delay
-                UpdateRecordingDelay();
+      // Insert data into the Audio Device Buffer if we found any.
+      if (foundBuf) {
+        // Update recording delay
+        UpdateRecordingDelay();
 
-                // Set the recorded buffer
-                _ptrAudioBuffer->SetRecordedBuffer(
-                    reinterpret_cast<int8_t*>(
-                        _recordingBuffer[lowestSeqBufPos]),
-                        _recordingLength[lowestSeqBufPos]);
+        // Set the recorded buffer
+        audio_device_buffer_->SetRecordedBuffer(
+            reinterpret_cast<int8_t*>(_recordingBuffer[lowestSeqBufPos]),
+            _recordingLength[lowestSeqBufPos]);
 
-                // Don't need to set the current mic level in ADB since we only
-                // support digital AGC,
-                // and besides we cannot get or set the IOS mic level anyway.
+        // No need to set the current mic level in the ADB since we only
+        // support digital AGC, and besides, we cannot get or set the iOS
+        // mic level anyway.
 
-                // Set VQE info, use clockdrift == 0
-                _ptrAudioBuffer->SetVQEData(_playoutDelay, _recordingDelay, 0);
+        // Set VQE info, use clockdrift == 0
+        audio_device_buffer_->SetVQEData(_playoutDelay, _recordingDelay, 0);
 
-                // Deliver recorded samples at specified sample rate, mic level
-                // etc. to the observer using callback
-                _ptrAudioBuffer->DeliverRecordedData();
+        // Deliver recorded samples at specified sample rate, mic level
+        // etc. to the observer using callback
+        audio_device_buffer_->DeliverRecordedData();
 
-                // Make buffer available
-                _recordingSeqNumber[lowestSeqBufPos] = 0;
-                _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos];
-                // Must be done last to avoid interrupt problems between threads
-                _recordingLength[lowestSeqBufPos] = 0;
-            }
-        }  // while (foundBuf)
-    }  // if (_recording)
-
-    {
-        // Normal case
-        // Sleep thread (5ms) to let other threads get to work
-        // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio
-        //       Device Buffer?
-        timespec t;
-        t.tv_sec = 0;
-        t.tv_nsec = 5*1000*1000;
-        nanosleep(&t, NULL);
+        // Make buffer available
+        _recordingSeqNumber[lowestSeqBufPos] = 0;
+        _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos];
+        // Must be done last to avoid interrupt problems between threads
+        _recordingLength[lowestSeqBufPos] = 0;
+      }
     }
+  }
 
-    return true;
+  {
+    // Normal case: sleep the thread for 5 ms to let other threads get time
+    // to work.
+    // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio
+    //       Device Buffer?
+    timespec t;
+    t.tv_sec = 0;
+    t.tv_nsec = 5 * 1000 * 1000;
+    nanosleep(&t, nullptr);
+  }
+  return true;
 }
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_device/ios/audio_device_not_implemented_ios.mm b/webrtc/modules/audio_device/ios/audio_device_not_implemented_ios.mm
new file mode 100644
index 0000000..24875cc
--- /dev/null
+++ b/webrtc/modules/audio_device/ios/audio_device_not_implemented_ios.mm
@@ -0,0 +1,286 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+
+namespace webrtc {
+
+int32_t AudioDeviceIOS::ActiveAudioLayer(
+    AudioDeviceModule::AudioLayer& audioLayer) const {
+  audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::ResetAudioDevice() {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int16_t AudioDeviceIOS::PlayoutDevices() {
+  // TODO(henrika): improve.
+  LOG_F(LS_WARNING) << "Not implemented";
+  return 1;
+}
+
+int16_t AudioDeviceIOS::RecordingDevices() {
+  // TODO(henrika): improve.
+  LOG_F(LS_WARNING) << "Not implemented";
+  return 1;
+}
+
+int32_t AudioDeviceIOS::InitSpeaker() {
+  return 0;
+}
+
+bool AudioDeviceIOS::SpeakerIsInitialized() const {
+  return true;
+}
+
+int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SetWaveOutVolume(uint16_t, uint16_t) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::WaveOutVolume(uint16_t&, uint16_t&) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MinSpeakerVolume(uint32_t& minVolume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerVolumeStepSize(uint16_t& stepSize) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
+  LOG_F(LS_WARNING) << "Not implemented";
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+bool AudioDeviceIOS::PlayoutWarning() const {
+  return false;
+}
+
+bool AudioDeviceIOS::PlayoutError() const {
+  return false;
+}
+
+bool AudioDeviceIOS::RecordingWarning() const {
+  return false;
+}
+
+bool AudioDeviceIOS::RecordingError() const {
+  return false;
+}
+
+int32_t AudioDeviceIOS::InitMicrophone() {
+  return 0;
+}
+
+bool AudioDeviceIOS::MicrophoneIsInitialized() const {
+  return true;
+}
+
+int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneBoostIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetMicrophoneBoost(bool enable) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneBoost(bool& enabled) const {
+  enabled = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
+  LOG_F(LS_WARNING) << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
+  enabled = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
+  LOG_F(LS_WARNING) << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
+  enabled = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetAGC(bool enable) {
+  if (enable) {
+    RTC_NOTREACHED() << "Should never be called";
+  }
+  return -1;
+}
+
+bool AudioDeviceIOS::AGC() const {
+  return false;
+}
+
+int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneVolumeStepSize(uint16_t& stepSize) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
+                                          char name[kAdmMaxDeviceNameSize],
+                                          char guid[kAdmMaxGuidSize]) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::RecordingDeviceName(uint16_t index,
+                                            char name[kAdmMaxDeviceNameSize],
+                                            char guid[kAdmMaxGuidSize]) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
+  LOG_F(LS_WARNING) << "Not implemented";
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetRecordingDevice(
+    AudioDeviceModule::WindowsDeviceType) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
+  available = true;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
+  available = true;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetPlayoutBuffer(
+    const AudioDeviceModule::BufferType type,
+    uint16_t sizeMS) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::CPULoad(uint16_t&) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
new file mode 100644
index 0000000..b75f18f
--- /dev/null
+++ b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
@@ -0,0 +1,788 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <limits>
+#include <list>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/arraysize.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/scoped_ref_ptr.h"
+#include "webrtc/modules/audio_device/audio_device_impl.h"
+#include "webrtc/modules/audio_device/include/audio_device.h"
+#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+#include "webrtc/system_wrappers/interface/event_wrapper.h"
+#include "webrtc/system_wrappers/interface/sleep.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+using std::cout;
+using std::endl;
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Gt;
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::NotNull;
+using ::testing::Return;
+
+// #define ENABLE_DEBUG_PRINTF
+#ifdef ENABLE_DEBUG_PRINTF
+#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
+#else
+#define PRINTD(...) ((void)0)
+#endif
+#define PRINT(...) fprintf(stderr, __VA_ARGS__);
+
+namespace webrtc {
+
+// Number of callbacks (input or output) the test waits for before we set
+// an event indicating that the test was OK.
+static const int kNumCallbacks = 10;
+// Max amount of time we wait for an event to be set while counting callbacks.
+static const int kTestTimeOutInMilliseconds = 10 * 1000;
+// Number of bits per PCM audio sample.
+static const int kBitsPerSample = 16;
+// Number of bytes per PCM audio sample.
+static const int kBytesPerSample = kBitsPerSample / 8;
+// Average number of audio callbacks per second assuming 10ms packet size.
+static const int kNumCallbacksPerSecond = 100;
+// Play out a test file during this time (unit is seconds).
+static const int kFilePlayTimeInSec = 15;
+// Run the full-duplex test during this time (unit is seconds).
+// Note that the first |kNumIgnoreFirstCallbacks| callbacks are ignored.
+static const int kFullDuplexTimeInSec = 10;
+// Wait for the callback sequence to stabilize by ignoring this number of
+// initial callbacks (avoids initial FIFO access).
+// Only used in the RunPlayoutAndRecordingInFullDuplex test.
+static const int kNumIgnoreFirstCallbacks = 50;
+// Sets the number of impulses per second in the latency test.
+// TODO(henrika): fine tune this setting for iOS.
+static const int kImpulseFrequencyInHz = 1;
+// Length of round-trip latency measurements. Number of transmitted impulses
+// is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1.
+// TODO(henrika): fine tune this setting for iOS.
+static const int kMeasureLatencyTimeInSec = 5;
+// Utilized in round-trip latency measurements to avoid capturing noise samples.
+// TODO(henrika): fine tune this setting for iOS.
+static const int kImpulseThreshold = 50;
+static const char kTag[] = "[..........] ";
+
+enum TransportType {
+  kPlayout = 0x1,
+  kRecording = 0x2,
+};
+
+// Interface for processing the audio stream. Real implementations can e.g.
+// run audio in loopback, read audio from a file or perform latency
+// measurements.
+class AudioStreamInterface {
+ public:
+  virtual void Write(const void* source, int num_frames) = 0;
+  virtual void Read(void* destination, int num_frames) = 0;
+
+ protected:
+  virtual ~AudioStreamInterface() {}
+};
+
+// Reads audio samples from a PCM file; the complete file is read into
+// memory at construction time.
+class FileAudioStream : public AudioStreamInterface {
+ public:
+  FileAudioStream(int num_callbacks,
+                  const std::string& file_name,
+                  int sample_rate)
+      : file_size_in_bytes_(test::GetFileSize(file_name)),
+        sample_rate_(sample_rate),
+        file_pos_(0) {
+    EXPECT_GE(file_size_in_callbacks(), num_callbacks)
+        << "Size of test file is not large enough to last during the test.";
+    const int num_16bit_samples = file_size_in_bytes_ / kBytesPerSample;
+    file_.reset(new int16_t[num_16bit_samples]);
+    FILE* audio_file = fopen(file_name.c_str(), "rb");
+    EXPECT_NE(audio_file, nullptr);
+    int num_samples_read =
+        fread(file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
+    EXPECT_EQ(num_samples_read, num_16bit_samples);
+    fclose(audio_file);
+  }
+
+  // AudioStreamInterface::Write() is not implemented.
+  void Write(const void* source, int num_frames) override {}
+
+  // Read samples from the file stored in memory (at construction) and copy
+  // |num_frames| samples (corresponding to 10 ms) into the |destination| byte
+  // buffer.
+  void Read(void* destination, int num_frames) override {
+    memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
+           num_frames * sizeof(int16_t));
+    file_pos_ += num_frames;
+  }
+
+  int file_size_in_seconds() const {
+    return (file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
+  }
+  int file_size_in_callbacks() const {
+    return file_size_in_seconds() * kNumCallbacksPerSecond;
+  }
+
+ private:
+  int file_size_in_bytes_;
+  int sample_rate_;
+  rtc::scoped_ptr<int16_t[]> file_;
+  int file_pos_;
+};
+
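+// A minimal usage sketch of FileAudioStream (the file name and parameters
+// below are illustrative; the real tests use GetFileName() and the rates
+// reported by the device):
+//   FileAudioStream stream(kNumCallbacks, "audio_device/audio_short48.pcm",
+//                          48000);
+//   int16_t buffer[480];        // 10 ms at 48 kHz.
+//   stream.Read(buffer, 480);   // Fills |buffer| with the next 10 ms chunk.
+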
+// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
+// buffers of fixed size and allows Write and Read operations. The idea is to
+// store recorded audio buffers (using Write) and then read (using Read) these
+// stored buffers with as short a delay as possible when the audio layer needs
+// data to play out. The number of buffers in the FIFO will stabilize under
+// normal conditions since there will be a balance between Write and Read calls.
+// The container is a std::list and access is protected with a lock since each
+// side (playout and recording) is driven by its own thread.
+class FifoAudioStream : public AudioStreamInterface {
+ public:
+  explicit FifoAudioStream(int frames_per_buffer)
+      : frames_per_buffer_(frames_per_buffer),
+        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
+        fifo_(new AudioBufferList),
+        largest_size_(0),
+        total_written_elements_(0),
+        write_count_(0) {
+    EXPECT_NE(fifo_.get(), nullptr);
+  }
+
+  ~FifoAudioStream() { Flush(); }
+
+  // Allocate new memory, copy |num_frames| samples from |source| into memory
+  // and add a pointer to the memory location to the end of the list.
+  // Increases the size of the FIFO by one element.
+  void Write(const void* source, int num_frames) override {
+    ASSERT_EQ(num_frames, frames_per_buffer_);
+    PRINTD("+");
+    if (write_count_++ < kNumIgnoreFirstCallbacks) {
+      return;
+    }
+    int16_t* memory = new int16_t[frames_per_buffer_];
+    memcpy(static_cast<int16_t*>(&memory[0]), source, bytes_per_buffer_);
+    rtc::CritScope lock(&lock_);
+    fifo_->push_back(memory);
+    const int size = fifo_->size();
+    if (size > largest_size_) {
+      largest_size_ = size;
+      PRINTD("(%d)", largest_size_);
+    }
+    total_written_elements_ += size;
+  }
+
+  // Read the pointer to the data buffer at the front of the list, copy
+  // |num_frames| of stored data into |destination| and delete the memory
+  // allocation. Decreases the size of the FIFO by one element.
+  void Read(void* destination, int num_frames) override {
+    ASSERT_EQ(num_frames, frames_per_buffer_);
+    PRINTD("-");
+    rtc::CritScope lock(&lock_);
+    if (fifo_->empty()) {
+      memset(destination, 0, bytes_per_buffer_);
+    } else {
+      int16_t* memory = fifo_->front();
+      fifo_->pop_front();
+      memcpy(destination, static_cast<int16_t*>(&memory[0]), bytes_per_buffer_);
+      delete[] memory;
+    }
+  }
+
+  int size() const { return fifo_->size(); }
+
+  int largest_size() const { return largest_size_; }
+
+  int average_size() const {
+    return (total_written_elements_ == 0)
+               ? 0.0
+               : 0.5 +
+                     static_cast<float>(total_written_elements_) /
+                         (write_count_ - kNumIgnoreFirstCallbacks);
+  }
+
+ private:
+  void Flush() {
+    for (auto it = fifo_->begin(); it != fifo_->end(); ++it) {
+      delete *it;
+    }
+    fifo_->clear();
+  }
+
+  using AudioBufferList = std::list<int16_t*>;
+  rtc::CriticalSection lock_;
+  const int frames_per_buffer_;
+  const int bytes_per_buffer_;
+  rtc::scoped_ptr<AudioBufferList> fifo_;
+  int largest_size_;
+  int total_written_elements_;
+  int write_count_;
+};
+
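+// A minimal usage sketch of FifoAudioStream, assuming 10 ms buffers at
+// 48 kHz (i.e. 480 frames per buffer):
+//   FifoAudioStream fifo(480);
+//   int16_t buffer[480] = {0};
+//   fifo.Write(buffer, 480);  // Producer (recording) side.
+//   fifo.Read(buffer, 480);   // Consumer (playout) side.
+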
+// Inserts periodic impulses and measures the latency between the time of
+// transmission and time of receiving the same impulse.
+// Usage requires special hardware called an Audio Loopback Dongle.
+// See http://source.android.com/devices/audio/loopback.html for details.
+class LatencyMeasuringAudioStream : public AudioStreamInterface {
+ public:
+  explicit LatencyMeasuringAudioStream(int frames_per_buffer)
+      : clock_(Clock::GetRealTimeClock()),
+        frames_per_buffer_(frames_per_buffer),
+        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
+        play_count_(0),
+        rec_count_(0),
+        pulse_time_(0) {}
+
+  // Insert periodic impulses in the first two samples of |destination|.
+  void Read(void* destination, int num_frames) override {
+    ASSERT_EQ(num_frames, frames_per_buffer_);
+    if (play_count_ == 0) {
+      PRINT("[");
+    }
+    play_count_++;
+    memset(destination, 0, bytes_per_buffer_);
+    if (play_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
+      if (pulse_time_ == 0) {
+        pulse_time_ = clock_->TimeInMilliseconds();
+      }
+      PRINT(".");
+      const int16_t impulse = std::numeric_limits<int16_t>::max();
+      int16_t* ptr16 = static_cast<int16_t*>(destination);
+      for (int i = 0; i < 2; ++i) {
+        *ptr16++ = impulse;
+      }
+    }
+  }
+
+  // Detect received impulses in |source|, derive the time between
+  // transmission and detection and add the calculated delay to the list of
+  // latencies.
+  void Write(const void* source, int num_frames) override {
+    ASSERT_EQ(num_frames, frames_per_buffer_);
+    rec_count_++;
+    if (pulse_time_ == 0) {
+      // Avoid detection of new impulse response until a new impulse has
+      // been transmitted (sets |pulse_time_| to value larger than zero).
+      return;
+    }
+    const int16_t* ptr16 = static_cast<const int16_t*>(source);
+    std::vector<int16_t> vec(ptr16, ptr16 + num_frames);
+    // Find max value in the audio buffer.
+    int max = *std::max_element(vec.begin(), vec.end());
+    // Find index (element position in vector) of the max element.
+    int index_of_max =
+        std::distance(vec.begin(), std::find(vec.begin(), vec.end(), max));
+    if (max > kImpulseThreshold) {
+      PRINTD("(%d,%d)", max, index_of_max);
+      int64_t now_time = clock_->TimeInMilliseconds();
+      int extra_delay = IndexToMilliseconds(static_cast<double>(index_of_max));
+      PRINTD("[%d]", static_cast<int>(now_time - pulse_time_));
+      PRINTD("[%d]", extra_delay);
+      // Total latency is the difference between transmit time and detection
+      // time plus the extra delay within the buffer in which we detected the
+      // received impulse. It is transmitted at sample 0 but can be received
+      // at sample N where N > 0. The term |extra_delay| accounts for N and it
+      // is a value between 0 and 10ms.
+      latencies_.push_back(now_time - pulse_time_ + extra_delay);
+      pulse_time_ = 0;
+    } else {
+      PRINTD("-");
+    }
+  }
+
+  int num_latency_values() const { return latencies_.size(); }
+
+  int min_latency() const {
+    if (latencies_.empty())
+      return 0;
+    return *std::min_element(latencies_.begin(), latencies_.end());
+  }
+
+  int max_latency() const {
+    if (latencies_.empty())
+      return 0;
+    return *std::max_element(latencies_.begin(), latencies_.end());
+  }
+
+  int average_latency() const {
+    if (latencies_.empty())
+      return 0;
+    return 0.5 +
+           static_cast<double>(
+               std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
+               latencies_.size();
+  }
+
+  void PrintResults() const {
+    PRINT("] ");
+    for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
+      PRINT("%d ", *it);
+    }
+    PRINT("\n");
+    PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag, min_latency(),
+          max_latency(), average_latency());
+  }
+
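+  // Converts an element index within a 10 ms buffer into a time offset in
+  // milliseconds, rounded to the nearest integer. E.g., with 480 frames per
+  // buffer, index 240 gives 10 * (240 / 480) + 0.5 -> 5 ms.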
+  int IndexToMilliseconds(double index) const {
+    return 10.0 * (index / frames_per_buffer_) + 0.5;
+  }
+
+ private:
+  Clock* clock_;
+  const int frames_per_buffer_;
+  const int bytes_per_buffer_;
+  int play_count_;
+  int rec_count_;
+  int64_t pulse_time_;
+  std::vector<int> latencies_;
+};
+
+// Mocks the AudioTransport object and proxies actions for the two callbacks
+// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
+// of AudioStreamInterface.
+class MockAudioTransport : public AudioTransport {
+ public:
+  explicit MockAudioTransport(int type)
+      : test_is_done_(nullptr),
+        num_callbacks_(0),
+        type_(type),
+        play_count_(0),
+        rec_count_(0),
+        audio_stream_(nullptr) {}
+
+  virtual ~MockAudioTransport() {}
+
+  MOCK_METHOD10(RecordedDataIsAvailable,
+                int32_t(const void* audioSamples,
+                        const uint32_t nSamples,
+                        const uint8_t nBytesPerSample,
+                        const uint8_t nChannels,
+                        const uint32_t samplesPerSec,
+                        const uint32_t totalDelayMS,
+                        const int32_t clockDrift,
+                        const uint32_t currentMicLevel,
+                        const bool keyPressed,
+                        uint32_t& newMicLevel));
+  MOCK_METHOD8(NeedMorePlayData,
+               int32_t(const uint32_t nSamples,
+                       const uint8_t nBytesPerSample,
+                       const uint8_t nChannels,
+                       const uint32_t samplesPerSec,
+                       void* audioSamples,
+                       uint32_t& nSamplesOut,
+                       int64_t* elapsed_time_ms,
+                       int64_t* ntp_time_ms));
+
+  // Set default actions of the mock object. We are delegating to fake
+  // implementations (of AudioStreamInterface) here.
+  void HandleCallbacks(EventWrapper* test_is_done,
+                       AudioStreamInterface* audio_stream,
+                       int num_callbacks) {
+    test_is_done_ = test_is_done;
+    audio_stream_ = audio_stream;
+    num_callbacks_ = num_callbacks;
+    if (play_mode()) {
+      ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
+          .WillByDefault(
+              Invoke(this, &MockAudioTransport::RealNeedMorePlayData));
+    }
+    if (rec_mode()) {
+      ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
+          .WillByDefault(
+              Invoke(this, &MockAudioTransport::RealRecordedDataIsAvailable));
+    }
+  }
+
+  int32_t RealRecordedDataIsAvailable(const void* audioSamples,
+                                      const uint32_t nSamples,
+                                      const uint8_t nBytesPerSample,
+                                      const uint8_t nChannels,
+                                      const uint32_t samplesPerSec,
+                                      const uint32_t totalDelayMS,
+                                      const int32_t clockDrift,
+                                      const uint32_t currentMicLevel,
+                                      const bool keyPressed,
+                                      uint32_t& newMicLevel) {
+    EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
+    rec_count_++;
+    // Process the recorded audio stream if an AudioStreamInterface
+    // implementation exists.
+    if (audio_stream_) {
+      audio_stream_->Write(audioSamples, nSamples);
+    }
+    if (ReceivedEnoughCallbacks()) {
+      test_is_done_->Set();
+    }
+    return 0;
+  }
+
+  int32_t RealNeedMorePlayData(const uint32_t nSamples,
+                               const uint8_t nBytesPerSample,
+                               const uint8_t nChannels,
+                               const uint32_t samplesPerSec,
+                               void* audioSamples,
+                               uint32_t& nSamplesOut,
+                               int64_t* elapsed_time_ms,
+                               int64_t* ntp_time_ms) {
+    EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
+    play_count_++;
+    nSamplesOut = nSamples;
+    // Read (possibly processed) audio stream samples to be played out if an
+    // AudioStreamInterface implementation exists.
+    if (audio_stream_) {
+      audio_stream_->Read(audioSamples, nSamples);
+    }
+    if (ReceivedEnoughCallbacks()) {
+      test_is_done_->Set();
+    }
+    return 0;
+  }
+
+  bool ReceivedEnoughCallbacks() {
+    bool recording_done = false;
+    if (rec_mode())
+      recording_done = rec_count_ >= num_callbacks_;
+    else
+      recording_done = true;
+
+    bool playout_done = false;
+    if (play_mode())
+      playout_done = play_count_ >= num_callbacks_;
+    else
+      playout_done = true;
+
+    return recording_done && playout_done;
+  }
+
+  bool play_mode() const { return type_ & kPlayout; }
+  bool rec_mode() const { return type_ & kRecording; }
+
+ private:
+  EventWrapper* test_is_done_;
+  int num_callbacks_;
+  int type_;
+  int play_count_;
+  int rec_count_;
+  AudioStreamInterface* audio_stream_;
+};
+
+// AudioDeviceTest test fixture.
+class AudioDeviceTest : public ::testing::Test {
+ protected:
+  AudioDeviceTest() : test_is_done_(EventWrapper::Create()) {
+    old_sev_ = rtc::LogMessage::GetLogToDebug();
+    // Set a suitable logging level here. Change to rtc::LS_VERBOSE for more
+    // verbose output. See webrtc/base/logging.h for the complete list of
+    // options.
+    rtc::LogMessage::LogToDebug(rtc::LS_INFO);
+    // Add extra logging fields here (timestamps and thread id).
+    // rtc::LogMessage::LogTimestamps();
+    rtc::LogMessage::LogThreads();
+    // Creates an audio device using a default audio layer.
+    audio_device_ = CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
+    EXPECT_NE(audio_device_.get(), nullptr);
+    EXPECT_EQ(0, audio_device_->Init());
+    EXPECT_EQ(0,
+              audio_device()->GetPlayoutAudioParameters(&playout_parameters_));
+    EXPECT_EQ(0, audio_device()->GetRecordAudioParameters(&record_parameters_));
+  }
+  virtual ~AudioDeviceTest() {
+    EXPECT_EQ(0, audio_device_->Terminate());
+    rtc::LogMessage::LogToDebug(old_sev_);
+  }
+
+  // TODO(henrika): don't use hardcoded values below.
+  int playout_sample_rate() const { return playout_parameters_.sample_rate(); }
+  int record_sample_rate() const { return record_parameters_.sample_rate(); }
+  int playout_channels() const { return playout_parameters_.channels(); }
+  int record_channels() const { return record_parameters_.channels(); }
+  int playout_frames_per_10ms_buffer() const {
+    return playout_parameters_.frames_per_10ms_buffer();
+  }
+  int record_frames_per_10ms_buffer() const {
+    return record_parameters_.frames_per_10ms_buffer();
+  }
+
+  int total_delay_ms() const {
+    // TODO(henrika): improve this part.
+    return 100;
+  }
+
+  rtc::scoped_refptr<AudioDeviceModule> audio_device() const {
+    return audio_device_;
+  }
+
+  AudioDeviceModuleImpl* audio_device_impl() const {
+    return static_cast<AudioDeviceModuleImpl*>(audio_device_.get());
+  }
+
+  AudioDeviceBuffer* audio_device_buffer() const {
+    return audio_device_impl()->GetAudioDeviceBuffer();
+  }
+
+  rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice(
+      AudioDeviceModule::AudioLayer audio_layer) {
+    rtc::scoped_refptr<AudioDeviceModule> module(
+        AudioDeviceModuleImpl::Create(0, audio_layer));
+    return module;
+  }
+
+  // Returns file name relative to the resource root given a sample rate.
+  std::string GetFileName(int sample_rate) {
+    EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100 ||
+                sample_rate == 16000);
+    char fname[64];
+    snprintf(fname, sizeof(fname), "audio_device/audio_short%d",
+             sample_rate / 1000);
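+    // E.g. sample_rate == 48000 gives "audio_device/audio_short48".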
+    std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
+    EXPECT_TRUE(test::FileExists(file_name));
+#ifdef ENABLE_DEBUG_PRINTF
+    PRINTD("file name: %s\n", file_name.c_str());
+    const int bytes = test::GetFileSize(file_name);
+    PRINTD("file size: %d [bytes]\n", bytes);
+    PRINTD("file size: %d [samples]\n", bytes / kBytesPerSample);
+    const int seconds = bytes / (sample_rate * kBytesPerSample);
+    PRINTD("file size: %d [secs]\n", seconds);
+    PRINTD("file size: %d [callbacks]\n", seconds * kNumCallbacksPerSecond);
+#endif
+    return file_name;
+  }
+
+  void StartPlayout() {
+    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+    EXPECT_FALSE(audio_device()->Playing());
+    EXPECT_EQ(0, audio_device()->InitPlayout());
+    EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
+    EXPECT_EQ(0, audio_device()->StartPlayout());
+    EXPECT_TRUE(audio_device()->Playing());
+  }
+
+  void StopPlayout() {
+    EXPECT_EQ(0, audio_device()->StopPlayout());
+    EXPECT_FALSE(audio_device()->Playing());
+    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+  }
+
+  void StartRecording() {
+    EXPECT_FALSE(audio_device()->RecordingIsInitialized());
+    EXPECT_FALSE(audio_device()->Recording());
+    EXPECT_EQ(0, audio_device()->InitRecording());
+    EXPECT_TRUE(audio_device()->RecordingIsInitialized());
+    EXPECT_EQ(0, audio_device()->StartRecording());
+    EXPECT_TRUE(audio_device()->Recording());
+  }
+
+  void StopRecording() {
+    EXPECT_EQ(0, audio_device()->StopRecording());
+    EXPECT_FALSE(audio_device()->Recording());
+  }
+
+  rtc::scoped_ptr<EventWrapper> test_is_done_;
+  rtc::scoped_refptr<AudioDeviceModule> audio_device_;
+  AudioParameters playout_parameters_;
+  AudioParameters record_parameters_;
+  rtc::LoggingSeverity old_sev_;
+};
+
+TEST_F(AudioDeviceTest, ConstructDestruct) {
+  // Using the test fixture to create and destruct the audio device module.
+}
+
+TEST_F(AudioDeviceTest, InitTerminate) {
+  // Initialization is part of the test fixture.
+  EXPECT_TRUE(audio_device()->Initialized());
+  // webrtc::SleepMs(5 * 1000);
+  EXPECT_EQ(0, audio_device()->Terminate());
+  EXPECT_FALSE(audio_device()->Initialized());
+}
+
+// Tests that playout can be initiated, started and stopped. No audio callback
+// is registered in this test.
+TEST_F(AudioDeviceTest, StartStopPlayout) {
+  StartPlayout();
+  StopPlayout();
+  StartPlayout();
+  StopPlayout();
+}
+
+// Tests that recording can be initiated, started and stopped. No audio callback
+// is registered in this test.
+TEST_F(AudioDeviceTest, StartStopRecording) {
+  StartRecording();
+  StopRecording();
+  StartRecording();
+  StopRecording();
+}
+
+// Verify that calling StopPlayout() will leave us in an uninitialized state
+// which will require a new call to InitPlayout(). This test does not call
+// StartPlayout() while being uninitialized since doing so will hit a DCHECK.
+TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
+  EXPECT_EQ(0, audio_device()->InitPlayout());
+  EXPECT_EQ(0, audio_device()->StartPlayout());
+  EXPECT_EQ(0, audio_device()->StopPlayout());
+  EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+}
+
+// Start playout and verify that the native audio layer starts asking for real
+// audio samples to play out using the NeedMorePlayData callback.
+TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
+  MockAudioTransport mock(kPlayout);
+  mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
+  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
+                                     kBytesPerSample, playout_channels(),
+                                     playout_sample_rate(), NotNull(), _, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartPlayout();
+  test_is_done_->Wait(kTestTimeOutInMilliseconds);
+  StopPlayout();
+}
+
+// Start recording and verify that the native audio layer starts feeding real
+// audio samples via the RecordedDataIsAvailable callback.
+TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
+  MockAudioTransport mock(kRecording);
+  mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
+  EXPECT_CALL(mock,
+              RecordedDataIsAvailable(
+                  NotNull(), record_frames_per_10ms_buffer(), kBytesPerSample,
+                  record_channels(), record_sample_rate(),
+                  _,  // TODO(henrika): fix delay
+                  0, 0, false, _)).Times(AtLeast(kNumCallbacks));
+
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartRecording();
+  test_is_done_->Wait(kTestTimeOutInMilliseconds);
+  StopRecording();
+}
+
+// Start playout and recording (full-duplex audio) and verify that audio is
+// active in both directions.
+TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
+  MockAudioTransport mock(kPlayout | kRecording);
+  mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
+  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
+                                     kBytesPerSample, playout_channels(),
+                                     playout_sample_rate(), NotNull(), _, _, _))
+      .Times(AtLeast(kNumCallbacks));
+  EXPECT_CALL(mock,
+              RecordedDataIsAvailable(
+                  NotNull(), record_frames_per_10ms_buffer(), kBytesPerSample,
+                  record_channels(), record_sample_rate(),
+                  _,  // TODO(henrika): fix delay
+                  0, 0, false, _)).Times(AtLeast(kNumCallbacks));
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartPlayout();
+  StartRecording();
+  test_is_done_->Wait(kTestTimeOutInMilliseconds);
+  StopRecording();
+  StopPlayout();
+}
+
+// Start playout and read audio from an external PCM file when the audio layer
+// asks for data to play out. Real audio is played out in this test but it does
+// not contain any explicit verification that the audio quality is perfect.
+TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
+  // TODO(henrika): extend test when mono output is supported.
+  EXPECT_EQ(1, playout_channels());
+  NiceMock<MockAudioTransport> mock(kPlayout);
+  const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
+  std::string file_name = GetFileName(playout_sample_rate());
+  rtc::scoped_ptr<FileAudioStream> file_audio_stream(
+      new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
+  mock.HandleCallbacks(test_is_done_.get(), file_audio_stream.get(),
+                       num_callbacks);
+  // SetMaxPlayoutVolume();
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartPlayout();
+  test_is_done_->Wait(kTestTimeOutInMilliseconds);
+  StopPlayout();
+}
+
+TEST_F(AudioDeviceTest, Devices) {
+  // Device enumeration is not supported. Verify fixed values only.
+  EXPECT_EQ(1, audio_device()->PlayoutDevices());
+  EXPECT_EQ(1, audio_device()->RecordingDevices());
+}
+
+// Start playout and recording and store recorded data in an intermediate FIFO
+// buffer from which the playout side then reads its samples in the same order
+// as they were stored. Under ideal circumstances, a callback sequence would
+// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
+// means 'packet played'. Under such conditions, the FIFO would only contain
+// one packet on average. However, under more realistic conditions, the size
+// of the FIFO will vary more due to an imbalance between the two sides.
+// This test tries to verify that the device maintains a balanced callback
+// sequence by running in loopback for ten seconds while measuring the size
+// (max and average) of the FIFO. The size of the FIFO is increased by the
+// recording side and decreased by the playout side.
+// TODO(henrika): tune the final test parameters after running tests on several
+// different devices.
+TEST_F(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
+  EXPECT_EQ(record_channels(), playout_channels());
+  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
+  NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
+  rtc::scoped_ptr<FifoAudioStream> fifo_audio_stream(
+      new FifoAudioStream(playout_frames_per_10ms_buffer()));
+  mock.HandleCallbacks(test_is_done_.get(), fifo_audio_stream.get(),
+                       kFullDuplexTimeInSec * kNumCallbacksPerSecond);
+  // SetMaxPlayoutVolume();
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  StartRecording();
+  StartPlayout();
+  test_is_done_->Wait(
+      std::max(kTestTimeOutInMilliseconds, 1000 * kFullDuplexTimeInSec));
+  StopPlayout();
+  StopRecording();
+  EXPECT_LE(fifo_audio_stream->average_size(), 10);
+  EXPECT_LE(fifo_audio_stream->largest_size(), 20);
+}
+
+// Measures loopback latency and reports the min, max and average values for
+// a full duplex audio session.
+// The latency is measured like so:
+// - Insert impulses periodically on the output side.
+// - Detect the impulses on the input side.
+// - Measure the time difference between the transmit time and receive time.
+// - Store time differences in a vector and calculate min, max and average.
+// This test requires special hardware called an Audio Loopback Dongle.
+// See http://source.android.com/devices/audio/loopback.html for details.
+TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
+  EXPECT_EQ(record_channels(), playout_channels());
+  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
+  NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
+  rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
+      new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
+  mock.HandleCallbacks(test_is_done_.get(), latency_audio_stream.get(),
+                       kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
+  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+  // SetMaxPlayoutVolume();
+  // DisableBuiltInAECIfAvailable();
+  StartRecording();
+  StartPlayout();
+  test_is_done_->Wait(
+      std::max(kTestTimeOutInMilliseconds, 1000 * kMeasureLatencyTimeInSec));
+  StopPlayout();
+  StopRecording();
+  // Verify that the correct number of transmitted impulses are detected.
+  EXPECT_EQ(latency_audio_stream->num_latency_values(),
+            kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1);
+  latency_audio_stream->PrintResults();
+}
+
+}  // namespace webrtc
diff --git a/webrtc/modules/modules.gyp b/webrtc/modules/modules.gyp
index 3ecf759..b0b3e79 100644
--- a/webrtc/modules/modules.gyp
+++ b/webrtc/modules/modules.gyp
@@ -65,7 +65,6 @@
             'bwe_simulator',
             'cng',
             'desktop_capture',
-            'isac',
             'isac_fix',
             'media_file',
             'neteq',
@@ -361,11 +360,15 @@
             ['OS=="ios"', {
               'sources': [
                 'video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc',
+                'audio_device/ios/audio_device_unittest_ios.cc',
               ],
               'mac_bundle_resources': [
                 '<(DEPTH)/resources/audio_coding/speech_mono_16kHz.pcm',
                 '<(DEPTH)/resources/audio_coding/testfile32kHz.pcm',
                 '<(DEPTH)/resources/audio_coding/teststereo32kHz.pcm',
+                '<(DEPTH)/resources/audio_device/audio_short16.pcm',
+                '<(DEPTH)/resources/audio_device/audio_short44.pcm',
+                '<(DEPTH)/resources/audio_device/audio_short48.pcm',
                 '<(DEPTH)/resources/audio_processing/agc/agc_no_circular_buffer.dat',
                 '<(DEPTH)/resources/audio_processing/agc/agc_pitch_gain.dat',
                 '<(DEPTH)/resources/audio_processing/agc/agc_pitch_lag.dat',
diff --git a/webrtc/modules/utility/interface/helpers_ios.h b/webrtc/modules/utility/interface/helpers_ios.h
new file mode 100644
index 0000000..1e6075f
--- /dev/null
+++ b/webrtc/modules/utility/interface/helpers_ios.h
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_IOS_H_
+#define WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_IOS_H_
+
+#if defined(WEBRTC_IOS)
+
+#include <string>
+
+namespace webrtc {
+namespace ios {
+
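+// Logs any |error| and returns false when |success| is NO; returns true
+// otherwise.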
+bool CheckAndLogError(BOOL success, NSError* error);
+
+// Returns the thread ID as a string.
+std::string GetThreadId();
+
+// Returns the thread ID as a string suitable for debug logging.
+std::string GetThreadInfo();
+
+// Returns [NSThread currentThread] description as string.
+// Example: <NSThread: 0x170066d80>{number = 1, name = main}
+std::string GetCurrentThreadDescription();
+
+// Returns the current name of the operating system.
+std::string GetSystemName();
+
+// Returns the current version of the operating system.
+std::string GetSystemVersion();
+
+// Returns the version of the operating system as a floating point value.
+float GetSystemVersionAsFloat();
+
+// Returns the device type.
+// Examples: "iPhone" and "iPod touch".
+std::string GetDeviceType();
+
+// Returns a more detailed device name.
+// Examples: "iPhone 5s (GSM)" and "iPhone 6 Plus".
+std::string GetDeviceName();
+
+}  // namespace ios
+}  // namespace webrtc
+
+#endif  // defined(WEBRTC_IOS)
+
+#endif  // WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_IOS_H_
diff --git a/webrtc/modules/utility/source/helpers_ios.mm b/webrtc/modules/utility/source/helpers_ios.mm
new file mode 100644
index 0000000..d362530
--- /dev/null
+++ b/webrtc/modules/utility/source/helpers_ios.mm
@@ -0,0 +1,172 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_IOS)
+
+#import <Foundation/Foundation.h>
+#import <sys/sysctl.h>
+#import <UIKit/UIKit.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/utility/interface/helpers_ios.h"
+
+namespace webrtc {
+namespace ios {
+
+// TODO(henrika): move to shared location.
+// See https://code.google.com/p/webrtc/issues/detail?id=4773 for details.
+NSString* NSStringFromStdString(const std::string& stdString) {
+  // A std::string may contain embedded null characters, so we construct
+  // the NSString using an explicit length.
+  return [[NSString alloc] initWithBytes:stdString.data()
+                                  length:stdString.length()
+                                encoding:NSUTF8StringEncoding];
+}
+
+std::string StdStringFromNSString(NSString* nsString) {
+  NSData* charData = [nsString dataUsingEncoding:NSUTF8StringEncoding];
+  return std::string(reinterpret_cast<const char*>([charData bytes]),
+                     [charData length]);
+}
+
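+// Example round trip between the two conversion helpers above (illustrative):
+//   NSString* ns = NSStringFromStdString("hello");
+//   std::string s = StdStringFromNSString(ns);  // s == "hello".
+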
+bool CheckAndLogError(BOOL success, NSError* error) {
+  if (!success) {
+    NSString* msg =
+        [NSString stringWithFormat:@"Error: %ld, %@, %@", (long)error.code,
+                                   error.localizedDescription,
+                                   error.localizedFailureReason];
+    LOG(LS_ERROR) << StdStringFromNSString(msg);
+    return false;
+  }
+  return true;
+}
+
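+// Typical call site (the audio-session call below is illustrative only):
+//   NSError* error = nil;
+//   BOOL success = [session setActive:YES error:&error];
+//   if (!CheckAndLogError(success, error)) {
+//     // Handle failure.
+//   }
+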
+// TODO(henrika): see if it is possible to move to GetThreadName in
+// platform_thread.h and base it on pthread methods instead.
+std::string GetCurrentThreadDescription() {
+  NSString* name = [NSString stringWithFormat:@"%@", [NSThread currentThread]];
+  return StdStringFromNSString(name);
+}
+
+std::string GetSystemName() {
+  NSString* osName = [[UIDevice currentDevice] systemName];
+  return StdStringFromNSString(osName);
+}
+
+std::string GetSystemVersion() {
+  NSString* osVersion = [[UIDevice currentDevice] systemVersion];
+  return StdStringFromNSString(osVersion);
+}
+
+float GetSystemVersionAsFloat() {
+  NSString* osVersion = [[UIDevice currentDevice] systemVersion];
+  return osVersion.floatValue;
+}
+
+std::string GetDeviceType() {
+  NSString* deviceModel = [[UIDevice currentDevice] model];
+  return StdStringFromNSString(deviceModel);
+}
+
+std::string GetDeviceName() {
+  size_t size;
+  sysctlbyname("hw.machine", NULL, &size, NULL, 0);
+  rtc::scoped_ptr<char[]> machine(new char[size]);
+  sysctlbyname("hw.machine", machine.get(), &size, NULL, 0);
+  std::string raw_name(machine.get());
+  if (!raw_name.compare("iPhone1,1"))
+    return std::string("iPhone 1G");
+  if (!raw_name.compare("iPhone1,2"))
+    return std::string("iPhone 3G");
+  if (!raw_name.compare("iPhone2,1"))
+    return std::string("iPhone 3GS");
+  if (!raw_name.compare("iPhone3,1"))
+    return std::string("iPhone 4");
+  if (!raw_name.compare("iPhone3,3"))
+    return std::string("Verizon iPhone 4");
+  if (!raw_name.compare("iPhone4,1"))
+    return std::string("iPhone 4S");
+  if (!raw_name.compare("iPhone5,1"))
+    return std::string("iPhone 5 (GSM)");
+  if (!raw_name.compare("iPhone5,2"))
+    return std::string("iPhone 5 (GSM+CDMA)");
+  if (!raw_name.compare("iPhone5,3"))
+    return std::string("iPhone 5c (GSM)");
+  if (!raw_name.compare("iPhone5,4"))
+    return std::string("iPhone 5c (GSM+CDMA)");
+  if (!raw_name.compare("iPhone6,1"))
+    return std::string("iPhone 5s (GSM)");
+  if (!raw_name.compare("iPhone6,2"))
+    return std::string("iPhone 5s (GSM+CDMA)");
+  if (!raw_name.compare("iPhone7,1"))
+    return std::string("iPhone 6 Plus");
+  if (!raw_name.compare("iPhone7,2"))
+    return std::string("iPhone 6");
+  if (!raw_name.compare("iPod1,1"))
+    return std::string("iPod Touch 1G");
+  if (!raw_name.compare("iPod2,1"))
+    return std::string("iPod Touch 2G");
+  if (!raw_name.compare("iPod3,1"))
+    return std::string("iPod Touch 3G");
+  if (!raw_name.compare("iPod4,1"))
+    return std::string("iPod Touch 4G");
+  if (!raw_name.compare("iPod5,1"))
+    return std::string("iPod Touch 5G");
+  if (!raw_name.compare("iPad1,1"))
+    return std::string("iPad");
+  if (!raw_name.compare("iPad2,1"))
+    return std::string("iPad 2 (WiFi)");
+  if (!raw_name.compare("iPad2,2"))
+    return std::string("iPad 2 (GSM)");
+  if (!raw_name.compare("iPad2,3"))
+    return std::string("iPad 2 (CDMA)");
+  if (!raw_name.compare("iPad2,4"))
+    return std::string("iPad 2 (WiFi)");
+  if (!raw_name.compare("iPad2,5"))
+    return std::string("iPad Mini (WiFi)");
+  if (!raw_name.compare("iPad2,6"))
+    return std::string("iPad Mini (GSM)");
+  if (!raw_name.compare("iPad2,7"))
+    return std::string("iPad Mini (GSM+CDMA)");
+  if (!raw_name.compare("iPad3,1"))
+    return std::string("iPad 3 (WiFi)");
+  if (!raw_name.compare("iPad3,2"))
+    return std::string("iPad 3 (GSM+CDMA)");
+  if (!raw_name.compare("iPad3,3"))
+    return std::string("iPad 3 (GSM)");
+  if (!raw_name.compare("iPad3,4"))
+    return std::string("iPad 4 (WiFi)");
+  if (!raw_name.compare("iPad3,5"))
+    return std::string("iPad 4 (GSM)");
+  if (!raw_name.compare("iPad3,6"))
+    return std::string("iPad 4 (GSM+CDMA)");
+  if (!raw_name.compare("iPad4,1"))
+    return std::string("iPad Air (WiFi)");
+  if (!raw_name.compare("iPad4,2"))
+    return std::string("iPad Air (Cellular)");
+  if (!raw_name.compare("iPad4,4"))
+    return std::string("iPad mini 2G (WiFi)");
+  if (!raw_name.compare("iPad4,5"))
+    return std::string("iPad mini 2G (Cellular)");
+  if (!raw_name.compare("i386"))
+    return std::string("Simulator");
+  if (!raw_name.compare("x86_64"))
+    return std::string("Simulator");
+  LOG(LS_WARNING) << "Failed to find device name";
+  return raw_name;
+}
+
+}  // namespace ios
+}  // namespace webrtc
+
+#endif  // defined(WEBRTC_IOS)
diff --git a/webrtc/modules/utility/utility.gypi b/webrtc/modules/utility/utility.gypi
index 1a203bf..38c9e3e 100644
--- a/webrtc/modules/utility/utility.gypi
+++ b/webrtc/modules/utility/utility.gypi
@@ -22,6 +22,7 @@
         'interface/file_player.h',
         'interface/file_recorder.h',
         'interface/helpers_android.h',
+        'interface/helpers_ios.h',
         'interface/jvm_android.h',
         'interface/process_thread.h',
         'source/audio_frame_operations.cc',
@@ -32,6 +33,7 @@
         'source/file_recorder_impl.cc',
         'source/file_recorder_impl.h',
         'source/helpers_android.cc',
+        'source/helpers_ios.mm',
         'source/jvm_android.cc',
         'source/process_thread_impl.cc',
         'source/process_thread_impl.h',