Create a copy of talk/sound under webrtc/sound.

BUG=3379
R=andrew@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/22379004

git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@6986 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/base/base.gyp b/base/base.gyp
index 0e5845e..cf35b6b 100644
--- a/base/base.gyp
+++ b/base/base.gyp
@@ -29,7 +29,6 @@
       'type': 'static_library',
       'defines': [
         'FEATURE_ENABLE_SSL',
-        'GTEST_RELATIVE_PATH',
         'LOGGING=1',
         'USE_WEBRTC_DEV_BRANCH',
       ],
@@ -332,7 +331,6 @@
         ],
         'defines': [
           'FEATURE_ENABLE_SSL',
-          'GTEST_RELATIVE_PATH',
         ],
       },
       'include_dirs': [
diff --git a/base/base_tests.gyp b/base/base_tests.gyp
index 3cef102..690932d 100644
--- a/base/base_tests.gyp
+++ b/base/base_tests.gyp
@@ -25,10 +25,18 @@
         'testutils.h',
         'win32toolhelp.h',
       ],
+      'defines': [
+        'GTEST_RELATIVE_PATH',
+      ],
       'dependencies': [
         'base.gyp:webrtc_base',
         '<(DEPTH)/testing/gtest.gyp:gtest',
       ],
+      'direct_dependent_settings': {
+        'defines': [
+          'GTEST_RELATIVE_PATH',
+        ],
+      },
       'export_dependent_settings': [
         '<(DEPTH)/testing/gtest.gyp:gtest',
       ],
diff --git a/build/merge_libs.gyp b/build/merge_libs.gyp
index 4f8cdac..e8c5c99 100644
--- a/build/merge_libs.gyp
+++ b/build/merge_libs.gyp
@@ -11,6 +11,7 @@
   'variables': {
     'merge_libs_dependencies': [
       '../webrtc.gyp:webrtc',
+      '../sound/sound.gyp:rtc_sound',
     ],
   },
   'targets': [
diff --git a/sound/alsasoundsystem.cc b/sound/alsasoundsystem.cc
new file mode 100644
index 0000000..c2be190
--- /dev/null
+++ b/sound/alsasoundsystem.cc
@@ -0,0 +1,744 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/sound/alsasoundsystem.h"
+
+#include "webrtc/sound/sounddevicelocator.h"
+#include "webrtc/sound/soundinputstreaminterface.h"
+#include "webrtc/sound/soundoutputstreaminterface.h"
+#include "webrtc/base/common.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/stringutils.h"
+#include "webrtc/base/timeutils.h"
+#include "webrtc/base/worker.h"
+
+namespace rtc {
+
+// Lookup table from the rtc format enum in soundsysteminterface.h to
+// ALSA's enums.
+static const snd_pcm_format_t kCricketFormatToAlsaFormatTable[] = {
+  // The order here must match the order in soundsysteminterface.h
+  SND_PCM_FORMAT_S16_LE,
+};
+
+// Lookup table for the size of a single sample of a given format.
+static const size_t kCricketFormatToSampleSizeTable[] = {
+  // The order here must match the order in soundsysteminterface.h
+  sizeof(int16_t),  // 2
+};
+
+// Minimum latency we allow, in microseconds. This is more or less arbitrary,
+// but it has to be at least large enough to be able to buffer data during a
+// missed context switch, and the typical Linux scheduling quantum is 10ms.
+static const int kMinimumLatencyUsecs = 20 * 1000;
+
+// The latency we'll use for kNoLatencyRequirements (chosen arbitrarily).
+static const int kDefaultLatencyUsecs = kMinimumLatencyUsecs * 2;
+
+// We translate newlines in ALSA device descriptions to hyphens.
+static const char kAlsaDescriptionSearch[] = "\n";
+static const char kAlsaDescriptionReplace[] = " - ";
+
+class AlsaDeviceLocator : public SoundDeviceLocator {
+ public:
+  AlsaDeviceLocator(const std::string &name,
+                    const std::string &device_name)
+      : SoundDeviceLocator(name, device_name) {
+    // The ALSA descriptions have newlines in them, which won't show up in
+    // a drop-down box. Replace them with hyphens.
+    rtc::replace_substrs(kAlsaDescriptionSearch,
+                         sizeof(kAlsaDescriptionSearch) - 1,
+                         kAlsaDescriptionReplace,
+                         sizeof(kAlsaDescriptionReplace) - 1,
+                         &name_);
+  }
+
+  virtual SoundDeviceLocator *Copy() const {
+    return new AlsaDeviceLocator(*this);
+  }
+};
+
+// Functionality that is common to both AlsaInputStream and AlsaOutputStream.
+class AlsaStream {
+ public:
+  AlsaStream(AlsaSoundSystem *alsa,
+             snd_pcm_t *handle,
+             size_t frame_size,
+             int wait_timeout_ms,
+             int flags,
+             int freq)
+      : alsa_(alsa),
+        handle_(handle),
+        frame_size_(frame_size),
+        wait_timeout_ms_(wait_timeout_ms),
+        flags_(flags),
+        freq_(freq) {
+  }
+
+  ~AlsaStream() {
+    Close();
+  }
+
+  // Waits for the stream to be ready to accept/return more data, and returns
+  // how much can be written/read, or 0 if we need to Wait() again.
+  snd_pcm_uframes_t Wait() {
+    snd_pcm_sframes_t frames;
+    // Ideally we would not use snd_pcm_wait() and instead hook snd_pcm_poll_*
+    // into PhysicalSocketServer, but PhysicalSocketServer is nasty enough
+    // already and the current clients of SoundSystemInterface do not run
+    // anything else on their worker threads, so snd_pcm_wait() is good enough.
+    frames = symbol_table()->snd_pcm_avail_update()(handle_);
+    if (frames < 0) {
+      LOG(LS_ERROR) << "snd_pcm_avail_update(): " << GetError(frames);
+      Recover(frames);
+      return 0;
+    } else if (frames > 0) {
+      // Already ready, so no need to wait.
+      return frames;
+    }
+    // Else no space/data available, so must wait.
+    int ready = symbol_table()->snd_pcm_wait()(handle_, wait_timeout_ms_);
+    if (ready < 0) {
+      LOG(LS_ERROR) << "snd_pcm_wait(): " << GetError(ready);
+      Recover(ready);
+      return 0;
+    } else if (ready == 0) {
+      // Timeout, so nothing can be written/read right now.
+      // The timeout is set to twice the requested latency, so continuous
+      // timeouts indicate a problem; log this as a warning.
+      LOG(LS_WARNING) << "Timeout while waiting on stream";
+      return 0;
+    }
+    // Else ready > 0 (i.e., 1), so it's ready. Get count.
+    frames = symbol_table()->snd_pcm_avail_update()(handle_);
+    if (frames < 0) {
+      LOG(LS_ERROR) << "snd_pcm_avail_update(): " << GetError(frames);
+      Recover(frames);
+      return 0;
+    } else if (frames == 0) {
+      // snd_pcm_wait() said we were ready, so this ought to have been
+      // positive, but it has been observed to be zero in practice.
+      LOG(LS_WARNING) << "Spurious wake-up";
+    }
+    return frames;
+  }
+
+  int CurrentDelayUsecs() {
+    if (!(flags_ & SoundSystemInterface::FLAG_REPORT_LATENCY)) {
+      return 0;
+    }
+
+    snd_pcm_sframes_t delay;
+    int err = symbol_table()->snd_pcm_delay()(handle_, &delay);
+    if (err != 0) {
+      LOG(LS_ERROR) << "snd_pcm_delay(): " << GetError(err);
+      Recover(err);
+      // We'd rather continue playout/capture with an incorrect delay than stop
+      // it altogether, so return a valid value.
+      return 0;
+    }
+    // The delay is in frames. Convert to microseconds.
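+    // For example, with freq_ == 48000, a reported delay of 480 frames
+    // converts to 480 * 1000000 / 48000 = 10000 microseconds (10 ms).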
+    return delay * rtc::kNumMicrosecsPerSec / freq_;
+  }
+
+  // Used to recover from certain recoverable errors, principally buffer overrun
+  // or underrun (identified as EPIPE). Without calling this the stream stays
+  // in the error state forever.
+  bool Recover(int error) {
+    int err;
+    err = symbol_table()->snd_pcm_recover()(
+        handle_,
+        error,
+        // Silent; i.e., no logging on stderr.
+        1);
+    if (err != 0) {
+      // Docs say snd_pcm_recover returns the original error if it is not one
+      // of the recoverable ones, so this log message will probably contain the
+      // same error twice.
+      LOG(LS_ERROR) << "Unable to recover from \"" << GetError(error) << "\": "
+                    << GetError(err);
+      return false;
+    }
+    if (error == -EPIPE &&  // Buffer underrun/overrun.
+        symbol_table()->snd_pcm_stream()(handle_) == SND_PCM_STREAM_CAPTURE) {
+      // For capture streams we also have to repeat the explicit start() to get
+      // data flowing again.
+      err = symbol_table()->snd_pcm_start()(handle_);
+      if (err != 0) {
+        LOG(LS_ERROR) << "snd_pcm_start(): " << GetError(err);
+        return false;
+      }
+    }
+    return true;
+  }
+
+  bool Close() {
+    if (handle_) {
+      int err;
+      err = symbol_table()->snd_pcm_drop()(handle_);
+      if (err != 0) {
+        LOG(LS_ERROR) << "snd_pcm_drop(): " << GetError(err);
+        // Continue anyway.
+      }
+      err = symbol_table()->snd_pcm_close()(handle_);
+      if (err != 0) {
+        LOG(LS_ERROR) << "snd_pcm_close(): " << GetError(err);
+        // Continue anyway.
+      }
+      handle_ = NULL;
+    }
+    return true;
+  }
+
+  AlsaSymbolTable *symbol_table() {
+    return &alsa_->symbol_table_;
+  }
+
+  snd_pcm_t *handle() {
+    return handle_;
+  }
+
+  const char *GetError(int err) {
+    return alsa_->GetError(err);
+  }
+
+  size_t frame_size() {
+    return frame_size_;
+  }
+
+ private:
+  AlsaSoundSystem *alsa_;
+  snd_pcm_t *handle_;
+  size_t frame_size_;
+  int wait_timeout_ms_;
+  int flags_;
+  int freq_;
+
+  DISALLOW_COPY_AND_ASSIGN(AlsaStream);
+};
+
+// Implementation of an input stream. See soundinputstreaminterface.h regarding
+// thread-safety.
+class AlsaInputStream :
+    public SoundInputStreamInterface,
+    private rtc::Worker {
+ public:
+  AlsaInputStream(AlsaSoundSystem *alsa,
+                  snd_pcm_t *handle,
+                  size_t frame_size,
+                  int wait_timeout_ms,
+                  int flags,
+                  int freq)
+      : stream_(alsa, handle, frame_size, wait_timeout_ms, flags, freq),
+        buffer_size_(0) {
+  }
+
+  virtual ~AlsaInputStream() {
+    bool success = StopReading();
+    // The worker must be stopped before destruction can safely proceed.
+    VERIFY(success);
+  }
+
+  virtual bool StartReading() {
+    return StartWork();
+  }
+
+  virtual bool StopReading() {
+    return StopWork();
+  }
+
+  virtual bool GetVolume(int *volume) {
+    // TODO: Implement this.
+    return false;
+  }
+
+  virtual bool SetVolume(int volume) {
+    // TODO: Implement this.
+    return false;
+  }
+
+  virtual bool Close() {
+    return StopReading() && stream_.Close();
+  }
+
+  virtual int LatencyUsecs() {
+    return stream_.CurrentDelayUsecs();
+  }
+
+ private:
+  // Inherited from Worker.
+  virtual void OnStart() {
+    HaveWork();
+  }
+
+  // Inherited from Worker.
+  virtual void OnHaveWork() {
+    // Block waiting for data.
+    snd_pcm_uframes_t avail = stream_.Wait();
+    if (avail > 0) {
+      // Data is available.
+      size_t size = avail * stream_.frame_size();
+      if (size > buffer_size_) {
+        // Must increase buffer size.
+        buffer_.reset(new char[size]);
+        buffer_size_ = size;
+      }
+      // Read all the data.
+      snd_pcm_sframes_t read = stream_.symbol_table()->snd_pcm_readi()(
+          stream_.handle(),
+          buffer_.get(),
+          avail);
+      if (read < 0) {
+        LOG(LS_ERROR) << "snd_pcm_readi(): " << GetError(read);
+        stream_.Recover(read);
+      } else if (read == 0) {
+        // Docs say this shouldn't happen.
+        ASSERT(false);
+        LOG(LS_ERROR) << "No data?";
+      } else {
+        // Got data. Pass it off to the app.
+        SignalSamplesRead(buffer_.get(),
+                          read * stream_.frame_size(),
+                          this);
+      }
+    }
+    // Check for more data with no delay, after any pending messages are
+    // dispatched.
+    HaveWork();
+  }
+
+  // Inherited from Worker.
+  virtual void OnStop() {
+    // Nothing to do.
+  }
+
+  const char *GetError(int err) {
+    return stream_.GetError(err);
+  }
+
+  AlsaStream stream_;
+  rtc::scoped_ptr<char[]> buffer_;
+  size_t buffer_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(AlsaInputStream);
+};
+
+// Implementation of an output stream. See soundoutputstreaminterface.h
+// regarding thread-safety.
+class AlsaOutputStream :
+    public SoundOutputStreamInterface,
+    private rtc::Worker {
+ public:
+  AlsaOutputStream(AlsaSoundSystem *alsa,
+                   snd_pcm_t *handle,
+                   size_t frame_size,
+                   int wait_timeout_ms,
+                   int flags,
+                   int freq)
+      : stream_(alsa, handle, frame_size, wait_timeout_ms, flags, freq) {
+  }
+
+  virtual ~AlsaOutputStream() {
+    bool success = DisableBufferMonitoring();
+    // The worker must be stopped before destruction can safely proceed.
+    VERIFY(success);
+  }
+
+  virtual bool EnableBufferMonitoring() {
+    return StartWork();
+  }
+
+  virtual bool DisableBufferMonitoring() {
+    return StopWork();
+  }
+
+  virtual bool WriteSamples(const void *sample_data,
+                            size_t size) {
+    if (size % stream_.frame_size() != 0) {
+      // No client of SoundSystemInterface does this, so let's not support it.
+      // (If we wanted to support it, we'd basically just buffer the fractional
+      // frame until we get more data.)
+      ASSERT(false);
+      LOG(LS_ERROR) << "Writes with fractional frames are not supported";
+      return false;
+    }
+    snd_pcm_uframes_t frames = size / stream_.frame_size();
+    snd_pcm_sframes_t written = stream_.symbol_table()->snd_pcm_writei()(
+        stream_.handle(),
+        sample_data,
+        frames);
+    if (written < 0) {
+      LOG(LS_ERROR) << "snd_pcm_writei(): " << GetError(written);
+      stream_.Recover(written);
+      return false;
+    } else if (static_cast<snd_pcm_uframes_t>(written) < frames) {
+      // Shouldn't happen. Drop the rest of the data.
+      LOG(LS_ERROR) << "Stream wrote only " << written << " of " << frames
+                    << " frames!";
+      return false;
+    }
+    return true;
+  }
+
+  virtual bool GetVolume(int *volume) {
+    // TODO: Implement this.
+    return false;
+  }
+
+  virtual bool SetVolume(int volume) {
+    // TODO: Implement this.
+    return false;
+  }
+
+  virtual bool Close() {
+    return DisableBufferMonitoring() && stream_.Close();
+  }
+
+  virtual int LatencyUsecs() {
+    return stream_.CurrentDelayUsecs();
+  }
+
+ private:
+  // Inherited from Worker.
+  virtual void OnStart() {
+    HaveWork();
+  }
+
+  // Inherited from Worker.
+  virtual void OnHaveWork() {
+    snd_pcm_uframes_t avail = stream_.Wait();
+    if (avail > 0) {
+      size_t space = avail * stream_.frame_size();
+      SignalBufferSpace(space, this);
+    }
+    HaveWork();
+  }
+
+  // Inherited from Worker.
+  virtual void OnStop() {
+    // Nothing to do.
+  }
+
+  const char *GetError(int err) {
+    return stream_.GetError(err);
+  }
+
+  AlsaStream stream_;
+
+  DISALLOW_COPY_AND_ASSIGN(AlsaOutputStream);
+};
+
+AlsaSoundSystem::AlsaSoundSystem() : initialized_(false) {}
+
+AlsaSoundSystem::~AlsaSoundSystem() {
+  // Not strictly necessary, because Terminate() currently does very little.
+  Terminate();
+}
+
+bool AlsaSoundSystem::Init() {
+  if (IsInitialized()) {
+    return true;
+  }
+
+  // Load libasound.
+  if (!symbol_table_.Load()) {
+    // It would be very odd for a Linux machine not to have a working libasound.
+    LOG(LS_ERROR) << "Failed to load symbol table";
+    return false;
+  }
+
+  initialized_ = true;
+
+  return true;
+}
+
+void AlsaSoundSystem::Terminate() {
+  if (!IsInitialized()) {
+    return;
+  }
+
+  initialized_ = false;
+
+  // We do not unload the symbol table because we may need it again soon if
+  // Init() is called again.
+}
+
+bool AlsaSoundSystem::EnumeratePlaybackDevices(
+    SoundDeviceLocatorList *devices) {
+  return EnumerateDevices(devices, false);
+}
+
+bool AlsaSoundSystem::EnumerateCaptureDevices(
+    SoundDeviceLocatorList *devices) {
+  return EnumerateDevices(devices, true);
+}
+
+bool AlsaSoundSystem::GetDefaultPlaybackDevice(SoundDeviceLocator **device) {
+  return GetDefaultDevice(device);
+}
+
+bool AlsaSoundSystem::GetDefaultCaptureDevice(SoundDeviceLocator **device) {
+  return GetDefaultDevice(device);
+}
+
+SoundOutputStreamInterface *AlsaSoundSystem::OpenPlaybackDevice(
+    const SoundDeviceLocator *device,
+    const OpenParams &params) {
+  return OpenDevice<SoundOutputStreamInterface>(
+      device,
+      params,
+      SND_PCM_STREAM_PLAYBACK,
+      &AlsaSoundSystem::StartOutputStream);
+}
+
+SoundInputStreamInterface *AlsaSoundSystem::OpenCaptureDevice(
+    const SoundDeviceLocator *device,
+    const OpenParams &params) {
+  return OpenDevice<SoundInputStreamInterface>(
+      device,
+      params,
+      SND_PCM_STREAM_CAPTURE,
+      &AlsaSoundSystem::StartInputStream);
+}
+
+const char *AlsaSoundSystem::GetName() const {
+  return "ALSA";
+}
+
+bool AlsaSoundSystem::EnumerateDevices(
+    SoundDeviceLocatorList *devices,
+    bool capture_not_playback) {
+  ClearSoundDeviceLocatorList(devices);
+
+  if (!IsInitialized()) {
+    return false;
+  }
+
+  const char *type = capture_not_playback ? "Input" : "Output";
+  // dmix and dsnoop are only for playback and capture, respectively, but ALSA
+  // stupidly includes them in both lists.
+  const char *ignore_prefix = capture_not_playback ? "dmix:" : "dsnoop:";
+  // (ALSA lists many more "devices" of questionable interest, but we show them
+  // in case these unusual devices are actually desirable for some
+  // users/systems.)
+  const char *ignore_default = "default";
+  const char *ignore_null = "null";
+  const char *ignore_pulse = "pulse";
+  // The 'pulse' entry has a habit of mysteriously disappearing when you query
+  // a second time. Remove it from our list. (GIPS lib did the same thing.)
+  int err;
+
+  void **hints;
+  err = symbol_table_.snd_device_name_hint()(-1,     // All cards
+                                             "pcm",  // Only PCM devices
+                                             &hints);
+  if (err != 0) {
+    LOG(LS_ERROR) << "snd_device_name_hint(): " << GetError(err);
+    return false;
+  }
+
+  for (void **list = hints; *list != NULL; ++list) {
+    char *actual_type = symbol_table_.snd_device_name_get_hint()(*list, "IOID");
+    if (actual_type) {  // NULL means it's both.
+      bool wrong_type = (strcmp(actual_type, type) != 0);
+      free(actual_type);
+      if (wrong_type) {
+        // Wrong type of device (i.e., input vs. output).
+        continue;
+      }
+    }
+
+    char *name = symbol_table_.snd_device_name_get_hint()(*list, "NAME");
+    if (!name) {
+      LOG(LS_ERROR) << "Device has no name???";
+      // Skip it.
+      continue;
+    }
+
+    // Now check if we actually want to show this device.
+    if (strcmp(name, ignore_default) != 0 &&
+        strcmp(name, ignore_null) != 0 &&
+        strcmp(name, ignore_pulse) != 0 &&
+        !rtc::starts_with(name, ignore_prefix)) {
+
+      // Yes, we do.
+      char *desc = symbol_table_.snd_device_name_get_hint()(*list, "DESC");
+      if (!desc) {
+        // Virtual devices don't necessarily have descriptions. Use their names
+        // instead (not pretty!).
+        desc = name;
+      }
+
+      AlsaDeviceLocator *device = new AlsaDeviceLocator(desc, name);
+
+      devices->push_back(device);
+
+      if (desc != name) {
+        free(desc);
+      }
+    }
+
+    free(name);
+  }
+
+  err = symbol_table_.snd_device_name_free_hint()(hints);
+  if (err != 0) {
+    LOG(LS_ERROR) << "snd_device_name_free_hint(): " << GetError(err);
+    // Continue and return true anyway, since we did get the whole list.
+  }
+
+  return true;
+}
+
+bool AlsaSoundSystem::GetDefaultDevice(SoundDeviceLocator **device) {
+  if (!IsInitialized()) {
+    return false;
+  }
+  *device = new AlsaDeviceLocator("Default device", "default");
+  return true;
+}
+
+inline size_t AlsaSoundSystem::FrameSize(const OpenParams &params) {
+  ASSERT(static_cast<int>(params.format) <
+         ARRAY_SIZE(kCricketFormatToSampleSizeTable));
+  return kCricketFormatToSampleSizeTable[params.format] * params.channels;
+}
+
+template <typename StreamInterface>
+StreamInterface *AlsaSoundSystem::OpenDevice(
+    const SoundDeviceLocator *device,
+    const OpenParams &params,
+    snd_pcm_stream_t type,
+    StreamInterface *(AlsaSoundSystem::*start_fn)(
+        snd_pcm_t *handle,
+        size_t frame_size,
+        int wait_timeout_ms,
+        int flags,
+        int freq)) {
+
+  if (!IsInitialized()) {
+    return NULL;
+  }
+
+  StreamInterface *stream;
+  int err;
+
+  const char *dev = static_cast<const AlsaDeviceLocator *>(device)->
+      device_name().c_str();
+
+  snd_pcm_t *handle = NULL;
+  err = symbol_table_.snd_pcm_open()(
+      &handle,
+      dev,
+      type,
+      // No flags.
+      0);
+  if (err != 0) {
+    LOG(LS_ERROR) << "snd_pcm_open(" << dev << "): " << GetError(err);
+    return NULL;
+  }
+  LOG(LS_VERBOSE) << "Opening " << dev;
+  ASSERT(handle);  // If open succeeded, handle ought to be valid
+
+  // Compute requested latency in microseconds.
+  int latency;
+  if (params.latency == kNoLatencyRequirements) {
+    latency = kDefaultLatencyUsecs;
+  } else {
+    // kLowLatency is 0, so we treat it the same as a request for zero latency.
+    // Compute what the user asked for.
+    latency = rtc::kNumMicrosecsPerSec *
+        params.latency /
+        params.freq /
+        FrameSize(params);
+    // And this is what we'll actually use.
+    latency = rtc::_max(latency, kMinimumLatencyUsecs);
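+    // A worked example, assuming params.latency is a byte count (as the units
+    // of the formula above imply): 640 bytes at 16000 Hz with a 2-byte frame
+    // size gives 1000000 * 640 / 16000 / 2 = 20000 usecs, which already meets
+    // the 20 ms minimum.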
+  }
+
+  ASSERT(static_cast<int>(params.format) <
+         ARRAY_SIZE(kCricketFormatToAlsaFormatTable));
+
+  err = symbol_table_.snd_pcm_set_params()(
+      handle,
+      kCricketFormatToAlsaFormatTable[params.format],
+      // SoundSystemInterface only supports interleaved audio.
+      SND_PCM_ACCESS_RW_INTERLEAVED,
+      params.channels,
+      params.freq,
+      1,  // Allow ALSA to resample.
+      latency);
+  if (err != 0) {
+    LOG(LS_ERROR) << "snd_pcm_set_params(): " << GetError(err);
+    goto fail;
+  }
+
+  err = symbol_table_.snd_pcm_prepare()(handle);
+  if (err != 0) {
+    LOG(LS_ERROR) << "snd_pcm_prepare(): " << GetError(err);
+    goto fail;
+  }
+
+  stream = (this->*start_fn)(
+      handle,
+      FrameSize(params),
+      // We set the wait time to twice the requested latency, so that wait
+      // timeouts should be rare.
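+      // (With the default 40000-usec latency this is an 80-ms timeout.)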
+      2 * latency / rtc::kNumMicrosecsPerMillisec,
+      params.flags,
+      params.freq);
+  if (stream) {
+    return stream;
+  }
+  // Else fall through.
+
+ fail:
+  err = symbol_table_.snd_pcm_close()(handle);
+  if (err != 0) {
+    LOG(LS_ERROR) << "snd_pcm_close(): " << GetError(err);
+  }
+  return NULL;
+}
+
+SoundOutputStreamInterface *AlsaSoundSystem::StartOutputStream(
+    snd_pcm_t *handle,
+    size_t frame_size,
+    int wait_timeout_ms,
+    int flags,
+    int freq) {
+  // Nothing to do here but instantiate the stream.
+  return new AlsaOutputStream(
+      this, handle, frame_size, wait_timeout_ms, flags, freq);
+}
+
+SoundInputStreamInterface *AlsaSoundSystem::StartInputStream(
+    snd_pcm_t *handle,
+    size_t frame_size,
+    int wait_timeout_ms,
+    int flags,
+    int freq) {
+  // Output streams start automatically once enough data has been written, but
+  // input streams must be started manually or else snd_pcm_wait() will never
+  // return true.
+  int err;
+  err = symbol_table_.snd_pcm_start()(handle);
+  if (err != 0) {
+    LOG(LS_ERROR) << "snd_pcm_start(): " << GetError(err);
+    return NULL;
+  }
+  return new AlsaInputStream(
+      this, handle, frame_size, wait_timeout_ms, flags, freq);
+}
+
+inline const char *AlsaSoundSystem::GetError(int err) {
+  return symbol_table_.snd_strerror()(err);
+}
+
+}  // namespace rtc
diff --git a/sound/alsasoundsystem.h b/sound/alsasoundsystem.h
new file mode 100644
index 0000000..f95e686
--- /dev/null
+++ b/sound/alsasoundsystem.h
@@ -0,0 +1,103 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_ALSASOUNDSYSTEM_H_
+#define WEBRTC_SOUND_ALSASOUNDSYSTEM_H_
+
+#include "webrtc/sound/alsasymboltable.h"
+#include "webrtc/sound/soundsysteminterface.h"
+#include "webrtc/base/constructormagic.h"
+
+namespace rtc {
+
+class AlsaStream;
+class AlsaInputStream;
+class AlsaOutputStream;
+
+// Sound system implementation for ALSA, the predominant sound device API on
+// Linux (but typically not used directly by applications anymore).
+class AlsaSoundSystem : public SoundSystemInterface {
+  friend class AlsaStream;
+  friend class AlsaInputStream;
+  friend class AlsaOutputStream;
+ public:
+  static SoundSystemInterface *Create() {
+    return new AlsaSoundSystem();
+  }
+
+  AlsaSoundSystem();
+
+  virtual ~AlsaSoundSystem();
+
+  virtual bool Init();
+  virtual void Terminate();
+
+  virtual bool EnumeratePlaybackDevices(SoundDeviceLocatorList *devices);
+  virtual bool EnumerateCaptureDevices(SoundDeviceLocatorList *devices);
+
+  virtual bool GetDefaultPlaybackDevice(SoundDeviceLocator **device);
+  virtual bool GetDefaultCaptureDevice(SoundDeviceLocator **device);
+
+  virtual SoundOutputStreamInterface *OpenPlaybackDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params);
+  virtual SoundInputStreamInterface *OpenCaptureDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params);
+
+  virtual const char *GetName() const;
+
+ private:
+  bool IsInitialized() { return initialized_; }
+
+  bool EnumerateDevices(SoundDeviceLocatorList *devices,
+                        bool capture_not_playback);
+
+  bool GetDefaultDevice(SoundDeviceLocator **device);
+
+  static size_t FrameSize(const OpenParams &params);
+
+  template <typename StreamInterface>
+  StreamInterface *OpenDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params,
+      snd_pcm_stream_t type,
+      StreamInterface *(AlsaSoundSystem::*start_fn)(
+          snd_pcm_t *handle,
+          size_t frame_size,
+          int wait_timeout_ms,
+          int flags,
+          int freq));
+
+  SoundOutputStreamInterface *StartOutputStream(
+      snd_pcm_t *handle,
+      size_t frame_size,
+      int wait_timeout_ms,
+      int flags,
+      int freq);
+
+  SoundInputStreamInterface *StartInputStream(
+      snd_pcm_t *handle,
+      size_t frame_size,
+      int wait_timeout_ms,
+      int flags,
+      int freq);
+
+  const char *GetError(int err);
+
+  bool initialized_;
+  AlsaSymbolTable symbol_table_;
+
+  DISALLOW_COPY_AND_ASSIGN(AlsaSoundSystem);
+};
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_ALSASOUNDSYSTEM_H_
diff --git a/sound/alsasymboltable.cc b/sound/alsasymboltable.cc
new file mode 100644
index 0000000..0670fb2
--- /dev/null
+++ b/sound/alsasymboltable.cc
@@ -0,0 +1,20 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/sound/alsasymboltable.h"
+
+namespace rtc {
+
+#define LATE_BINDING_SYMBOL_TABLE_CLASS_NAME ALSA_SYMBOLS_CLASS_NAME
+#define LATE_BINDING_SYMBOL_TABLE_SYMBOLS_LIST ALSA_SYMBOLS_LIST
+#define LATE_BINDING_SYMBOL_TABLE_DLL_NAME "libasound.so.2"
+#include "webrtc/base/latebindingsymboltable.cc.def"
+
+}  // namespace rtc
diff --git a/sound/alsasymboltable.h b/sound/alsasymboltable.h
new file mode 100644
index 0000000..2cc8dba
--- /dev/null
+++ b/sound/alsasymboltable.h
@@ -0,0 +1,49 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_ALSASYMBOLTABLE_H_
+#define WEBRTC_SOUND_ALSASYMBOLTABLE_H_
+
+#include <alsa/asoundlib.h>
+
+#include "webrtc/base/latebindingsymboltable.h"
+
+namespace rtc {
+
+#define ALSA_SYMBOLS_CLASS_NAME AlsaSymbolTable
+// The ALSA symbols we need, as an X-Macro list.
+// This list must contain precisely every libasound function that is used in
+// alsasoundsystem.cc.
+#define ALSA_SYMBOLS_LIST \
+  X(snd_device_name_free_hint) \
+  X(snd_device_name_get_hint) \
+  X(snd_device_name_hint) \
+  X(snd_pcm_avail_update) \
+  X(snd_pcm_close) \
+  X(snd_pcm_delay) \
+  X(snd_pcm_drop) \
+  X(snd_pcm_open) \
+  X(snd_pcm_prepare) \
+  X(snd_pcm_readi) \
+  X(snd_pcm_recover) \
+  X(snd_pcm_set_params) \
+  X(snd_pcm_start) \
+  X(snd_pcm_stream) \
+  X(snd_pcm_wait) \
+  X(snd_pcm_writei) \
+  X(snd_strerror)
+
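+// Each symbol listed above becomes an accessor on AlsaSymbolTable through
+// which the underlying libasound function is invoked, for example (as in
+// alsasoundsystem.cc):
+//   err = symbol_table_.snd_pcm_prepare()(handle);
+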
+#define LATE_BINDING_SYMBOL_TABLE_CLASS_NAME ALSA_SYMBOLS_CLASS_NAME
+#define LATE_BINDING_SYMBOL_TABLE_SYMBOLS_LIST ALSA_SYMBOLS_LIST
+#include "webrtc/base/latebindingsymboltable.h.def"
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_ALSASYMBOLTABLE_H_
diff --git a/sound/automaticallychosensoundsystem.h b/sound/automaticallychosensoundsystem.h
new file mode 100644
index 0000000..84ff717
--- /dev/null
+++ b/sound/automaticallychosensoundsystem.h
@@ -0,0 +1,88 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_AUTOMATICALLYCHOSENSOUNDSYSTEM_H_
+#define WEBRTC_SOUND_AUTOMATICALLYCHOSENSOUNDSYSTEM_H_
+
+#include "webrtc/sound/soundsysteminterface.h"
+#include "webrtc/sound/soundsystemproxy.h"
+#include "webrtc/base/common.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/scoped_ptr.h"
+
+namespace rtc {
+
+// A function type that creates an instance of a sound system implementation.
+typedef SoundSystemInterface *(*SoundSystemCreator)();
+
+// An AutomaticallyChosenSoundSystem is a sound system proxy that defers to
+// an instance of the first sound system implementation in a list that
+// successfully initializes.
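+//
+// For example, linuxsoundsystem.h instantiates this template over an array of
+// creator functions to form the platform sound system:
+//
+//   typedef AutomaticallyChosenSoundSystem<
+//       kLinuxSoundSystemCreators,
+//       ARRAY_SIZE(kLinuxSoundSystemCreators)> LinuxSoundSystem;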
+template <const SoundSystemCreator kSoundSystemCreators[], int kNumSoundSystems>
+class AutomaticallyChosenSoundSystem : public SoundSystemProxy {
+ public:
+  // Chooses and initializes the underlying sound system.
+  virtual bool Init();
+  // Terminates the underlying sound system implementation, but caches it for
+  // future re-use.
+  virtual void Terminate();
+
+  virtual const char *GetName() const;
+
+ private:
+  rtc::scoped_ptr<SoundSystemInterface> sound_systems_[kNumSoundSystems];
+};
+
+template <const SoundSystemCreator kSoundSystemCreators[], int kNumSoundSystems>
+bool AutomaticallyChosenSoundSystem<kSoundSystemCreators,
+                                    kNumSoundSystems>::Init() {
+  if (wrapped_) {
+    return true;
+  }
+  for (int i = 0; i < kNumSoundSystems; ++i) {
+    if (!sound_systems_[i].get()) {
+      sound_systems_[i].reset((*kSoundSystemCreators[i])());
+    }
+    if (sound_systems_[i]->Init()) {
+      // This is the first sound system in the list to successfully
+      // initialize, so we're done.
+      wrapped_ = sound_systems_[i].get();
+      break;
+    }
+    // Else it failed to initialize, so try the remaining ones.
+  }
+  if (!wrapped_) {
+    LOG(LS_ERROR) << "Failed to find a usable sound system";
+    return false;
+  }
+  LOG(LS_INFO) << "Selected " << wrapped_->GetName() << " sound system";
+  return true;
+}
+
+template <const SoundSystemCreator kSoundSystemCreators[], int kNumSoundSystems>
+void AutomaticallyChosenSoundSystem<kSoundSystemCreators,
+                                    kNumSoundSystems>::Terminate() {
+  if (!wrapped_) {
+    return;
+  }
+  wrapped_->Terminate();
+  wrapped_ = NULL;
+  // We do not free the scoped_ptrs because we may be re-initialized soon.
+}
+
+template <const SoundSystemCreator kSoundSystemCreators[], int kNumSoundSystems>
+const char *AutomaticallyChosenSoundSystem<kSoundSystemCreators,
+                                           kNumSoundSystems>::GetName() const {
+  return wrapped_ ? wrapped_->GetName() : "automatic";
+}
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_AUTOMATICALLYCHOSENSOUNDSYSTEM_H_
diff --git a/sound/automaticallychosensoundsystem_unittest.cc b/sound/automaticallychosensoundsystem_unittest.cc
new file mode 100644
index 0000000..5cfd7c6
--- /dev/null
+++ b/sound/automaticallychosensoundsystem_unittest.cc
@@ -0,0 +1,197 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/sound/automaticallychosensoundsystem.h"
+#include "webrtc/sound/nullsoundsystem.h"
+#include "webrtc/base/gunit.h"
+
+namespace rtc {
+
+class NeverFailsToFailSoundSystem : public NullSoundSystem {
+ public:
+  // Overrides superclass.
+  virtual bool Init() {
+    return false;
+  }
+
+  static SoundSystemInterface *Create() {
+    return new NeverFailsToFailSoundSystem();
+  }
+};
+
+class InitCheckingSoundSystem1 : public NullSoundSystem {
+ public:
+  // Overrides superclass.
+  virtual bool Init() {
+    created_ = true;
+    return true;
+  }
+
+  static SoundSystemInterface *Create() {
+    return new InitCheckingSoundSystem1();
+  }
+
+  static bool created_;
+};
+
+bool InitCheckingSoundSystem1::created_ = false;
+
+class InitCheckingSoundSystem2 : public NullSoundSystem {
+ public:
+  // Overrides superclass.
+  virtual bool Init() {
+    created_ = true;
+    return true;
+  }
+
+  static SoundSystemInterface *Create() {
+    return new InitCheckingSoundSystem2();
+  }
+
+  static bool created_;
+};
+
+bool InitCheckingSoundSystem2::created_ = false;
+
+class DeletionCheckingSoundSystem1 : public NeverFailsToFailSoundSystem {
+ public:
+  virtual ~DeletionCheckingSoundSystem1() {
+    deleted_ = true;
+  }
+
+  static SoundSystemInterface *Create() {
+    return new DeletionCheckingSoundSystem1();
+  }
+
+  static bool deleted_;
+};
+
+bool DeletionCheckingSoundSystem1::deleted_ = false;
+
+class DeletionCheckingSoundSystem2 : public NeverFailsToFailSoundSystem {
+ public:
+  virtual ~DeletionCheckingSoundSystem2() {
+    deleted_ = true;
+  }
+
+  static SoundSystemInterface *Create() {
+    return new DeletionCheckingSoundSystem2();
+  }
+
+  static bool deleted_;
+};
+
+bool DeletionCheckingSoundSystem2::deleted_ = false;
+
+class DeletionCheckingSoundSystem3 : public NullSoundSystem {
+ public:
+  virtual ~DeletionCheckingSoundSystem3() {
+    deleted_ = true;
+  }
+
+  static SoundSystemInterface *Create() {
+    return new DeletionCheckingSoundSystem3();
+  }
+
+  static bool deleted_;
+};
+
+bool DeletionCheckingSoundSystem3::deleted_ = false;
+
+extern const SoundSystemCreator kSingleSystemFailingCreators[] = {
+  &NeverFailsToFailSoundSystem::Create,
+};
+
+TEST(AutomaticallyChosenSoundSystem, SingleSystemFailing) {
+  AutomaticallyChosenSoundSystem<
+      kSingleSystemFailingCreators,
+      ARRAY_SIZE(kSingleSystemFailingCreators)> sound_system;
+  EXPECT_FALSE(sound_system.Init());
+}
+
+extern const SoundSystemCreator kSingleSystemSucceedingCreators[] = {
+  &NullSoundSystem::Create,
+};
+
+TEST(AutomaticallyChosenSoundSystem, SingleSystemSucceeding) {
+  AutomaticallyChosenSoundSystem<
+      kSingleSystemSucceedingCreators,
+      ARRAY_SIZE(kSingleSystemSucceedingCreators)> sound_system;
+  EXPECT_TRUE(sound_system.Init());
+}
+
+extern const SoundSystemCreator
+    kFailedFirstSystemResultsInUsingSecondCreators[] = {
+  &NeverFailsToFailSoundSystem::Create,
+  &NullSoundSystem::Create,
+};
+
+TEST(AutomaticallyChosenSoundSystem, FailedFirstSystemResultsInUsingSecond) {
+  AutomaticallyChosenSoundSystem<
+      kFailedFirstSystemResultsInUsingSecondCreators,
+      ARRAY_SIZE(kFailedFirstSystemResultsInUsingSecondCreators)> sound_system;
+  EXPECT_TRUE(sound_system.Init());
+}
+
+extern const SoundSystemCreator kEarlierEntriesHavePriorityCreators[] = {
+  &InitCheckingSoundSystem1::Create,
+  &InitCheckingSoundSystem2::Create,
+};
+
+TEST(AutomaticallyChosenSoundSystem, EarlierEntriesHavePriority) {
+  AutomaticallyChosenSoundSystem<
+      kEarlierEntriesHavePriorityCreators,
+      ARRAY_SIZE(kEarlierEntriesHavePriorityCreators)> sound_system;
+  InitCheckingSoundSystem1::created_ = false;
+  InitCheckingSoundSystem2::created_ = false;
+  EXPECT_TRUE(sound_system.Init());
+  EXPECT_TRUE(InitCheckingSoundSystem1::created_);
+  EXPECT_FALSE(InitCheckingSoundSystem2::created_);
+}
+
+extern const SoundSystemCreator kManySoundSystemsCreators[] = {
+  &NullSoundSystem::Create,
+  &NullSoundSystem::Create,
+  &NullSoundSystem::Create,
+  &NullSoundSystem::Create,
+  &NullSoundSystem::Create,
+  &NullSoundSystem::Create,
+  &NullSoundSystem::Create,
+};
+
+TEST(AutomaticallyChosenSoundSystem, ManySoundSystems) {
+  AutomaticallyChosenSoundSystem<
+      kManySoundSystemsCreators,
+      ARRAY_SIZE(kManySoundSystemsCreators)> sound_system;
+  EXPECT_TRUE(sound_system.Init());
+}
+
+extern const SoundSystemCreator kDeletesAllCreatedSoundSystemsCreators[] = {
+  &DeletionCheckingSoundSystem1::Create,
+  &DeletionCheckingSoundSystem2::Create,
+  &DeletionCheckingSoundSystem3::Create,
+};
+
+TEST(AutomaticallyChosenSoundSystem, DeletesAllCreatedSoundSystems) {
+  typedef AutomaticallyChosenSoundSystem<
+      kDeletesAllCreatedSoundSystemsCreators,
+      ARRAY_SIZE(kDeletesAllCreatedSoundSystemsCreators)> TestSoundSystem;
+  TestSoundSystem *sound_system = new TestSoundSystem();
+  DeletionCheckingSoundSystem1::deleted_ = false;
+  DeletionCheckingSoundSystem2::deleted_ = false;
+  DeletionCheckingSoundSystem3::deleted_ = false;
+  EXPECT_TRUE(sound_system->Init());
+  delete sound_system;
+  EXPECT_TRUE(DeletionCheckingSoundSystem1::deleted_);
+  EXPECT_TRUE(DeletionCheckingSoundSystem2::deleted_);
+  EXPECT_TRUE(DeletionCheckingSoundSystem3::deleted_);
+}
+
+}  // namespace rtc
diff --git a/sound/linuxsoundsystem.cc b/sound/linuxsoundsystem.cc
new file mode 100644
index 0000000..c420ab5
--- /dev/null
+++ b/sound/linuxsoundsystem.cc
@@ -0,0 +1,25 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/sound/linuxsoundsystem.h"
+
+#include "webrtc/sound/alsasoundsystem.h"
+#include "webrtc/sound/pulseaudiosoundsystem.h"
+
+namespace rtc {
+
+const SoundSystemCreator kLinuxSoundSystemCreators[] = {
+#ifdef HAVE_LIBPULSE
+  &PulseAudioSoundSystem::Create,
+#endif
+  &AlsaSoundSystem::Create,
+};
+
+}  // namespace rtc
diff --git a/sound/linuxsoundsystem.h b/sound/linuxsoundsystem.h
new file mode 100644
index 0000000..0016f8a
--- /dev/null
+++ b/sound/linuxsoundsystem.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_LINUXSOUNDSYSTEM_H_
+#define WEBRTC_SOUND_LINUXSOUNDSYSTEM_H_
+
+#include "webrtc/sound/automaticallychosensoundsystem.h"
+
+namespace rtc {
+
+extern const SoundSystemCreator kLinuxSoundSystemCreators[
+#ifdef HAVE_LIBPULSE
+    2
+#else
+    1
+#endif
+    ];
+
+// The vast majority of Linux systems use ALSA for the device-level sound API,
+// but an increasing number are using PulseAudio for the application API and
+// only using ALSA internally in PulseAudio itself. But like everything on
+// Linux this is user-configurable, so we need to support both and choose the
+// right one at run-time.
+// PulseAudioSoundSystem is designed to initialize successfully only if
+// PulseAudio is installed and running, and when it is running, direct device
+// access through ALSA typically won't work. So if PulseAudioSoundSystem
+// initializes, we choose it; otherwise we choose ALSA.
+typedef AutomaticallyChosenSoundSystem<
+    kLinuxSoundSystemCreators,
+    ARRAY_SIZE(kLinuxSoundSystemCreators)> LinuxSoundSystem;
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_LINUXSOUNDSYSTEM_H_
diff --git a/sound/nullsoundsystem.cc b/sound/nullsoundsystem.cc
new file mode 100644
index 0000000..962f410
--- /dev/null
+++ b/sound/nullsoundsystem.cc
@@ -0,0 +1,157 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/sound/nullsoundsystem.h"
+
+#include "webrtc/sound/sounddevicelocator.h"
+#include "webrtc/sound/soundinputstreaminterface.h"
+#include "webrtc/sound/soundoutputstreaminterface.h"
+#include "webrtc/base/logging.h"
+
+namespace rtc {
+
+class Thread;
+
+// Name used for the single device and the sound system itself.
+static const char kNullName[] = "null";
+
+class NullSoundDeviceLocator : public SoundDeviceLocator {
+ public:
+  NullSoundDeviceLocator() : SoundDeviceLocator(kNullName, kNullName) {}
+
+  virtual SoundDeviceLocator *Copy() const {
+    return new NullSoundDeviceLocator();
+  }
+};
+
+class NullSoundInputStream : public SoundInputStreamInterface {
+ public:
+  virtual bool StartReading() {
+    return true;
+  }
+
+  virtual bool StopReading() {
+    return true;
+  }
+
+  virtual bool GetVolume(int *volume) {
+    *volume = SoundSystemInterface::kMinVolume;
+    return true;
+  }
+
+  virtual bool SetVolume(int volume) {
+    return false;
+  }
+
+  virtual bool Close() {
+    return true;
+  }
+
+  virtual int LatencyUsecs() {
+    return 0;
+  }
+};
+
+class NullSoundOutputStream : public SoundOutputStreamInterface {
+ public:
+  virtual bool EnableBufferMonitoring() {
+    return true;
+  }
+
+  virtual bool DisableBufferMonitoring() {
+    return true;
+  }
+
+  virtual bool WriteSamples(const void *sample_data,
+                            size_t size) {
+    LOG(LS_VERBOSE) << "Got " << size << " bytes of playback samples";
+    return true;
+  }
+
+  virtual bool GetVolume(int *volume) {
+    *volume = SoundSystemInterface::kMinVolume;
+    return true;
+  }
+
+  virtual bool SetVolume(int volume) {
+    return false;
+  }
+
+  virtual bool Close() {
+    return true;
+  }
+
+  virtual int LatencyUsecs() {
+    return 0;
+  }
+};
+
+NullSoundSystem::~NullSoundSystem() {
+}
+
+bool NullSoundSystem::Init() {
+  return true;
+}
+
+void NullSoundSystem::Terminate() {
+  // Nothing to do.
+}
+
+bool NullSoundSystem::EnumeratePlaybackDevices(
+      SoundSystemInterface::SoundDeviceLocatorList *devices) {
+  ClearSoundDeviceLocatorList(devices);
+  SoundDeviceLocator *device;
+  GetDefaultPlaybackDevice(&device);
+  devices->push_back(device);
+  return true;
+}
+
+bool NullSoundSystem::EnumerateCaptureDevices(
+      SoundSystemInterface::SoundDeviceLocatorList *devices) {
+  ClearSoundDeviceLocatorList(devices);
+  SoundDeviceLocator *device;
+  GetDefaultCaptureDevice(&device);
+  devices->push_back(device);
+  return true;
+}
+
+bool NullSoundSystem::GetDefaultPlaybackDevice(
+    SoundDeviceLocator **device) {
+  *device = new NullSoundDeviceLocator();
+  return true;
+}
+
+bool NullSoundSystem::GetDefaultCaptureDevice(
+    SoundDeviceLocator **device) {
+  *device = new NullSoundDeviceLocator();
+  return true;
+}
+
+SoundOutputStreamInterface *NullSoundSystem::OpenPlaybackDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params) {
+  return new NullSoundOutputStream();
+}
+
+SoundInputStreamInterface *NullSoundSystem::OpenCaptureDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params) {
+  return new NullSoundInputStream();
+}
+
+const char *NullSoundSystem::GetName() const {
+  return kNullName;
+}
+
+}  // namespace rtc
diff --git a/sound/nullsoundsystem.h b/sound/nullsoundsystem.h
new file mode 100644
index 0000000..6b74997
--- /dev/null
+++ b/sound/nullsoundsystem.h
@@ -0,0 +1,53 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_NULLSOUNDSYSTEM_H_
+#define WEBRTC_SOUND_NULLSOUNDSYSTEM_H_
+
+#include "webrtc/sound/soundsysteminterface.h"
+
+namespace rtc {
+
+class SoundDeviceLocator;
+class SoundInputStreamInterface;
+class SoundOutputStreamInterface;
+
+// A simple reference sound system that drops output samples and generates
+// no input samples.
+class NullSoundSystem : public SoundSystemInterface {
+ public:
+  static SoundSystemInterface *Create() {
+    return new NullSoundSystem();
+  }
+
+  virtual ~NullSoundSystem();
+
+  virtual bool Init();
+  virtual void Terminate();
+
+  virtual bool EnumeratePlaybackDevices(SoundDeviceLocatorList *devices);
+  virtual bool EnumerateCaptureDevices(SoundDeviceLocatorList *devices);
+
+  virtual SoundOutputStreamInterface *OpenPlaybackDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params);
+  virtual SoundInputStreamInterface *OpenCaptureDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params);
+
+  virtual bool GetDefaultPlaybackDevice(SoundDeviceLocator **device);
+  virtual bool GetDefaultCaptureDevice(SoundDeviceLocator **device);
+
+  virtual const char *GetName() const;
+};
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_NULLSOUNDSYSTEM_H_
diff --git a/sound/nullsoundsystemfactory.cc b/sound/nullsoundsystemfactory.cc
new file mode 100644
index 0000000..f35b6e7
--- /dev/null
+++ b/sound/nullsoundsystemfactory.cc
@@ -0,0 +1,32 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/sound/nullsoundsystemfactory.h"
+
+#include "webrtc/sound/nullsoundsystem.h"
+
+namespace rtc {
+
+NullSoundSystemFactory::NullSoundSystemFactory() {
+}
+
+NullSoundSystemFactory::~NullSoundSystemFactory() {
+}
+
+bool NullSoundSystemFactory::SetupInstance() {
+  instance_.reset(new NullSoundSystem());
+  return true;
+}
+
+void NullSoundSystemFactory::CleanupInstance() {
+  instance_.reset();
+}
+
+}  // namespace rtc
diff --git a/sound/nullsoundsystemfactory.h b/sound/nullsoundsystemfactory.h
new file mode 100644
index 0000000..8bdb463
--- /dev/null
+++ b/sound/nullsoundsystemfactory.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_NULLSOUNDSYSTEMFACTORY_H_
+#define WEBRTC_SOUND_NULLSOUNDSYSTEMFACTORY_H_
+
+#include "webrtc/sound/soundsystemfactory.h"
+
+namespace rtc {
+
+// A SoundSystemFactory that always returns a NullSoundSystem. Intended for
+// testing.
+class NullSoundSystemFactory : public SoundSystemFactory {
+ public:
+  NullSoundSystemFactory();
+  virtual ~NullSoundSystemFactory();
+
+ protected:
+  // Inherited from SoundSystemFactory.
+  virtual bool SetupInstance();
+  virtual void CleanupInstance();
+};
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_NULLSOUNDSYSTEMFACTORY_H_
diff --git a/sound/platformsoundsystem.cc b/sound/platformsoundsystem.cc
new file mode 100644
index 0000000..e4d7102
--- /dev/null
+++ b/sound/platformsoundsystem.cc
@@ -0,0 +1,31 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/sound/platformsoundsystem.h"
+
+#include "webrtc/base/common.h"
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+#include "webrtc/sound/linuxsoundsystem.h"
+#else
+#include "webrtc/sound/nullsoundsystem.h"
+#endif
+
+namespace rtc {
+
+SoundSystemInterface *CreatePlatformSoundSystem() {
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+  return new LinuxSoundSystem();
+#else
+  ASSERT(false && "Not implemented");
+  return new NullSoundSystem();
+#endif
+}
+
+}  // namespace rtc
diff --git a/sound/platformsoundsystem.h b/sound/platformsoundsystem.h
new file mode 100644
index 0000000..40eff26
--- /dev/null
+++ b/sound/platformsoundsystem.h
@@ -0,0 +1,23 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_PLATFORMSOUNDSYSTEM_H_
+#define WEBRTC_SOUND_PLATFORMSOUNDSYSTEM_H_
+
+namespace rtc {
+
+class SoundSystemInterface;
+
+// Creates the sound system implementation for this platform.
+SoundSystemInterface *CreatePlatformSoundSystem();
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_PLATFORMSOUNDSYSTEM_H_
diff --git a/sound/platformsoundsystemfactory.cc b/sound/platformsoundsystemfactory.cc
new file mode 100644
index 0000000..a229236
--- /dev/null
+++ b/sound/platformsoundsystemfactory.cc
@@ -0,0 +1,40 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/sound/platformsoundsystemfactory.h"
+
+#include "webrtc/sound/platformsoundsystem.h"
+#include "webrtc/sound/soundsysteminterface.h"
+
+namespace rtc {
+
+PlatformSoundSystemFactory::PlatformSoundSystemFactory() {
+}
+
+PlatformSoundSystemFactory::~PlatformSoundSystemFactory() {
+}
+
+bool PlatformSoundSystemFactory::SetupInstance() {
+  if (!instance_.get()) {
+    instance_.reset(CreatePlatformSoundSystem());
+  }
+  if (!instance_->Init()) {
+    LOG(LS_ERROR) << "Can't initialize platform's sound system";
+    return false;
+  }
+  return true;
+}
+
+void PlatformSoundSystemFactory::CleanupInstance() {
+  instance_->Terminate();
+  // We do not delete the sound system because we might be re-initialized soon.
+}
+
+}  // namespace rtc
diff --git a/sound/platformsoundsystemfactory.h b/sound/platformsoundsystemfactory.h
new file mode 100644
index 0000000..c5105ef
--- /dev/null
+++ b/sound/platformsoundsystemfactory.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_PLATFORMSOUNDSYSTEMFACTORY_H_
+#define WEBRTC_SOUND_PLATFORMSOUNDSYSTEMFACTORY_H_
+
+#include "webrtc/sound/soundsystemfactory.h"
+
+namespace rtc {
+
+// A SoundSystemFactory that returns the platform's native sound system
+// implementation.
+class PlatformSoundSystemFactory : public SoundSystemFactory {
+ public:
+  PlatformSoundSystemFactory();
+  virtual ~PlatformSoundSystemFactory();
+
+ protected:
+  // Inherited from SoundSystemFactory.
+  virtual bool SetupInstance();
+  virtual void CleanupInstance();
+};
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_PLATFORMSOUNDSYSTEMFACTORY_H_
diff --git a/sound/pulseaudiosoundsystem.cc b/sound/pulseaudiosoundsystem.cc
new file mode 100644
index 0000000..e063e17
--- /dev/null
+++ b/sound/pulseaudiosoundsystem.cc
@@ -0,0 +1,1542 @@
+/*
+ *  Copyright 2010 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/sound/pulseaudiosoundsystem.h"
+
+#ifdef HAVE_LIBPULSE
+
+#include "webrtc/sound/sounddevicelocator.h"
+#include "webrtc/sound/soundinputstreaminterface.h"
+#include "webrtc/sound/soundoutputstreaminterface.h"
+#include "webrtc/base/common.h"
+#include "webrtc/base/fileutils.h"  // for GetApplicationName()
+#include "webrtc/base/logging.h"
+#include "webrtc/base/timeutils.h"
+#include "webrtc/base/worker.h"
+
+namespace rtc {
+
+// First PulseAudio protocol version that supports PA_STREAM_ADJUST_LATENCY.
+static const uint32_t kAdjustLatencyProtocolVersion = 13;
+
+// Lookup table from the rtc format enum in soundsysteminterface.h to
+// Pulse's enums.
+static const pa_sample_format_t kCricketFormatToPulseFormatTable[] = {
+  // The order here must match the order in soundsysteminterface.h
+  PA_SAMPLE_S16LE,
+};
+
+// Some timing constants for optimal operation. See
+// https://tango.0pointer.de/pipermail/pulseaudio-discuss/2008-January/001170.html
+// for a good explanation of some of the factors that go into this.
+
+// Playback.
+
+// For playback, there is a round-trip delay to fill the server-side playback
+// buffer, so setting too low of a latency is a buffer underflow risk. We will
+// automatically increase the latency if a buffer underflow does occur, but we
+// also enforce a sane minimum at start-up time. Anything lower would be
+// virtually guaranteed to underflow at least once, so there's no point in
+// allowing lower latencies.
+static const int kPlaybackLatencyMinimumMsecs = 20;
+// Every time a playback stream underflows, we will reconfigure it with target
+// latency that is greater by this amount.
+static const int kPlaybackLatencyIncrementMsecs = 20;
+// We also need to configure a suitable request size. Too small and we'd burn
+// CPU from the overhead of transferring small amounts of data at once. Too large
+// and the amount of data remaining in the buffer right before refilling it
+// would be a buffer underflow risk. We set it to half of the buffer size.
+static const int kPlaybackRequestFactor = 2;
+
+// Capture.
+
+// For capture, low latency is not a buffer overflow risk, but it makes us burn
+// CPU from the overhead of transferring small amounts of data at once, so we set
+// a recommended value that we use for the kLowLatency constant (but if the user
+// explicitly requests something lower then we will honour it).
+// 1ms takes about 6-7% CPU. 5ms takes about 5%. 10ms takes about 4.x%.
+static const int kLowCaptureLatencyMsecs = 10 * 1000;  // 10 ms, in usecs.
+// There is a round-trip delay to ack the data to the server, so the
+// server-side buffer needs extra space to prevent buffer overflow. 20ms is
+// sufficient, but there is no penalty to making it bigger, so we make it huge.
+// (750ms is libpulse's default value for the _total_ buffer size in the
+// kNoLatencyRequirements case.)
+static const int kCaptureBufferExtraMsecs = 750 * 1000;  // 750 ms, in usecs.
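+// As an illustrative example only: for a mono 16-bit 48 kHz stream
+// (96000 bytes/sec), the kLowLatency capture configuration set up in
+// ConnectInputStream() works out to fragsize = 960 bytes (10 ms) and
+// maxlength = 960 + 72000 = 72960 bytes (10 ms + 750 ms).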
+
+static void FillPlaybackBufferAttr(int latency,
+                                   pa_buffer_attr *attr) {
+  attr->maxlength = latency;
+  attr->tlength = latency;
+  attr->minreq = latency / kPlaybackRequestFactor;
+  attr->prebuf = attr->tlength - attr->minreq;
+  LOG(LS_VERBOSE) << "Configuring latency = " << attr->tlength << ", minreq = "
+                  << attr->minreq << ", minfill = " << attr->prebuf;
+}
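+// As a rough worked example (illustration only): for a mono 16-bit 48 kHz
+// stream (96000 bytes/sec), a 40 ms target latency is passed in as 3840 bytes,
+// giving maxlength = tlength = 3840, minreq = 1920 and prebuf = 1920 with
+// kPlaybackRequestFactor == 2.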
+
+static pa_volume_t CricketVolumeToPulseVolume(int volume) {
+  // PA's volume space goes from 0% at PA_VOLUME_MUTED (value 0) to 100% at
+  // PA_VOLUME_NORM (value 0x10000). It can also go beyond 100% up to
+  // PA_VOLUME_MAX (value UINT32_MAX-1), but using that is probably unwise.
+  // We just linearly map the 0-255 scale of SoundSystemInterface onto
+  // PA_VOLUME_MUTED-PA_VOLUME_NORM. If the programmer exceeds kMaxVolume then
+  // they can access the over-100% features of PA.
+  return PA_VOLUME_MUTED + (PA_VOLUME_NORM - PA_VOLUME_MUTED) *
+      volume / SoundSystemInterface::kMaxVolume;
+}
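+// For example, with the 0-255 scale described above, volume 255 maps to
+// PA_VOLUME_NORM (0x10000) and volume 128 maps to 0x8080, i.e. roughly 50%
+// (a sketch of the arithmetic only).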
+
+static int PulseVolumeToCricketVolume(pa_volume_t pa_volume) {
+  return SoundSystemInterface::kMinVolume +
+      (SoundSystemInterface::kMaxVolume - SoundSystemInterface::kMinVolume) *
+      pa_volume / PA_VOLUME_NORM;
+}
+
+static pa_volume_t MaxChannelVolume(pa_cvolume *channel_volumes) {
+  pa_volume_t pa_volume = PA_VOLUME_MUTED;  // Minimum possible value.
+  for (int i = 0; i < channel_volumes->channels; ++i) {
+    if (pa_volume < channel_volumes->values[i]) {
+      pa_volume = channel_volumes->values[i];
+    }
+  }
+  return pa_volume;
+}
+
+class PulseAudioDeviceLocator : public SoundDeviceLocator {
+ public:
+  PulseAudioDeviceLocator(const std::string &name,
+                          const std::string &device_name)
+      : SoundDeviceLocator(name, device_name) {
+  }
+
+  virtual SoundDeviceLocator *Copy() const {
+    return new PulseAudioDeviceLocator(*this);
+  }
+};
+
+// Functionality that is common to both PulseAudioInputStream and
+// PulseAudioOutputStream.
+class PulseAudioStream {
+ public:
+  PulseAudioStream(PulseAudioSoundSystem *pulse, pa_stream *stream, int flags)
+      : pulse_(pulse), stream_(stream), flags_(flags) {
+  }
+
+  ~PulseAudioStream() {
+    // Close() should have been called during the containing class's destructor.
+    ASSERT(stream_ == NULL);
+  }
+
+  // Must be called with the lock held.
+  bool Close() {
+    if (!IsClosed()) {
+      // Unset this here so that we don't get a TERMINATED callback.
+      symbol_table()->pa_stream_set_state_callback()(stream_, NULL, NULL);
+      if (symbol_table()->pa_stream_disconnect()(stream_) != 0) {
+        LOG(LS_ERROR) << "Can't disconnect stream";
+        // Continue and return true anyway.
+      }
+      symbol_table()->pa_stream_unref()(stream_);
+      stream_ = NULL;
+    }
+    return true;
+  }
+
+  // Must be called with the lock held.
+  int LatencyUsecs() {
+    if (!(flags_ & SoundSystemInterface::FLAG_REPORT_LATENCY)) {
+      return 0;
+    }
+
+    pa_usec_t latency;
+    int negative;
+    Lock();
+    int re = symbol_table()->pa_stream_get_latency()(stream_, &latency,
+        &negative);
+    Unlock();
+    if (re != 0) {
+      LOG(LS_ERROR) << "Can't query latency";
+      // We'd rather continue playout/capture with an incorrect delay than stop
+      // it altogether, so return a valid value.
+      return 0;
+    }
+    if (negative) {
+      // The delay can be negative for monitoring streams if the captured
+      // samples haven't been played yet. In such a case, "latency" contains the
+      // magnitude, so we must negate it to get the real value.
+      return -latency;
+    } else {
+      return latency;
+    }
+  }
+
+  PulseAudioSoundSystem *pulse() {
+    return pulse_;
+  }
+
+  PulseAudioSymbolTable *symbol_table() {
+    return &pulse()->symbol_table_;
+  }
+
+  pa_stream *stream() {
+    ASSERT(stream_ != NULL);
+    return stream_;
+  }
+
+  bool IsClosed() {
+    return stream_ == NULL;
+  }
+
+  void Lock() {
+    pulse()->Lock();
+  }
+
+  void Unlock() {
+    pulse()->Unlock();
+  }
+
+ private:
+  PulseAudioSoundSystem *pulse_;
+  pa_stream *stream_;
+  int flags_;
+
+  DISALLOW_COPY_AND_ASSIGN(PulseAudioStream);
+};
+
+// Implementation of an input stream. See soundinputstreaminterface.h regarding
+// thread-safety.
+class PulseAudioInputStream :
+    public SoundInputStreamInterface,
+    private rtc::Worker {
+
+  struct GetVolumeCallbackData {
+    PulseAudioInputStream *instance;
+    pa_cvolume *channel_volumes;
+  };
+
+  struct GetSourceChannelCountCallbackData {
+    PulseAudioInputStream *instance;
+    uint8_t *channels;
+  };
+
+ public:
+  PulseAudioInputStream(PulseAudioSoundSystem *pulse,
+                        pa_stream *stream,
+                        int flags)
+      : stream_(pulse, stream, flags),
+        temp_sample_data_(NULL),
+        temp_sample_data_size_(0) {
+    // This callback seems never to be issued, but let's set it anyway.
+    symbol_table()->pa_stream_set_overflow_callback()(stream, &OverflowCallback,
+        NULL);
+  }
+
+  virtual ~PulseAudioInputStream() {
+    bool success = Close();
+    // We need that to live.
+    VERIFY(success);
+  }
+
+  virtual bool StartReading() {
+    return StartWork();
+  }
+
+  virtual bool StopReading() {
+    return StopWork();
+  }
+
+  virtual bool GetVolume(int *volume) {
+    bool ret = false;
+
+    Lock();
+
+    // Unlike output streams, input streams have no concept of a stream volume,
+    // only a device volume. So we have to retrieve the volume of the device
+    // itself.
+
+    pa_cvolume channel_volumes;
+
+    GetVolumeCallbackData data;
+    data.instance = this;
+    data.channel_volumes = &channel_volumes;
+
+    pa_operation *op = symbol_table()->pa_context_get_source_info_by_index()(
+            stream_.pulse()->context_,
+            symbol_table()->pa_stream_get_device_index()(stream_.stream()),
+            &GetVolumeCallbackThunk,
+            &data);
+    if (!stream_.pulse()->FinishOperation(op)) {
+      goto done;
+    }
+
+    if (data.channel_volumes) {
+      // This pointer was never unset by the callback, so we must have received
+      // an empty list of infos. This probably never happens, but we code for it
+      // anyway.
+      LOG(LS_ERROR) << "Did not receive GetVolumeCallback";
+      goto done;
+    }
+
+    // We now have the volume for each channel. Each channel could have a
+    // different volume if, e.g., the user went and changed the volumes in the
+    // PA UI. To get a single volume for SoundSystemInterface we just take the
+    // maximum. Ideally we'd do so with pa_cvolume_max, but it doesn't exist in
+    // Hardy, so we do it manually.
+    pa_volume_t pa_volume;
+    pa_volume = MaxChannelVolume(&channel_volumes);
+    // Now map onto the SoundSystemInterface range.
+    *volume = PulseVolumeToCricketVolume(pa_volume);
+
+    ret = true;
+   done:
+    Unlock();
+    return ret;
+  }
+
+  virtual bool SetVolume(int volume) {
+    bool ret = false;
+    pa_volume_t pa_volume = CricketVolumeToPulseVolume(volume);
+
+    Lock();
+
+    // Unlike output streams, input streams have no concept of a stream volume,
+    // only a device volume. So we have to change the volume of the device
+    // itself.
+
+    // The device may have a different number of channels than the stream and
+    // their mapping may be different, so we don't want to use the channel count
+    // from our sample spec. We could use PA_CHANNELS_MAX to cover our bases,
+    // and the server allows that even if the device's channel count is lower,
+    // but some buggy PA clients don't like that (the pavucontrol on Hardy dies
+    // in an assert if the channel count is different). So instead we look up
+    // the actual number of channels that the device has.
+
+    uint8_t channels;
+
+    GetSourceChannelCountCallbackData data;
+    data.instance = this;
+    data.channels = &channels;
+
+    uint32_t device_index = symbol_table()->pa_stream_get_device_index()(
+        stream_.stream());
+
+    pa_operation *op = symbol_table()->pa_context_get_source_info_by_index()(
+        stream_.pulse()->context_,
+        device_index,
+        &GetSourceChannelCountCallbackThunk,
+        &data);
+    if (!stream_.pulse()->FinishOperation(op)) {
+      goto done;
+    }
+
+    if (data.channels) {
+      // This pointer was never unset by the callback, so we must have received
+      // an empty list of infos. This probably never happens, but we code for it
+      // anyway.
+      LOG(LS_ERROR) << "Did not receive GetSourceChannelCountCallback";
+      goto done;
+    }
+
+    pa_cvolume channel_volumes;
+    symbol_table()->pa_cvolume_set()(&channel_volumes, channels, pa_volume);
+
+    op = symbol_table()->pa_context_set_source_volume_by_index()(
+        stream_.pulse()->context_,
+        device_index,
+        &channel_volumes,
+        // This callback merely logs errors.
+        &SetVolumeCallback,
+        NULL);
+    if (!op) {
+      LOG(LS_ERROR) << "pa_context_set_source_volume_by_index()";
+      goto done;
+    }
+    // Don't need to wait for this to complete.
+    symbol_table()->pa_operation_unref()(op);
+
+    ret = true;
+   done:
+    Unlock();
+    return ret;
+  }
+
+  virtual bool Close() {
+    if (!StopReading()) {
+      return false;
+    }
+    bool ret = true;
+    if (!stream_.IsClosed()) {
+      Lock();
+      ret = stream_.Close();
+      Unlock();
+    }
+    return ret;
+  }
+
+  virtual int LatencyUsecs() {
+    return stream_.LatencyUsecs();
+  }
+
+ private:
+  void Lock() {
+    stream_.Lock();
+  }
+
+  void Unlock() {
+    stream_.Unlock();
+  }
+
+  PulseAudioSymbolTable *symbol_table() {
+    return stream_.symbol_table();
+  }
+
+  void EnableReadCallback() {
+    symbol_table()->pa_stream_set_read_callback()(
+         stream_.stream(),
+         &ReadCallbackThunk,
+         this);
+  }
+
+  void DisableReadCallback() {
+    symbol_table()->pa_stream_set_read_callback()(
+         stream_.stream(),
+         NULL,
+         NULL);
+  }
+
+  static void ReadCallbackThunk(pa_stream *unused1,
+                                size_t unused2,
+                                void *userdata) {
+    PulseAudioInputStream *instance =
+        static_cast<PulseAudioInputStream *>(userdata);
+    instance->OnReadCallback();
+  }
+
+  void OnReadCallback() {
+    // We get the data pointer and size now in order to save one Lock/Unlock
+    // on OnMessage.
+    if (symbol_table()->pa_stream_peek()(stream_.stream(),
+                                         &temp_sample_data_,
+                                         &temp_sample_data_size_) != 0) {
+      LOG(LS_ERROR) << "Can't read data!";
+      return;
+    }
+    // Since we consume the data asynchronously on a different thread, we have
+    // to temporarily disable the read callback or else Pulse will call it
+    // continuously until we consume the data. We re-enable it below.
+    DisableReadCallback();
+    HaveWork();
+  }
+
+  // Inherited from Worker.
+  virtual void OnStart() {
+    Lock();
+    EnableReadCallback();
+    Unlock();
+  }
+
+  // Inherited from Worker.
+  virtual void OnHaveWork() {
+    ASSERT(temp_sample_data_ && temp_sample_data_size_);
+    SignalSamplesRead(temp_sample_data_,
+                      temp_sample_data_size_,
+                      this);
+    temp_sample_data_ = NULL;
+    temp_sample_data_size_ = 0;
+
+    Lock();
+    for (;;) {
+      // Ack the last thing we read.
+      if (symbol_table()->pa_stream_drop()(stream_.stream()) != 0) {
+        LOG(LS_ERROR) << "Can't ack read data";
+      }
+
+      if (symbol_table()->pa_stream_readable_size()(stream_.stream()) <= 0) {
+        // Then that was all the data.
+        break;
+      }
+
+      // Else more data.
+      const void *sample_data;
+      size_t sample_data_size;
+      if (symbol_table()->pa_stream_peek()(stream_.stream(),
+                                           &sample_data,
+                                           &sample_data_size) != 0) {
+        LOG(LS_ERROR) << "Can't read data!";
+        break;
+      }
+
+      // Drop lock for sigslot dispatch, which could take a while.
+      Unlock();
+      SignalSamplesRead(sample_data, sample_data_size, this);
+      Lock();
+
+      // Return to top of loop for the ack and the check for more data.
+    }
+    EnableReadCallback();
+    Unlock();
+  }
+
+  // Inherited from Worker.
+  virtual void OnStop() {
+    Lock();
+    DisableReadCallback();
+    Unlock();
+  }
+
+  static void OverflowCallback(pa_stream *stream,
+                               void *userdata) {
+    LOG(LS_WARNING) << "Buffer overflow on capture stream " << stream;
+  }
+
+  static void GetVolumeCallbackThunk(pa_context *unused,
+                                     const pa_source_info *info,
+                                     int eol,
+                                     void *userdata) {
+    GetVolumeCallbackData *data =
+        static_cast<GetVolumeCallbackData *>(userdata);
+    data->instance->OnGetVolumeCallback(info, eol, &data->channel_volumes);
+  }
+
+  void OnGetVolumeCallback(const pa_source_info *info,
+                           int eol,
+                           pa_cvolume **channel_volumes) {
+    if (eol) {
+      // List is over. Wake GetVolume().
+      stream_.pulse()->Signal();
+      return;
+    }
+
+    if (*channel_volumes) {
+      **channel_volumes = info->volume;
+      // Unset the pointer so that we know that we have already copied the
+      // volume.
+      *channel_volumes = NULL;
+    } else {
+      // We have received an additional callback after the first one, which
+      // doesn't make sense for a single source. This probably never happens,
+      // but we code for it anyway.
+      LOG(LS_WARNING) << "Ignoring extra GetVolumeCallback";
+    }
+  }
+
+  static void GetSourceChannelCountCallbackThunk(pa_context *unused,
+                                                 const pa_source_info *info,
+                                                 int eol,
+                                                 void *userdata) {
+    GetSourceChannelCountCallbackData *data =
+        static_cast<GetSourceChannelCountCallbackData *>(userdata);
+    data->instance->OnGetSourceChannelCountCallback(info, eol, &data->channels);
+  }
+
+  void OnGetSourceChannelCountCallback(const pa_source_info *info,
+                                       int eol,
+                                       uint8_t **channels) {
+    if (eol) {
+      // List is over. Wake SetVolume().
+      stream_.pulse()->Signal();
+      return;
+    }
+
+    if (*channels) {
+      **channels = info->channel_map.channels;
+      // Unset the pointer so that we know that we have already copied the
+      // channel count.
+      *channels = NULL;
+    } else {
+      // We have received an additional callback after the first one, which
+      // doesn't make sense for a single source. This probably never happens,
+      // but we code for it anyway.
+      LOG(LS_WARNING) << "Ignoring extra GetSourceChannelCountCallback";
+    }
+  }
+
+  static void SetVolumeCallback(pa_context *unused1,
+                                int success,
+                                void *unused2) {
+    if (!success) {
+      LOG(LS_ERROR) << "Failed to change capture volume";
+    }
+  }
+
+  PulseAudioStream stream_;
+  // Temporary storage for passing data between threads.
+  const void *temp_sample_data_;
+  size_t temp_sample_data_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(PulseAudioInputStream);
+};
+
+// Implementation of an output stream. See soundoutputstreaminterface.h
+// regarding thread-safety.
+class PulseAudioOutputStream :
+    public SoundOutputStreamInterface,
+    private rtc::Worker {
+
+  struct GetVolumeCallbackData {
+    PulseAudioOutputStream *instance;
+    pa_cvolume *channel_volumes;
+  };
+
+ public:
+  PulseAudioOutputStream(PulseAudioSoundSystem *pulse,
+                         pa_stream *stream,
+                         int flags,
+                         int latency)
+      : stream_(pulse, stream, flags),
+        configured_latency_(latency),
+        temp_buffer_space_(0) {
+    symbol_table()->pa_stream_set_underflow_callback()(stream,
+                                                       &UnderflowCallbackThunk,
+                                                       this);
+  }
+
+  virtual ~PulseAudioOutputStream() {
+    bool success = Close();
+    // We need that to live.
+    VERIFY(success);
+  }
+
+  virtual bool EnableBufferMonitoring() {
+    return StartWork();
+  }
+
+  virtual bool DisableBufferMonitoring() {
+    return StopWork();
+  }
+
+  virtual bool WriteSamples(const void *sample_data,
+                            size_t size) {
+    bool ret = true;
+    Lock();
+    if (symbol_table()->pa_stream_write()(stream_.stream(),
+                                          sample_data,
+                                          size,
+                                          NULL,
+                                          0,
+                                          PA_SEEK_RELATIVE) != 0) {
+      LOG(LS_ERROR) << "Unable to write";
+      ret = false;
+    }
+    Unlock();
+    return ret;
+  }
+
+  virtual bool GetVolume(int *volume) {
+    bool ret = false;
+
+    Lock();
+
+    pa_cvolume channel_volumes;
+
+    GetVolumeCallbackData data;
+    data.instance = this;
+    data.channel_volumes = &channel_volumes;
+
+    pa_operation *op = symbol_table()->pa_context_get_sink_input_info()(
+            stream_.pulse()->context_,
+            symbol_table()->pa_stream_get_index()(stream_.stream()),
+            &GetVolumeCallbackThunk,
+            &data);
+    if (!stream_.pulse()->FinishOperation(op)) {
+      goto done;
+    }
+
+    if (data.channel_volumes) {
+      // This pointer was never unset by the callback, so we must have received
+      // an empty list of infos. This probably never happens, but we code for it
+      // anyway.
+      LOG(LS_ERROR) << "Did not receive GetVolumeCallback";
+      goto done;
+    }
+
+    // We now have the volume for each channel. Each channel could have a
+    // different volume if, e.g., the user went and changed the volumes in the
+    // PA UI. To get a single volume for SoundSystemInterface we just take the
+    // maximum. Ideally we'd do so with pa_cvolume_max, but it doesn't exist in
+    // Hardy, so we do it manually.
+    pa_volume_t pa_volume;
+    pa_volume = MaxChannelVolume(&channel_volumes);
+    // Now map onto the SoundSystemInterface range.
+    *volume = PulseVolumeToCricketVolume(pa_volume);
+
+    ret = true;
+   done:
+    Unlock();
+    return ret;
+  }
+
+  virtual bool SetVolume(int volume) {
+    bool ret = false;
+    pa_volume_t pa_volume = CricketVolumeToPulseVolume(volume);
+
+    Lock();
+
+    const pa_sample_spec *spec = symbol_table()->pa_stream_get_sample_spec()(
+        stream_.stream());
+    if (!spec) {
+      LOG(LS_ERROR) << "pa_stream_get_sample_spec()";
+      goto done;
+    }
+
+    pa_cvolume channel_volumes;
+    symbol_table()->pa_cvolume_set()(&channel_volumes, spec->channels,
+        pa_volume);
+
+    pa_operation *op;
+    op = symbol_table()->pa_context_set_sink_input_volume()(
+        stream_.pulse()->context_,
+        symbol_table()->pa_stream_get_index()(stream_.stream()),
+        &channel_volumes,
+        // This callback merely logs errors.
+        &SetVolumeCallback,
+        NULL);
+    if (!op) {
+      LOG(LS_ERROR) << "pa_context_set_sink_input_volume()";
+      goto done;
+    }
+    // Don't need to wait for this to complete.
+    symbol_table()->pa_operation_unref()(op);
+
+    ret = true;
+   done:
+    Unlock();
+    return ret;
+  }
+
+  virtual bool Close() {
+    if (!DisableBufferMonitoring()) {
+      return false;
+    }
+    bool ret = true;
+    if (!stream_.IsClosed()) {
+      Lock();
+      symbol_table()->pa_stream_set_underflow_callback()(stream_.stream(),
+                                                         NULL,
+                                                         NULL);
+      ret = stream_.Close();
+      Unlock();
+    }
+    return ret;
+  }
+
+  virtual int LatencyUsecs() {
+    return stream_.LatencyUsecs();
+  }
+
+#if 0
+  // TODO: Versions 0.9.16 and later of Pulse have a new API for
+  // zero-copy writes, but Hardy is not new enough to have that so we can't
+  // rely on it. Perhaps auto-detect if it's present or not and use it if we
+  // can?
+
+  virtual bool GetWriteBuffer(void **buffer, size_t *size) {
+    bool ret = true;
+    Lock();
+    if (symbol_table()->pa_stream_begin_write()(stream_.stream(), buffer, size)
+            != 0) {
+      LOG(LS_ERROR) << "Can't get write buffer";
+      ret = false;
+    }
+    Unlock();
+    return ret;
+  }
+
+  // Releases the caller's hold on the write buffer. "written" must be the
+  // amount of data that was written.
+  virtual bool ReleaseWriteBuffer(void *buffer, size_t written) {
+    bool ret = true;
+    Lock();
+    if (written == 0) {
+      if (symbol_table()->pa_stream_cancel_write()(stream_.stream()) != 0) {
+        LOG(LS_ERROR) << "Can't cancel write";
+        ret = false;
+      }
+    } else {
+      if (symbol_table()->pa_stream_write()(stream_.stream(),
+                                            buffer,
+                                            written,
+                                            NULL,
+                                            0,
+                                            PA_SEEK_RELATIVE) != 0) {
+        LOG(LS_ERROR) << "Unable to write";
+        ret = false;
+      }
+    }
+    Unlock();
+    return ret;
+  }
+#endif
+
+ private:
+  void Lock() {
+    stream_.Lock();
+  }
+
+  void Unlock() {
+    stream_.Unlock();
+  }
+
+  PulseAudioSymbolTable *symbol_table() {
+    return stream_.symbol_table();
+  }
+
+  void EnableWriteCallback() {
+    pa_stream_state_t state = symbol_table()->pa_stream_get_state()(
+        stream_.stream());
+    if (state == PA_STREAM_READY) {
+      // May already have available space. Must check.
+      temp_buffer_space_ = symbol_table()->pa_stream_writable_size()(
+          stream_.stream());
+      if (temp_buffer_space_ > 0) {
+        // Yup, there is already space available, so if we register a write
+        // callback then it will not receive any event. So dispatch one ourselves
+        // instead.
+        HaveWork();
+        return;
+      }
+    }
+    symbol_table()->pa_stream_set_write_callback()(
+         stream_.stream(),
+         &WriteCallbackThunk,
+         this);
+  }
+
+  void DisableWriteCallback() {
+    symbol_table()->pa_stream_set_write_callback()(
+         stream_.stream(),
+         NULL,
+         NULL);
+  }
+
+  static void WriteCallbackThunk(pa_stream *unused,
+                                 size_t buffer_space,
+                                 void *userdata) {
+    PulseAudioOutputStream *instance =
+        static_cast<PulseAudioOutputStream *>(userdata);
+    instance->OnWriteCallback(buffer_space);
+  }
+
+  void OnWriteCallback(size_t buffer_space) {
+    temp_buffer_space_ = buffer_space;
+    // Since we write the data asynchronously on a different thread, we have
+    // to temporarily disable the write callback or else Pulse will call it
+    // continuously until we write the data. We re-enable it below.
+    DisableWriteCallback();
+    HaveWork();
+  }
+
+  // Inherited from Worker.
+  virtual void OnStart() {
+    Lock();
+    EnableWriteCallback();
+    Unlock();
+  }
+
+  // Inherited from Worker.
+  virtual void OnHaveWork() {
+    ASSERT(temp_buffer_space_ > 0);
+
+    SignalBufferSpace(temp_buffer_space_, this);
+
+    temp_buffer_space_ = 0;
+    Lock();
+    EnableWriteCallback();
+    Unlock();
+  }
+
+  // Inherited from Worker.
+  virtual void OnStop() {
+    Lock();
+    DisableWriteCallback();
+    Unlock();
+  }
+
+  static void UnderflowCallbackThunk(pa_stream *unused,
+                                     void *userdata) {
+    PulseAudioOutputStream *instance =
+        static_cast<PulseAudioOutputStream *>(userdata);
+    instance->OnUnderflowCallback();
+  }
+
+  void OnUnderflowCallback() {
+    LOG(LS_WARNING) << "Buffer underflow on playback stream "
+                    << stream_.stream();
+
+    if (configured_latency_ == SoundSystemInterface::kNoLatencyRequirements) {
+      // We didn't configure a pa_buffer_attr before, so switching to one now
+      // would be questionable.
+      return;
+    }
+
+    // Otherwise reconfigure the stream with a higher target latency.
+
+    const pa_sample_spec *spec = symbol_table()->pa_stream_get_sample_spec()(
+        stream_.stream());
+    if (!spec) {
+      LOG(LS_ERROR) << "pa_stream_get_sample_spec()";
+      return;
+    }
+
+    size_t bytes_per_sec = symbol_table()->pa_bytes_per_second()(spec);
+
+    int new_latency = configured_latency_ +
+        bytes_per_sec * kPlaybackLatencyIncrementMsecs /
+        rtc::kNumMicrosecsPerSec;
+
+    pa_buffer_attr new_attr = {0};
+    FillPlaybackBufferAttr(new_latency, &new_attr);
+
+    pa_operation *op = symbol_table()->pa_stream_set_buffer_attr()(
+        stream_.stream(),
+        &new_attr,
+        // No callback.
+        NULL,
+        NULL);
+    if (!op) {
+      LOG(LS_ERROR) << "pa_stream_set_buffer_attr()";
+      return;
+    }
+    // Don't need to wait for this to complete.
+    symbol_table()->pa_operation_unref()(op);
+
+    // Save the new latency in case we underflow again.
+    configured_latency_ = new_latency;
+  }
+
+  static void GetVolumeCallbackThunk(pa_context *unused,
+                                     const pa_sink_input_info *info,
+                                     int eol,
+                                     void *userdata) {
+    GetVolumeCallbackData *data =
+        static_cast<GetVolumeCallbackData *>(userdata);
+    data->instance->OnGetVolumeCallback(info, eol, &data->channel_volumes);
+  }
+
+  void OnGetVolumeCallback(const pa_sink_input_info *info,
+                           int eol,
+                           pa_cvolume **channel_volumes) {
+    if (eol) {
+      // List is over. Wake GetVolume().
+      stream_.pulse()->Signal();
+      return;
+    }
+
+    if (*channel_volumes) {
+      **channel_volumes = info->volume;
+      // Unset the pointer so that we know that we have already copied the
+      // volume.
+      *channel_volumes = NULL;
+    } else {
+      // We have received an additional callback after the first one, which
+      // doesn't make sense for a single sink input. This probably never
+      // happens, but we code for it anyway.
+      LOG(LS_WARNING) << "Ignoring extra GetVolumeCallback";
+    }
+  }
+
+  static void SetVolumeCallback(pa_context *unused1,
+                                int success,
+                                void *unused2) {
+    if (!success) {
+      LOG(LS_ERROR) << "Failed to change playback volume";
+    }
+  }
+
+  PulseAudioStream stream_;
+  int configured_latency_;
+  // Temporary storage for passing data between threads.
+  size_t temp_buffer_space_;
+
+  DISALLOW_COPY_AND_ASSIGN(PulseAudioOutputStream);
+};
+
+PulseAudioSoundSystem::PulseAudioSoundSystem()
+    : mainloop_(NULL), context_(NULL) {
+}
+
+PulseAudioSoundSystem::~PulseAudioSoundSystem() {
+  Terminate();
+}
+
+bool PulseAudioSoundSystem::Init() {
+  if (IsInitialized()) {
+    return true;
+  }
+
+  // Load libpulse.
+  if (!symbol_table_.Load()) {
+    // Most likely the Pulse library and sound server are not installed on
+    // this system.
+    LOG(LS_WARNING) << "Failed to load symbol table";
+    return false;
+  }
+
+  // Now create and start the Pulse event thread.
+  mainloop_ = symbol_table_.pa_threaded_mainloop_new()();
+  if (!mainloop_) {
+    LOG(LS_ERROR) << "Can't create mainloop";
+    goto fail0;
+  }
+
+  if (symbol_table_.pa_threaded_mainloop_start()(mainloop_) != 0) {
+    LOG(LS_ERROR) << "Can't start mainloop";
+    goto fail1;
+  }
+
+  Lock();
+  context_ = CreateNewConnection();
+  Unlock();
+
+  if (!context_) {
+    goto fail2;
+  }
+
+  // Otherwise we're now ready!
+  return true;
+
+ fail2:
+  symbol_table_.pa_threaded_mainloop_stop()(mainloop_);
+ fail1:
+  symbol_table_.pa_threaded_mainloop_free()(mainloop_);
+  mainloop_ = NULL;
+ fail0:
+  return false;
+}
+
+void PulseAudioSoundSystem::Terminate() {
+  if (!IsInitialized()) {
+    return;
+  }
+
+  Lock();
+  symbol_table_.pa_context_disconnect()(context_);
+  symbol_table_.pa_context_unref()(context_);
+  Unlock();
+  context_ = NULL;
+  symbol_table_.pa_threaded_mainloop_stop()(mainloop_);
+  symbol_table_.pa_threaded_mainloop_free()(mainloop_);
+  mainloop_ = NULL;
+
+  // We do not unload the symbol table because we may need it again soon if
+  // Init() is called again.
+}
+
+bool PulseAudioSoundSystem::EnumeratePlaybackDevices(
+    SoundDeviceLocatorList *devices) {
+  return EnumerateDevices<pa_sink_info>(
+      devices,
+      symbol_table_.pa_context_get_sink_info_list(),
+      &EnumeratePlaybackDevicesCallbackThunk);
+}
+
+bool PulseAudioSoundSystem::EnumerateCaptureDevices(
+    SoundDeviceLocatorList *devices) {
+  return EnumerateDevices<pa_source_info>(
+      devices,
+      symbol_table_.pa_context_get_source_info_list(),
+      &EnumerateCaptureDevicesCallbackThunk);
+}
+
+bool PulseAudioSoundSystem::GetDefaultPlaybackDevice(
+    SoundDeviceLocator **device) {
+  return GetDefaultDevice<&pa_server_info::default_sink_name>(device);
+}
+
+bool PulseAudioSoundSystem::GetDefaultCaptureDevice(
+    SoundDeviceLocator **device) {
+  return GetDefaultDevice<&pa_server_info::default_source_name>(device);
+}
+
+SoundOutputStreamInterface *PulseAudioSoundSystem::OpenPlaybackDevice(
+    const SoundDeviceLocator *device,
+    const OpenParams &params) {
+  return OpenDevice<SoundOutputStreamInterface>(
+      device,
+      params,
+      "Playback",
+      &PulseAudioSoundSystem::ConnectOutputStream);
+}
+
+SoundInputStreamInterface *PulseAudioSoundSystem::OpenCaptureDevice(
+    const SoundDeviceLocator *device,
+    const OpenParams &params) {
+  return OpenDevice<SoundInputStreamInterface>(
+      device,
+      params,
+      "Capture",
+      &PulseAudioSoundSystem::ConnectInputStream);
+}
+
+const char *PulseAudioSoundSystem::GetName() const {
+  return "PulseAudio";
+}
+
+inline bool PulseAudioSoundSystem::IsInitialized() {
+  return mainloop_ != NULL;
+}
+
+struct ConnectToPulseCallbackData {
+  PulseAudioSoundSystem *instance;
+  bool connect_done;
+};
+
+void PulseAudioSoundSystem::ConnectToPulseCallbackThunk(
+    pa_context *context, void *userdata) {
+  ConnectToPulseCallbackData *data =
+      static_cast<ConnectToPulseCallbackData *>(userdata);
+  data->instance->OnConnectToPulseCallback(context, &data->connect_done);
+}
+
+void PulseAudioSoundSystem::OnConnectToPulseCallback(
+    pa_context *context, bool *connect_done) {
+  pa_context_state_t state = symbol_table_.pa_context_get_state()(context);
+  if (state == PA_CONTEXT_READY ||
+      state == PA_CONTEXT_FAILED ||
+      state == PA_CONTEXT_TERMINATED) {
+    // Connection process has reached a terminal state. Wake ConnectToPulse().
+    *connect_done = true;
+    Signal();
+  }
+}
+
+// Must be called with the lock held.
+bool PulseAudioSoundSystem::ConnectToPulse(pa_context *context) {
+  bool ret = true;
+  ConnectToPulseCallbackData data;
+  // Have to put this up here to satisfy the compiler.
+  pa_context_state_t state;
+
+  data.instance = this;
+  data.connect_done = false;
+
+  symbol_table_.pa_context_set_state_callback()(context,
+                                                &ConnectToPulseCallbackThunk,
+                                                &data);
+
+  // Connect to PulseAudio sound server.
+  if (symbol_table_.pa_context_connect()(
+          context,
+          NULL,          // Default server
+          PA_CONTEXT_NOAUTOSPAWN,
+          NULL) != 0) {  // No special fork handling needed
+    LOG(LS_ERROR) << "Can't start connection to PulseAudio sound server";
+    ret = false;
+    goto done;
+  }
+
+  // Wait for the connection state machine to reach a terminal state.
+  do {
+    Wait();
+  } while (!data.connect_done);
+
+  // Now check to see what final state we reached.
+  state = symbol_table_.pa_context_get_state()(context);
+
+  if (state != PA_CONTEXT_READY) {
+    if (state == PA_CONTEXT_FAILED) {
+      LOG(LS_ERROR) << "Failed to connect to PulseAudio sound server";
+    } else if (state == PA_CONTEXT_TERMINATED) {
+      LOG(LS_ERROR) << "PulseAudio connection terminated early";
+    } else {
+      // Shouldn't happen, because we only signal on one of those three states.
+      LOG(LS_ERROR) << "Unknown problem connecting to PulseAudio";
+    }
+    ret = false;
+  }
+
+ done:
+  // We unset our callback for safety just in case the state might somehow
+  // change later, because the pointer to "data" will be invalid after return
+  // from this function.
+  symbol_table_.pa_context_set_state_callback()(context, NULL, NULL);
+  return ret;
+}
+
+// Must be called with the lock held.
+pa_context *PulseAudioSoundSystem::CreateNewConnection() {
+  // Create connection context.
+  std::string app_name;
+  // TODO: Pulse etiquette says this name should be localized. Do
+  // we care?
+  rtc::Filesystem::GetApplicationName(&app_name);
+  pa_context *context = symbol_table_.pa_context_new()(
+      symbol_table_.pa_threaded_mainloop_get_api()(mainloop_),
+      app_name.c_str());
+  if (!context) {
+    LOG(LS_ERROR) << "Can't create context";
+    goto fail0;
+  }
+
+  // Now connect.
+  if (!ConnectToPulse(context)) {
+    goto fail1;
+  }
+
+  // Otherwise the connection succeeded and is ready.
+  return context;
+
+ fail1:
+  symbol_table_.pa_context_unref()(context);
+ fail0:
+  return NULL;
+}
+
+struct EnumerateDevicesCallbackData {
+  PulseAudioSoundSystem *instance;
+  SoundSystemInterface::SoundDeviceLocatorList *devices;
+};
+
+void PulseAudioSoundSystem::EnumeratePlaybackDevicesCallbackThunk(
+    pa_context *unused,
+    const pa_sink_info *info,
+    int eol,
+    void *userdata) {
+  EnumerateDevicesCallbackData *data =
+      static_cast<EnumerateDevicesCallbackData *>(userdata);
+  data->instance->OnEnumeratePlaybackDevicesCallback(data->devices, info, eol);
+}
+
+void PulseAudioSoundSystem::EnumerateCaptureDevicesCallbackThunk(
+    pa_context *unused,
+    const pa_source_info *info,
+    int eol,
+    void *userdata) {
+  EnumerateDevicesCallbackData *data =
+      static_cast<EnumerateDevicesCallbackData *>(userdata);
+  data->instance->OnEnumerateCaptureDevicesCallback(data->devices, info, eol);
+}
+
+void PulseAudioSoundSystem::OnEnumeratePlaybackDevicesCallback(
+    SoundDeviceLocatorList *devices,
+    const pa_sink_info *info,
+    int eol) {
+  if (eol) {
+    // List is over. Wake EnumerateDevices().
+    Signal();
+    return;
+  }
+
+  // Else this is the next device.
+  devices->push_back(
+      new PulseAudioDeviceLocator(info->description, info->name));
+}
+
+void PulseAudioSoundSystem::OnEnumerateCaptureDevicesCallback(
+    SoundDeviceLocatorList *devices,
+    const pa_source_info *info,
+    int eol) {
+  if (eol) {
+    // List is over. Wake EnumerateDevices().
+    Signal();
+    return;
+  }
+
+  if (info->monitor_of_sink != PA_INVALID_INDEX) {
+    // We don't want to list monitor sources, since they are almost certainly
+    // not what the user wants for voice conferencing.
+    return;
+  }
+
+  // Else this is the next device.
+  devices->push_back(
+      new PulseAudioDeviceLocator(info->description, info->name));
+}
+
+template <typename InfoStruct>
+bool PulseAudioSoundSystem::EnumerateDevices(
+    SoundDeviceLocatorList *devices,
+    pa_operation *(*enumerate_fn)(
+        pa_context *c,
+        void (*callback_fn)(
+            pa_context *c,
+            const InfoStruct *i,
+            int eol,
+            void *userdata),
+        void *userdata),
+    void (*callback_fn)(
+        pa_context *c,
+        const InfoStruct *i,
+        int eol,
+        void *userdata)) {
+  ClearSoundDeviceLocatorList(devices);
+  if (!IsInitialized()) {
+    return false;
+  }
+
+  EnumerateDevicesCallbackData data;
+  data.instance = this;
+  data.devices = devices;
+
+  Lock();
+  pa_operation *op = (*enumerate_fn)(
+      context_,
+      callback_fn,
+      &data);
+  bool ret = FinishOperation(op);
+  Unlock();
+  return ret;
+}
+
+struct GetDefaultDeviceCallbackData {
+  PulseAudioSoundSystem *instance;
+  SoundDeviceLocator **device;
+};
+
+template <const char *(pa_server_info::*field)>
+void PulseAudioSoundSystem::GetDefaultDeviceCallbackThunk(
+    pa_context *unused,
+    const pa_server_info *info,
+    void *userdata) {
+  GetDefaultDeviceCallbackData *data =
+      static_cast<GetDefaultDeviceCallbackData *>(userdata);
+  data->instance->OnGetDefaultDeviceCallback<field>(info, data->device);
+}
+
+template <const char *(pa_server_info::*field)>
+void PulseAudioSoundSystem::OnGetDefaultDeviceCallback(
+    const pa_server_info *info,
+    SoundDeviceLocator **device) {
+  if (info) {
+    const char *dev = info->*field;
+    if (dev) {
+      *device = new PulseAudioDeviceLocator("Default device", dev);
+    }
+  }
+  Signal();
+}
+
+template <const char *(pa_server_info::*field)>
+bool PulseAudioSoundSystem::GetDefaultDevice(SoundDeviceLocator **device) {
+  if (!IsInitialized()) {
+    return false;
+  }
+  bool ret;
+  *device = NULL;
+  GetDefaultDeviceCallbackData data;
+  data.instance = this;
+  data.device = device;
+  Lock();
+  pa_operation *op = symbol_table_.pa_context_get_server_info()(
+      context_,
+      &GetDefaultDeviceCallbackThunk<field>,
+      &data);
+  ret = FinishOperation(op);
+  Unlock();
+  return ret && (*device != NULL);
+}
+
+void PulseAudioSoundSystem::StreamStateChangedCallbackThunk(
+    pa_stream *stream,
+    void *userdata) {
+  PulseAudioSoundSystem *instance =
+      static_cast<PulseAudioSoundSystem *>(userdata);
+  instance->OnStreamStateChangedCallback(stream);
+}
+
+void PulseAudioSoundSystem::OnStreamStateChangedCallback(pa_stream *stream) {
+  pa_stream_state_t state = symbol_table_.pa_stream_get_state()(stream);
+  if (state == PA_STREAM_READY) {
+    LOG(LS_INFO) << "Pulse stream " << stream << " ready";
+  } else if (state == PA_STREAM_FAILED ||
+             state == PA_STREAM_TERMINATED ||
+             state == PA_STREAM_UNCONNECTED) {
+    LOG(LS_ERROR) << "Pulse stream " << stream << " failed to connect: "
+                  << LastError();
+  }
+}
+
+template <typename StreamInterface>
+StreamInterface *PulseAudioSoundSystem::OpenDevice(
+    const SoundDeviceLocator *device,
+    const OpenParams &params,
+    const char *stream_name,
+    StreamInterface *(PulseAudioSoundSystem::*connect_fn)(
+        pa_stream *stream,
+        const char *dev,
+        int flags,
+        pa_stream_flags_t pa_flags,
+        int latency,
+        const pa_sample_spec &spec)) {
+  if (!IsInitialized()) {
+    return NULL;
+  }
+
+  const char *dev = static_cast<const PulseAudioDeviceLocator *>(device)->
+      device_name().c_str();
+
+  StreamInterface *stream_interface = NULL;
+
+  ASSERT(params.format < ARRAY_SIZE(kCricketFormatToPulseFormatTable));
+
+  pa_sample_spec spec;
+  spec.format = kCricketFormatToPulseFormatTable[params.format];
+  spec.rate = params.freq;
+  spec.channels = params.channels;
+
+  int pa_flags = 0;
+  if (params.flags & FLAG_REPORT_LATENCY) {
+    pa_flags |= PA_STREAM_INTERPOLATE_TIMING |
+                PA_STREAM_AUTO_TIMING_UPDATE;
+  }
+
+  if (params.latency != kNoLatencyRequirements) {
+    // If configuring a specific latency then we want to specify
+    // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
+    // automatically to reach that target latency. However, that flag doesn't
+    // exist in Ubuntu 8.04 and many people still use that, so we have to check
+    // the protocol version of libpulse.
+    if (symbol_table_.pa_context_get_protocol_version()(context_) >=
+        kAdjustLatencyProtocolVersion) {
+      pa_flags |= PA_STREAM_ADJUST_LATENCY;
+    }
+  }
+
+  Lock();
+
+  pa_stream *stream = symbol_table_.pa_stream_new()(context_, stream_name,
+      &spec, NULL);
+  if (!stream) {
+    LOG(LS_ERROR) << "Can't create pa_stream";
+    goto done;
+  }
+
+  // Set a state callback to log errors.
+  symbol_table_.pa_stream_set_state_callback()(stream,
+                                               &StreamStateChangedCallbackThunk,
+                                               this);
+
+  stream_interface = (this->*connect_fn)(
+      stream,
+      dev,
+      params.flags,
+      static_cast<pa_stream_flags_t>(pa_flags),
+      params.latency,
+      spec);
+  if (!stream_interface) {
+    LOG(LS_ERROR) << "Can't connect stream to " << dev;
+    symbol_table_.pa_stream_unref()(stream);
+  }
+
+ done:
+  Unlock();
+  return stream_interface;
+}
+
+// Must be called with the lock held.
+SoundOutputStreamInterface *PulseAudioSoundSystem::ConnectOutputStream(
+    pa_stream *stream,
+    const char *dev,
+    int flags,
+    pa_stream_flags_t pa_flags,
+    int latency,
+    const pa_sample_spec &spec) {
+  pa_buffer_attr attr = {0};
+  pa_buffer_attr *pattr = NULL;
+  if (latency != kNoLatencyRequirements) {
+    // kLowLatency is 0, so we treat it the same as a request for zero latency.
+    ssize_t bytes_per_sec = symbol_table_.pa_bytes_per_second()(&spec);
+    latency = rtc::_max(
+        latency,
+        static_cast<int>(
+            bytes_per_sec * kPlaybackLatencyMinimumMsecs /
+            rtc::kNumMicrosecsPerSec));
+    FillPlaybackBufferAttr(latency, &attr);
+    pattr = &attr;
+  }
+  if (symbol_table_.pa_stream_connect_playback()(
+          stream,
+          dev,
+          pattr,
+          pa_flags,
+          // Let server choose volume
+          NULL,
+          // Not synchronized to any other playout
+          NULL) != 0) {
+    return NULL;
+  }
+  return new PulseAudioOutputStream(this, stream, flags, latency);
+}
+
+// Must be called with the lock held.
+SoundInputStreamInterface *PulseAudioSoundSystem::ConnectInputStream(
+    pa_stream *stream,
+    const char *dev,
+    int flags,
+    pa_stream_flags_t pa_flags,
+    int latency,
+    const pa_sample_spec &spec) {
+  pa_buffer_attr attr = {0};
+  pa_buffer_attr *pattr = NULL;
+  if (latency != kNoLatencyRequirements) {
+    size_t bytes_per_sec = symbol_table_.pa_bytes_per_second()(&spec);
+    if (latency == kLowLatency) {
+      latency = bytes_per_sec * kLowCaptureLatencyMsecs /
+          rtc::kNumMicrosecsPerSec;
+    }
+    // Note: fragsize specifies a maximum transfer size, not a minimum, so it is
+    // not possible to force a high latency setting, only a low one.
+    attr.fragsize = latency;
+    attr.maxlength = latency + bytes_per_sec * kCaptureBufferExtraMsecs /
+        rtc::kNumMicrosecsPerSec;
+    LOG(LS_VERBOSE) << "Configuring latency = " << attr.fragsize
+                    << ", maxlength = " << attr.maxlength;
+    pattr = &attr;
+  }
+  if (symbol_table_.pa_stream_connect_record()(stream,
+                                               dev,
+                                               pattr,
+                                               pa_flags) != 0) {
+    return NULL;
+  }
+  return new PulseAudioInputStream(this, stream, flags);
+}
+
+// Must be called with the lock held.
+bool PulseAudioSoundSystem::FinishOperation(pa_operation *op) {
+  if (!op) {
+    LOG(LS_ERROR) << "Failed to start operation";
+    return false;
+  }
+
+  do {
+    Wait();
+  } while (symbol_table_.pa_operation_get_state()(op) == PA_OPERATION_RUNNING);
+
+  symbol_table_.pa_operation_unref()(op);
+
+  return true;
+}
+
+inline void PulseAudioSoundSystem::Lock() {
+  symbol_table_.pa_threaded_mainloop_lock()(mainloop_);
+}
+
+inline void PulseAudioSoundSystem::Unlock() {
+  symbol_table_.pa_threaded_mainloop_unlock()(mainloop_);
+}
+
+// Must be called with the lock held.
+inline void PulseAudioSoundSystem::Wait() {
+  symbol_table_.pa_threaded_mainloop_wait()(mainloop_);
+}
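+// Note: pa_threaded_mainloop_wait() is documented to release the mainloop lock
+// while it blocks and to reacquire it before returning, which is why the
+// Wait()/Signal() handshakes above (e.g. in ConnectToPulse() and
+// FinishOperation()) do not deadlock with callbacks running on the mainloop
+// thread.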
+
+// Must be called with the lock held.
+inline void PulseAudioSoundSystem::Signal() {
+  symbol_table_.pa_threaded_mainloop_signal()(mainloop_, 0);
+}
+
+// Must be called with the lock held.
+const char *PulseAudioSoundSystem::LastError() {
+  return symbol_table_.pa_strerror()(symbol_table_.pa_context_errno()(
+      context_));
+}
+
+}  // namespace rtc
+
+#endif  // HAVE_LIBPULSE
diff --git a/sound/pulseaudiosoundsystem.h b/sound/pulseaudiosoundsystem.h
new file mode 100644
index 0000000..4e67acc
--- /dev/null
+++ b/sound/pulseaudiosoundsystem.h
@@ -0,0 +1,177 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_PULSEAUDIOSOUNDSYSTEM_H_
+#define WEBRTC_SOUND_PULSEAUDIOSOUNDSYSTEM_H_
+
+#ifdef HAVE_LIBPULSE
+
+#include "webrtc/sound/pulseaudiosymboltable.h"
+#include "webrtc/sound/soundsysteminterface.h"
+#include "webrtc/base/constructormagic.h"
+
+namespace rtc {
+
+class PulseAudioInputStream;
+class PulseAudioOutputStream;
+class PulseAudioStream;
+
+// Sound system implementation for PulseAudio, a cross-platform sound server
+// (but commonly used only on Linux, which is the only platform we support
+// it on).
+// Init(), Terminate(), and the destructor should never be invoked concurrently,
+// but all other methods are thread-safe.
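+//
+// A minimal usage sketch (assuming the caller frees the enumerated locators
+// with SoundSystemInterface::ClearSoundDeviceLocatorList() and deletes the
+// sound system when done):
+//
+//   SoundSystemInterface *sound = PulseAudioSoundSystem::Create();
+//   if (sound->Init()) {
+//     SoundSystemInterface::SoundDeviceLocatorList devices;
+//     sound->EnumeratePlaybackDevices(&devices);
+//     // ... open playback/capture streams as needed ...
+//     SoundSystemInterface::ClearSoundDeviceLocatorList(&devices);
+//     sound->Terminate();
+//   }
+//   delete sound;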
+class PulseAudioSoundSystem : public SoundSystemInterface {
+  friend class PulseAudioInputStream;
+  friend class PulseAudioOutputStream;
+  friend class PulseAudioStream;
+ public:
+  static SoundSystemInterface *Create() {
+    return new PulseAudioSoundSystem();
+  }
+
+  PulseAudioSoundSystem();
+
+  virtual ~PulseAudioSoundSystem();
+
+  virtual bool Init();
+  virtual void Terminate();
+
+  virtual bool EnumeratePlaybackDevices(SoundDeviceLocatorList *devices);
+  virtual bool EnumerateCaptureDevices(SoundDeviceLocatorList *devices);
+
+  virtual bool GetDefaultPlaybackDevice(SoundDeviceLocator **device);
+  virtual bool GetDefaultCaptureDevice(SoundDeviceLocator **device);
+
+  virtual SoundOutputStreamInterface *OpenPlaybackDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params);
+  virtual SoundInputStreamInterface *OpenCaptureDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params);
+
+  virtual const char *GetName() const;
+
+ private:
+  bool IsInitialized();
+
+  static void ConnectToPulseCallbackThunk(pa_context *context, void *userdata);
+
+  void OnConnectToPulseCallback(pa_context *context, bool *connect_done);
+
+  bool ConnectToPulse(pa_context *context);
+
+  pa_context *CreateNewConnection();
+
+  template <typename InfoStruct>
+  bool EnumerateDevices(SoundDeviceLocatorList *devices,
+                        pa_operation *(*enumerate_fn)(
+                            pa_context *c,
+                            void (*callback_fn)(
+                                pa_context *c,
+                                const InfoStruct *i,
+                                int eol,
+                                void *userdata),
+                            void *userdata),
+                        void (*callback_fn)(
+                            pa_context *c,
+                            const InfoStruct *i,
+                            int eol,
+                            void *userdata));
+
+  static void EnumeratePlaybackDevicesCallbackThunk(pa_context *unused,
+                                                    const pa_sink_info *info,
+                                                    int eol,
+                                                    void *userdata);
+
+  static void EnumerateCaptureDevicesCallbackThunk(pa_context *unused,
+                                                   const pa_source_info *info,
+                                                   int eol,
+                                                   void *userdata);
+
+  void OnEnumeratePlaybackDevicesCallback(
+      SoundDeviceLocatorList *devices,
+      const pa_sink_info *info,
+      int eol);
+
+  void OnEnumerateCaptureDevicesCallback(
+      SoundDeviceLocatorList *devices,
+      const pa_source_info *info,
+      int eol);
+
+  template <const char *(pa_server_info::*field)>
+  static void GetDefaultDeviceCallbackThunk(
+      pa_context *unused,
+      const pa_server_info *info,
+      void *userdata);
+
+  template <const char *(pa_server_info::*field)>
+  void OnGetDefaultDeviceCallback(
+      const pa_server_info *info,
+      SoundDeviceLocator **device);
+
+  template <const char *(pa_server_info::*field)>
+  bool GetDefaultDevice(SoundDeviceLocator **device);
+
+  static void StreamStateChangedCallbackThunk(pa_stream *stream,
+                                              void *userdata);
+
+  void OnStreamStateChangedCallback(pa_stream *stream);
+
+  template <typename StreamInterface>
+  StreamInterface *OpenDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params,
+      const char *stream_name,
+      StreamInterface *(PulseAudioSoundSystem::*connect_fn)(
+          pa_stream *stream,
+          const char *dev,
+          int flags,
+          pa_stream_flags_t pa_flags,
+          int latency,
+          const pa_sample_spec &spec));
+
+  SoundOutputStreamInterface *ConnectOutputStream(
+      pa_stream *stream,
+      const char *dev,
+      int flags,
+      pa_stream_flags_t pa_flags,
+      int latency,
+      const pa_sample_spec &spec);
+
+  SoundInputStreamInterface *ConnectInputStream(
+      pa_stream *stream,
+      const char *dev,
+      int flags,
+      pa_stream_flags_t pa_flags,
+      int latency,
+      const pa_sample_spec &spec);
+
+  bool FinishOperation(pa_operation *op);
+
+  void Lock();
+  void Unlock();
+  void Wait();
+  void Signal();
+
+  const char *LastError();
+
+  pa_threaded_mainloop *mainloop_;
+  pa_context *context_;
+  PulseAudioSymbolTable symbol_table_;
+
+  DISALLOW_COPY_AND_ASSIGN(PulseAudioSoundSystem);
+};
+
+}  // namespace rtc
+
+#endif  // HAVE_LIBPULSE
+
+#endif  // WEBRTC_SOUND_PULSEAUDIOSOUNDSYSTEM_H_
diff --git a/sound/pulseaudiosymboltable.cc b/sound/pulseaudiosymboltable.cc
new file mode 100644
index 0000000..7dd073f
--- /dev/null
+++ b/sound/pulseaudiosymboltable.cc
@@ -0,0 +1,24 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifdef HAVE_LIBPULSE
+
+#include "webrtc/sound/pulseaudiosymboltable.h"
+
+namespace rtc {
+
+#define LATE_BINDING_SYMBOL_TABLE_CLASS_NAME PULSE_AUDIO_SYMBOLS_CLASS_NAME
+#define LATE_BINDING_SYMBOL_TABLE_SYMBOLS_LIST PULSE_AUDIO_SYMBOLS_LIST
+#define LATE_BINDING_SYMBOL_TABLE_DLL_NAME "libpulse.so.0"
+#include "webrtc/base/latebindingsymboltable.cc.def"
+
+}  // namespace rtc
+
+#endif  // HAVE_LIBPULSE
diff --git a/sound/pulseaudiosymboltable.h b/sound/pulseaudiosymboltable.h
new file mode 100644
index 0000000..35e9523
--- /dev/null
+++ b/sound/pulseaudiosymboltable.h
@@ -0,0 +1,87 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_PULSEAUDIOSYMBOLTABLE_H_
+#define WEBRTC_SOUND_PULSEAUDIOSYMBOLTABLE_H_
+
+#include <pulse/context.h>
+#include <pulse/def.h>
+#include <pulse/error.h>
+#include <pulse/introspect.h>
+#include <pulse/stream.h>
+#include <pulse/thread-mainloop.h>
+
+#include "webrtc/base/latebindingsymboltable.h"
+
+namespace rtc {
+
+#define PULSE_AUDIO_SYMBOLS_CLASS_NAME PulseAudioSymbolTable
+// The PulseAudio symbols we need, as an X-Macro list.
+// This list must contain precisely every libpulse function that is used in
+// pulseaudiosoundsystem.cc.
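+// Each X(sym) entry expands (via latebindingsymboltable.h.def) into an
+// accessor named sym() that returns the dynamically loaded function pointer,
+// so call sites look like symbol_table()->pa_stream_new()(...); see
+// pulseaudiosoundsystem.cc.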
+#define PULSE_AUDIO_SYMBOLS_LIST \
+  X(pa_bytes_per_second) \
+  X(pa_context_connect) \
+  X(pa_context_disconnect) \
+  X(pa_context_errno) \
+  X(pa_context_get_protocol_version) \
+  X(pa_context_get_server_info) \
+  X(pa_context_get_sink_info_list) \
+  X(pa_context_get_sink_input_info) \
+  X(pa_context_get_source_info_by_index) \
+  X(pa_context_get_source_info_list) \
+  X(pa_context_get_state) \
+  X(pa_context_new) \
+  X(pa_context_set_sink_input_volume) \
+  X(pa_context_set_source_volume_by_index) \
+  X(pa_context_set_state_callback) \
+  X(pa_context_unref) \
+  X(pa_cvolume_set) \
+  X(pa_operation_get_state) \
+  X(pa_operation_unref) \
+  X(pa_stream_connect_playback) \
+  X(pa_stream_connect_record) \
+  X(pa_stream_disconnect) \
+  X(pa_stream_drop) \
+  X(pa_stream_get_device_index) \
+  X(pa_stream_get_index) \
+  X(pa_stream_get_latency) \
+  X(pa_stream_get_sample_spec) \
+  X(pa_stream_get_state) \
+  X(pa_stream_new) \
+  X(pa_stream_peek) \
+  X(pa_stream_readable_size) \
+  X(pa_stream_set_buffer_attr) \
+  X(pa_stream_set_overflow_callback) \
+  X(pa_stream_set_read_callback) \
+  X(pa_stream_set_state_callback) \
+  X(pa_stream_set_underflow_callback) \
+  X(pa_stream_set_write_callback) \
+  X(pa_stream_unref) \
+  X(pa_stream_writable_size) \
+  X(pa_stream_write) \
+  X(pa_strerror) \
+  X(pa_threaded_mainloop_free) \
+  X(pa_threaded_mainloop_get_api) \
+  X(pa_threaded_mainloop_lock) \
+  X(pa_threaded_mainloop_new) \
+  X(pa_threaded_mainloop_signal) \
+  X(pa_threaded_mainloop_start) \
+  X(pa_threaded_mainloop_stop) \
+  X(pa_threaded_mainloop_unlock) \
+  X(pa_threaded_mainloop_wait)
+
+#define LATE_BINDING_SYMBOL_TABLE_CLASS_NAME PULSE_AUDIO_SYMBOLS_CLASS_NAME
+#define LATE_BINDING_SYMBOL_TABLE_SYMBOLS_LIST PULSE_AUDIO_SYMBOLS_LIST
+#include "webrtc/base/latebindingsymboltable.h.def"
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_PULSEAUDIOSYMBOLTABLE_H_
diff --git a/sound/rtc_sound_unittest.isolate b/sound/rtc_sound_unittest.isolate
new file mode 100644
index 0000000..38a2acb
--- /dev/null
+++ b/sound/rtc_sound_unittest.isolate
@@ -0,0 +1,25 @@
+# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+  'conditions': [
+    ['OS=="linux" or OS=="mac" or OS=="win"', {
+      'variables': {
+        'command': [
+          '<(PRODUCT_DIR)/rtc_sound_unittest<(EXECUTABLE_SUFFIX)',
+        ],
+        'isolate_dependency_tracked': [
+          '<(PRODUCT_DIR)/rtc_sound_unittest<(EXECUTABLE_SUFFIX)',
+        ],
+        'isolate_dependency_untracked': [
+          '<(DEPTH)/tools/swarming_client/',
+        ],
+      },
+    }],
+  ],
+}
diff --git a/sound/sound.gyp b/sound/sound.gyp
new file mode 100644
index 0000000..b1a163f
--- /dev/null
+++ b/sound/sound.gyp
@@ -0,0 +1,55 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+  'includes': [ '../build/common.gypi', ],
+  'targets': [
+    {
+      'target_name': 'rtc_sound',
+      'type': 'static_library',
+      'dependencies': [
+        '<(webrtc_root)/base/base.gyp:webrtc_base',
+      ],
+      'sources': [
+        'automaticallychosensoundsystem.h',
+        'nullsoundsystem.cc',
+        'nullsoundsystem.h',
+        'nullsoundsystemfactory.cc',
+        'nullsoundsystemfactory.h',
+        'platformsoundsystem.cc',
+        'platformsoundsystem.h',
+        'platformsoundsystemfactory.cc',
+        'platformsoundsystemfactory.h',
+        'sounddevicelocator.h',
+        'soundinputstreaminterface.h',
+        'soundoutputstreaminterface.h',
+        'soundsystemfactory.h',
+        'soundsysteminterface.cc',
+        'soundsysteminterface.h',
+        'soundsystemproxy.cc',
+        'soundsystemproxy.h',
+      ],
+      'conditions': [
+        ['OS=="linux"', {
+          'sources': [
+            'alsasoundsystem.cc',
+            'alsasoundsystem.h',
+            'alsasymboltable.cc',
+            'alsasymboltable.h',
+            'linuxsoundsystem.cc',
+            'linuxsoundsystem.h',
+            'pulseaudiosoundsystem.cc',
+            'pulseaudiosoundsystem.h',
+            'pulseaudiosymboltable.cc',
+            'pulseaudiosymboltable.h',
+          ],
+        }],
+      ],
+    },
+  ],
+}
diff --git a/sound/sound_tests.gyp b/sound/sound_tests.gyp
new file mode 100644
index 0000000..53f979f
--- /dev/null
+++ b/sound/sound_tests.gyp
@@ -0,0 +1,48 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+  'includes': [ '../build/common.gypi', ],
+  'targets': [
+    {
+      'target_name': 'rtc_sound_unittest',
+      'type': 'executable',
+      'dependencies': [
+        '<(DEPTH)/testing/gtest.gyp:gtest',
+        '<(webrtc_root)/base/base_tests.gyp:webrtc_base_tests_utils',
+        '<(webrtc_root)/sound/sound.gyp:rtc_sound',
+      ],
+      'cflags_cc!': [
+        '-Wnon-virtual-dtor',
+      ],
+      'sources': [
+        'automaticallychosensoundsystem_unittest.cc',
+      ],
+    },
+  ],
+  'conditions': [
+    ['test_isolation_mode != "noop"', {
+      'targets': [
+        {
+          'target_name': 'rtc_sound_unittest_run',
+          'type': 'none',
+          'dependencies': [
+            'rtc_sound_unittest',
+          ],
+          'includes': [
+            '../build/isolate.gypi',
+            'rtc_sound_unittest.isolate',
+          ],
+          'sources': [
+            'rtc_sound_unittest.isolate',
+          ],
+        },
+      ],
+    }],
+  ],
+}
diff --git a/sound/sounddevicelocator.h b/sound/sounddevicelocator.h
new file mode 100644
index 0000000..4e8e148
--- /dev/null
+++ b/sound/sounddevicelocator.h
@@ -0,0 +1,54 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_SOUNDDEVICELOCATOR_H_
+#define WEBRTC_SOUND_SOUNDDEVICELOCATOR_H_
+
+#include <string>
+
+#include "webrtc/base/constructormagic.h"
+
+namespace rtc {
+
+// A simple container for holding the name of a device and any additional id
+// information needed to locate and open it. Implementations of
+// SoundSystemInterface must subclass this to add any id information that they
+// need.
+class SoundDeviceLocator {
+ public:
+  virtual ~SoundDeviceLocator() {}
+
+  // Human-readable name for the device.
+  const std::string &name() const { return name_; }
+
+  // Name that the sound system uses to locate this device.
+  const std::string &device_name() const { return device_name_; }
+
+  // Makes a duplicate of this locator.
+  virtual SoundDeviceLocator *Copy() const = 0;
+
+ protected:
+  SoundDeviceLocator(const std::string &name,
+                     const std::string &device_name)
+      : name_(name), device_name_(device_name) {}
+
+  explicit SoundDeviceLocator(const SoundDeviceLocator &that)
+      : name_(that.name_), device_name_(that.device_name_) {}
+
+  std::string name_;
+  std::string device_name_;
+
+ private:
+  DISALLOW_ASSIGN(SoundDeviceLocator);
+};
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_SOUNDDEVICELOCATOR_H_
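For context, each backend subclasses SoundDeviceLocator to carry whatever identifier it needs to reopen the device later. A hypothetical subclass, sketched only to show the Copy() contract (the real locators are defined alongside the ALSA and PulseAudio sound systems elsewhere in this change):

#include "webrtc/sound/sounddevicelocator.h"

// Illustrative only; adds no extra id fields beyond name/device_name.
class ExampleDeviceLocator : public rtc::SoundDeviceLocator {
 public:
  ExampleDeviceLocator(const std::string &name, const std::string &device_name)
      : SoundDeviceLocator(name, device_name) {}
  // Copy() must return a heap-allocated duplicate owned by the caller.
  virtual rtc::SoundDeviceLocator *Copy() const {
    return new ExampleDeviceLocator(*this);
  }
};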
diff --git a/sound/soundinputstreaminterface.h b/sound/soundinputstreaminterface.h
new file mode 100644
index 0000000..6ce9446
--- /dev/null
+++ b/sound/soundinputstreaminterface.h
@@ -0,0 +1,68 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_SOUNDINPUTSTREAMINTERFACE_H_
+#define WEBRTC_SOUND_SOUNDINPUTSTREAMINTERFACE_H_
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/sigslot.h"
+
+namespace rtc {
+
+// Interface for consuming an input stream from a recording device.
+// Semantics and thread-safety of StartReading()/StopReading() are the same as
+// for rtc::Worker.
+class SoundInputStreamInterface {
+ public:
+  virtual ~SoundInputStreamInterface() {}
+
+  // Starts the reading of samples on the current thread.
+  virtual bool StartReading() = 0;
+  // Stops the reading of samples.
+  virtual bool StopReading() = 0;
+
+  // Retrieves the current input volume for this stream. Nominal range is
+  // defined by SoundSystemInterface::k(Max|Min)Volume, but values exceeding the
+  // max may be possible in some implementations. This call retrieves the actual
+  // volume currently in use by the OS, not a cached value from a previous
+  // (Get|Set)Volume() call.
+  virtual bool GetVolume(int *volume) = 0;
+
+  // Changes the input volume for this stream. Nominal range is defined by
+  // SoundSystemInterface::k(Max|Min)Volume. The effect of exceeding kMaxVolume
+  // is implementation-defined.
+  virtual bool SetVolume(int volume) = 0;
+
+  // Closes this stream object. If currently reading then this may only be
+  // called from the reading thread.
+  virtual bool Close() = 0;
+
+  // Gets the latency of the stream.
+  virtual int LatencyUsecs() = 0;
+
+  // Notifies the consumer of new data read from the device.
+  // The first parameter is a pointer to the data read, and is only valid for
+  // the duration of the call.
+  // The second parameter is the amount of data read in bytes (i.e., the valid
+  // length of the memory pointed to).
+  // The third parameter is the stream that is issuing the callback.
+  sigslot::signal3<const void *, size_t,
+      SoundInputStreamInterface *> SignalSamplesRead;
+
+ protected:
+  SoundInputStreamInterface() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SoundInputStreamInterface);
+};
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_SOUNDINPUTSTREAMINTERFACE_H_
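A hypothetical consumer of this interface, to show how the signal is meant to be used: SignalSamplesRead is a sigslot signal, so a reader derives from sigslot::has_slots<>, connects a member slot whose signature matches the signal, and then calls StartReading(). All names below are illustrative only.

#include "webrtc/sound/soundinputstreaminterface.h"

class ExampleRecorder : public sigslot::has_slots<> {
 public:
  explicit ExampleRecorder(rtc::SoundInputStreamInterface *stream)
      : stream_(stream) {
    stream_->SignalSamplesRead.connect(this, &ExampleRecorder::OnSamplesRead);
  }
  bool Start() { return stream_->StartReading(); }
  bool Stop() { return stream_->StopReading(); }

 private:
  // Slot signature matches signal3<const void *, size_t, ...> above.
  void OnSamplesRead(const void *data, size_t size,
                     rtc::SoundInputStreamInterface *stream) {
    // 'data' is only valid for the duration of this call, so copy or
    // process the 'size' bytes here.
  }

  rtc::SoundInputStreamInterface *stream_;
};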
diff --git a/sound/soundoutputstreaminterface.h b/sound/soundoutputstreaminterface.h
new file mode 100644
index 0000000..2b501d6
--- /dev/null
+++ b/sound/soundoutputstreaminterface.h
@@ -0,0 +1,72 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_SOUNDOUTPUTSTREAMINTERFACE_H_
+#define WEBRTC_SOUND_SOUNDOUTPUTSTREAMINTERFACE_H_
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/sigslot.h"
+
+namespace rtc {
+
+// Interface for outputting a stream to a playback device.
+// Semantics and thread-safety of EnableBufferMonitoring()/
+// DisableBufferMonitoring() are the same as for rtc::Worker.
+class SoundOutputStreamInterface {
+ public:
+  virtual ~SoundOutputStreamInterface() {}
+
+  // Enables monitoring the available buffer space on the current thread.
+  virtual bool EnableBufferMonitoring() = 0;
+  // Disables the monitoring.
+  virtual bool DisableBufferMonitoring() = 0;
+
+  // Writes the given samples to the device. If currently monitoring then this
+  // may only be called from the monitoring thread.
+  virtual bool WriteSamples(const void *sample_data,
+                            size_t size) = 0;
+
+  // Retrieves the current output volume for this stream. Nominal range is
+  // defined by SoundSystemInterface::k(Max|Min)Volume, but values exceeding the
+  // max may be possible in some implementations. This call retrieves the actual
+  // volume currently in use by the OS, not a cached value from a previous
+  // (Get|Set)Volume() call.
+  virtual bool GetVolume(int *volume) = 0;
+
+  // Changes the output volume for this stream. Nominal range is defined by
+  // SoundSystemInterface::k(Max|Min)Volume. The effect of exceeding kMaxVolume
+  // is implementation-defined.
+  virtual bool SetVolume(int volume) = 0;
+
+  // Closes this stream object. If currently monitoring then this may only be
+  // called from the monitoring thread.
+  virtual bool Close() = 0;
+
+  // Gets the latency of the stream.
+  virtual int LatencyUsecs() = 0;
+
+  // Notifies the producer of the available buffer space for writes.
+  // It fires continuously as long as the space is greater than zero.
+  // The first parameter is the amount of buffer space available for data to
+  // be written (i.e., the maximum amount of data that can be written right now
+  // with WriteSamples() without blocking).
+  // The second parameter is the stream that is issuing the callback.
+  sigslot::signal2<size_t, SoundOutputStreamInterface *> SignalBufferSpace;
+
+ protected:
+  SoundOutputStreamInterface() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SoundOutputStreamInterface);
+};
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_SOUNDOUTPUTSTREAMINTERFACE_H_
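And the mirror image on the playback side, again purely illustrative: a producer connects to SignalBufferSpace, enables monitoring, and writes no more than the reported number of bytes from its slot so that WriteSamples() does not block.

#include "webrtc/sound/soundoutputstreaminterface.h"

class ExamplePlayer : public sigslot::has_slots<> {
 public:
  explicit ExamplePlayer(rtc::SoundOutputStreamInterface *stream)
      : stream_(stream) {
    stream_->SignalBufferSpace.connect(this, &ExamplePlayer::OnBufferSpace);
  }
  bool Start() { return stream_->EnableBufferMonitoring(); }

 private:
  void OnBufferSpace(size_t available,
                     rtc::SoundOutputStreamInterface *stream) {
    // Write at most 'available' bytes with stream->WriteSamples() here.
    // (The source of the sample data is omitted from this sketch.)
  }

  rtc::SoundOutputStreamInterface *stream_;
};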
diff --git a/sound/soundsystemfactory.h b/sound/soundsystemfactory.h
new file mode 100644
index 0000000..b86246c
--- /dev/null
+++ b/sound/soundsystemfactory.h
@@ -0,0 +1,27 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_SOUNDSYSTEMFACTORY_H_
+#define WEBRTC_SOUND_SOUNDSYSTEMFACTORY_H_
+
+#include "webrtc/base/referencecountedsingletonfactory.h"
+
+namespace rtc {
+
+class SoundSystemInterface;
+
+typedef rtc::ReferenceCountedSingletonFactory<SoundSystemInterface>
+    SoundSystemFactory;
+
+typedef rtc::rcsf_ptr<SoundSystemInterface> SoundSystemHandle;
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_SOUNDSYSTEMFACTORY_H_
diff --git a/sound/soundsysteminterface.cc b/sound/soundsysteminterface.cc
new file mode 100644
index 0000000..f1ee5a9
--- /dev/null
+++ b/sound/soundsysteminterface.cc
@@ -0,0 +1,29 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/sound/soundsysteminterface.h"
+
+#include "webrtc/sound/sounddevicelocator.h"
+
+namespace rtc {
+
+void SoundSystemInterface::ClearSoundDeviceLocatorList(
+    SoundSystemInterface::SoundDeviceLocatorList *devices) {
+  for (SoundDeviceLocatorList::iterator i = devices->begin();
+       i != devices->end();
+       ++i) {
+    if (*i) {
+      delete *i;
+    }
+  }
+  devices->clear();
+}
+
+}  // namespace rtc
diff --git a/sound/soundsysteminterface.h b/sound/soundsysteminterface.h
new file mode 100644
index 0000000..aa9a53a
--- /dev/null
+++ b/sound/soundsysteminterface.h
@@ -0,0 +1,112 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_SOUNDSYSTEMINTERFACE_H_
+#define WEBRTC_SOUND_SOUNDSYSTEMINTERFACE_H_
+
+#include <vector>
+
+#include "webrtc/base/constructormagic.h"
+
+namespace rtc {
+
+class SoundDeviceLocator;
+class SoundInputStreamInterface;
+class SoundOutputStreamInterface;
+
+// Interface for a platform's sound system.
+// Implementations must guarantee thread-safety for at least the following use
+// cases:
+// 1) Concurrent enumeration and opening of devices from different threads.
+// 2) Concurrent use of different Sound(Input|Output)StreamInterface
+// instances from different threads (but concurrent use of the _same_ one from
+// different threads need not be supported).
+class SoundSystemInterface {
+ public:
+  typedef std::vector<SoundDeviceLocator *> SoundDeviceLocatorList;
+
+  enum SampleFormat {
+    // Only one supported sample format at this time.
+    // The values here may be used in lookup tables, so they shouldn't change.
+    FORMAT_S16LE = 0,
+  };
+
+  enum Flags {
+    // Enable reporting the current stream latency in
+    // Sound(Input|Output)StreamInterface. See those classes for more details.
+    FLAG_REPORT_LATENCY = (1 << 0),
+  };
+
+  struct OpenParams {
+    // Format for the sound stream.
+    SampleFormat format;
+    // Sampling frequency in hertz.
+    unsigned int freq;
+    // Number of channels in the PCM stream.
+    unsigned int channels;
+    // Misc flags. Should be taken from the Flags enum above.
+    int flags;
+    // Desired latency, measured as a number of bytes of sample data.
+    int latency;
+  };
+
+  // Special values for the "latency" field of OpenParams.
+  // Use this one to say you don't care what the latency is. The sound system
+  // will optimize for other things instead.
+  static const int kNoLatencyRequirements = -1;
+  // Use this one to say that you want the sound system to pick an appropriate
+  // small latency value. The sound system may pick the minimum allowed one, or
+  // a slightly higher one in the event that the true minimum requires an
+  // undesirable trade-off.
+  static const int kLowLatency = 0;
+
+  // Max value for the volume parameters for Sound(Input|Output)StreamInterface.
+  static const int kMaxVolume = 255;
+  // Min value for the volume parameters for Sound(Input|Output)StreamInterface.
+  static const int kMinVolume = 0;
+
+  // Helper for clearing a locator list and deleting the entries.
+  static void ClearSoundDeviceLocatorList(SoundDeviceLocatorList *devices);
+
+  virtual ~SoundSystemInterface() {}
+
+  virtual bool Init() = 0;
+  virtual void Terminate() = 0;
+
+  // Enumerates the available devices. (Any pre-existing locators in the lists
+  // are deleted.)
+  virtual bool EnumeratePlaybackDevices(SoundDeviceLocatorList *devices) = 0;
+  virtual bool EnumerateCaptureDevices(SoundDeviceLocatorList *devices) = 0;
+
+  // Gets a special locator for the default device.
+  virtual bool GetDefaultPlaybackDevice(SoundDeviceLocator **device) = 0;
+  virtual bool GetDefaultCaptureDevice(SoundDeviceLocator **device) = 0;
+
+  // Opens the given device, or returns NULL on error.
+  virtual SoundOutputStreamInterface *OpenPlaybackDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params) = 0;
+  virtual SoundInputStreamInterface *OpenCaptureDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params) = 0;
+
+  // A human-readable name for this sound system.
+  virtual const char *GetName() const = 0;
+
+ protected:
+  SoundSystemInterface() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SoundSystemInterface);
+};
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_SOUNDSYSTEMINTERFACE_H_
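To tie the pieces of this interface together, a hypothetical caller that enumerates capture devices, opens the first one with explicit OpenParams, and then frees the locator list. Only the interface's own names are real; the function and parameter values below are illustrative.

#include "webrtc/sound/sounddevicelocator.h"
#include "webrtc/sound/soundinputstreaminterface.h"
#include "webrtc/sound/soundsysteminterface.h"

rtc::SoundInputStreamInterface *OpenFirstCaptureDevice(
    rtc::SoundSystemInterface *sound_system) {
  rtc::SoundSystemInterface::SoundDeviceLocatorList devices;
  if (!sound_system->EnumerateCaptureDevices(&devices) || devices.empty()) {
    return NULL;
  }
  rtc::SoundSystemInterface::OpenParams params;
  params.format = rtc::SoundSystemInterface::FORMAT_S16LE;
  params.freq = 16000;  // 16 kHz mono, a common voice configuration.
  params.channels = 1;
  params.flags = rtc::SoundSystemInterface::FLAG_REPORT_LATENCY;
  params.latency = rtc::SoundSystemInterface::kLowLatency;
  rtc::SoundInputStreamInterface *stream =
      sound_system->OpenCaptureDevice(devices.front(), params);
  // The enumerated locators are heap-allocated; this helper deletes them.
  rtc::SoundSystemInterface::ClearSoundDeviceLocatorList(&devices);
  return stream;
}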
diff --git a/sound/soundsystemproxy.cc b/sound/soundsystemproxy.cc
new file mode 100644
index 0000000..e26a6bd
--- /dev/null
+++ b/sound/soundsystemproxy.cc
@@ -0,0 +1,47 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/sound/soundsystemproxy.h"
+
+namespace rtc {
+
+bool SoundSystemProxy::EnumeratePlaybackDevices(
+    SoundDeviceLocatorList *devices) {
+  return wrapped_ ? wrapped_->EnumeratePlaybackDevices(devices) : false;
+}
+
+bool SoundSystemProxy::EnumerateCaptureDevices(
+    SoundDeviceLocatorList *devices) {
+  return wrapped_ ? wrapped_->EnumerateCaptureDevices(devices) : false;
+}
+
+bool SoundSystemProxy::GetDefaultPlaybackDevice(
+    SoundDeviceLocator **device) {
+  return wrapped_ ? wrapped_->GetDefaultPlaybackDevice(device) : false;
+}
+
+bool SoundSystemProxy::GetDefaultCaptureDevice(
+    SoundDeviceLocator **device) {
+  return wrapped_ ? wrapped_->GetDefaultCaptureDevice(device) : false;
+}
+
+SoundOutputStreamInterface *SoundSystemProxy::OpenPlaybackDevice(
+    const SoundDeviceLocator *device,
+    const OpenParams &params) {
+  return wrapped_ ? wrapped_->OpenPlaybackDevice(device, params) : NULL;
+}
+
+SoundInputStreamInterface *SoundSystemProxy::OpenCaptureDevice(
+    const SoundDeviceLocator *device,
+    const OpenParams &params) {
+  return wrapped_ ? wrapped_->OpenCaptureDevice(device, params) : NULL;
+}
+
+}  // namespace rtc
diff --git a/sound/soundsystemproxy.h b/sound/soundsystemproxy.h
new file mode 100644
index 0000000..d13cf15
--- /dev/null
+++ b/sound/soundsystemproxy.h
@@ -0,0 +1,47 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SOUND_SOUNDSYSTEMPROXY_H_
+#define WEBRTC_SOUND_SOUNDSYSTEMPROXY_H_
+
+#include "webrtc/sound/soundsysteminterface.h"
+#include "webrtc/base/basictypes.h"  // for NULL
+
+namespace rtc {
+
+// A SoundSystemProxy is a sound system that defers to another one.
+// Init(), Terminate(), and GetName() are left as pure virtual, so a subclass
+// must define them.
+class SoundSystemProxy : public SoundSystemInterface {
+ public:
+  SoundSystemProxy() : wrapped_(NULL) {}
+
+  // Each of these methods simply defers to wrapped_ if non-NULL, else fails.
+
+  virtual bool EnumeratePlaybackDevices(SoundDeviceLocatorList *devices);
+  virtual bool EnumerateCaptureDevices(SoundDeviceLocatorList *devices);
+
+  virtual bool GetDefaultPlaybackDevice(SoundDeviceLocator **device);
+  virtual bool GetDefaultCaptureDevice(SoundDeviceLocator **device);
+
+  virtual SoundOutputStreamInterface *OpenPlaybackDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params);
+  virtual SoundInputStreamInterface *OpenCaptureDevice(
+      const SoundDeviceLocator *device,
+      const OpenParams &params);
+
+ protected:
+  SoundSystemInterface *wrapped_;
+};
+
+}  // namespace rtc
+
+#endif  // WEBRTC_SOUND_SOUNDSYSTEMPROXY_H_
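The proxy is what lets a platform sound system (such as LinuxSoundSystem in this change) pick a backend at Init() time and forward everything else unchanged. A minimal hypothetical subclass only has to set wrapped_ and supply the three methods the proxy leaves pure virtual:

#include "webrtc/sound/soundsystemproxy.h"

// Illustrative proxy that forwards to a backend chosen by the caller.
class ExampleProxySoundSystem : public rtc::SoundSystemProxy {
 public:
  explicit ExampleProxySoundSystem(rtc::SoundSystemInterface *backend)
      : backend_(backend) {}

  virtual bool Init() {
    wrapped_ = backend_;  // From here on every proxied call defers to it.
    return wrapped_->Init();
  }
  virtual void Terminate() {
    if (wrapped_) {
      wrapped_->Terminate();
      wrapped_ = NULL;
    }
  }
  virtual const char *GetName() const { return "ExampleProxy"; }

 private:
  rtc::SoundSystemInterface *backend_;
};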
diff --git a/webrtc.gyp b/webrtc.gyp
index d50552d..96ae6a9 100644
--- a/webrtc.gyp
+++ b/webrtc.gyp
@@ -20,6 +20,7 @@
   'variables': {
     'webrtc_all_dependencies': [
       'base/base.gyp:*',
+      'sound/sound.gyp:*',
       'common.gyp:*',
       'common_audio/common_audio.gyp:*',
       'common_video/common_video.gyp:*',
@@ -43,6 +44,7 @@
           'dependencies': [
             'base/base_tests.gyp:*',
             'common_video/common_video_unittests.gyp:*',
+            'sound/sound_tests.gyp:*',
             'system_wrappers/source/system_wrappers_tests.gyp:*',
             'test/metrics.gyp:*',
             'test/test.gyp:*',