Merge from Chromium at DEPS revision 267aeeb8d85c

This commit was generated by merge_to_master.py.

Change-Id: I3cccc8f04ad0036aecdb7eefe316a059ebcefaf9
diff --git a/OWNERS b/OWNERS
index 570953a..a0b7b4b 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,14 +1,13 @@
 set noparent
-fischman@webrtc.org
 henrike@webrtc.org
 hta@webrtc.org
+jiayl@webrtc.org
 juberti@webrtc.org
 mallinath@webrtc.org
 perkj@webrtc.org
 pthatcher@webrtc.org
 sergeyu@chromium.org
 tommi@webrtc.org
-wu@webrtc.org
 per-file *.isolate=kjellander@webrtc.org
 
 
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index d447947..a7b4851 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -24,11 +24,7 @@
 # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 # List of files that should not be committed to
-DO_NOT_SUBMIT_FILES = [
-    "talk/media/webrtc/webrtcmediaengine.h",
-    "talk/media/webrtc/webrtcvideoengine.cc",
-    "talk/media/webrtc/webrtcvideoengine.h",
-    "talk/media/webrtc/webrtcvideoengine_unittest.cc"]
+DO_NOT_SUBMIT_FILES = ["talk/media/webrtc/webrtcvideoengine.cc"]
 
 def _LicenseHeader(input_api):
   """Returns the license header regexp."""
diff --git a/app/webrtc/java/android/org/webrtc/VideoRendererGui.java b/app/webrtc/java/android/org/webrtc/VideoRendererGui.java
index 439f942..af625c0 100644
--- a/app/webrtc/java/android/org/webrtc/VideoRendererGui.java
+++ b/app/webrtc/java/android/org/webrtc/VideoRendererGui.java
@@ -37,6 +37,10 @@
 import javax.microedition.khronos.egl.EGLConfig;
 import javax.microedition.khronos.opengles.GL10;
 
+import android.graphics.SurfaceTexture;
+import android.opengl.EGL14;
+import android.opengl.EGLContext;
+import android.opengl.GLES11Ext;
 import android.opengl.GLES20;
 import android.opengl.GLSurfaceView;
 import android.util.Log;
@@ -54,14 +58,28 @@
   private static VideoRendererGui instance = null;
   private static final String TAG = "VideoRendererGui";
   private GLSurfaceView surface;
+  private static EGLContext eglContext = null;
   // Indicates if SurfaceView.Renderer.onSurfaceCreated was called.
   // If true then for every newly created yuv image renderer createTexture()
   // should be called. The variable is accessed on multiple threads and
   // all accesses are synchronized on yuvImageRenderers' object lock.
   private boolean onSurfaceCreatedCalled;
+  private int screenWidth;
+  private int screenHeight;
   // List of yuv renderers.
   private ArrayList<YuvImageRenderer> yuvImageRenderers;
-  private int program;
+  private int yuvProgram;
+  private int oesProgram;
+  // Types of video scaling:
+  // SCALE_ASPECT_FIT - video frame is scaled to fit the size of the view by
+  //    maintaining the aspect ratio (black borders may be displayed).
+  // SCALE_ASPECT_FILL - video frame is scaled to fill the size of the view by
+  //    maintaining the aspect ratio. Some portion of the video frame may be
+  //    clipped.
+  // SCALE_FILL - video frame is scaled to fill the size of the view. Video
+  //    aspect ratio is changed if necessary.
+  private static enum ScalingType
+      { SCALE_ASPECT_FIT, SCALE_ASPECT_FILL, SCALE_FILL };
 
   private final String VERTEX_SHADER_STRING =
       "varying vec2 interp_tc;\n" +
@@ -73,7 +91,7 @@
       "  interp_tc = in_tc;\n" +
       "}\n";
 
-  private final String FRAGMENT_SHADER_STRING =
+  private final String YUV_FRAGMENT_SHADER_STRING =
       "precision mediump float;\n" +
       "varying vec2 interp_tc;\n" +
       "\n" +
@@ -91,6 +109,19 @@
       "                      y + 1.77 * u, 1);\n" +
       "}\n";
 
+
+  private static final String OES_FRAGMENT_SHADER_STRING =
+      "#extension GL_OES_EGL_image_external : require\n" +
+      "precision mediump float;\n" +
+      "varying vec2 interp_tc;\n" +
+      "\n" +
+      "uniform samplerExternalOES oes_tex;\n" +
+      "\n" +
+      "void main() {\n" +
+      "  gl_FragColor = texture2D(oes_tex, interp_tc);\n" +
+      "}\n";
+
+
   private VideoRendererGui(GLSurfaceView surface) {
     this.surface = surface;
     // Create an OpenGL ES 2.0 context.
@@ -124,23 +155,46 @@
     return buffer;
   }
 
-  // Compile & attach a |type| shader specified by |source| to |program|.
-  private static void addShaderTo(
-      int type, String source, int program) {
+  private int loadShader(int shaderType, String source) {
     int[] result = new int[] {
         GLES20.GL_FALSE
     };
-    int shader = GLES20.glCreateShader(type);
+    int shader = GLES20.glCreateShader(shaderType);
     GLES20.glShaderSource(shader, source);
     GLES20.glCompileShader(shader);
     GLES20.glGetShaderiv(shader, GLES20.GL_COMPILE_STATUS, result, 0);
-    abortUnless(result[0] == GLES20.GL_TRUE,
-        GLES20.glGetShaderInfoLog(shader) + ", source: " + source);
-    GLES20.glAttachShader(program, shader);
-    GLES20.glDeleteShader(shader);
-
+    if (result[0] != GLES20.GL_TRUE) {
+      Log.e(TAG, "Could not compile shader " + shaderType + ":" +
+          GLES20.glGetShaderInfoLog(shader));
+      throw new RuntimeException(GLES20.glGetShaderInfoLog(shader));
+    }
     checkNoGLES2Error();
-  }
+    return shader;
+}
+
+
+  private int createProgram(String vertexSource, String fragmentSource) {
+    int vertexShader = loadShader(GLES20.GL_VERTEX_SHADER, vertexSource);
+    int fragmentShader = loadShader(GLES20.GL_FRAGMENT_SHADER, fragmentSource);
+    int program = GLES20.glCreateProgram();
+    if (program == 0) {
+      throw new RuntimeException("Could not create program");
+    }
+    GLES20.glAttachShader(program, vertexShader);
+    GLES20.glAttachShader(program, fragmentShader);
+    GLES20.glLinkProgram(program);
+    int[] linkStatus = new int[] {
+        GLES20.GL_FALSE
+    };
+    GLES20.glGetProgramiv(program, GLES20.GL_LINK_STATUS, linkStatus, 0);
+    if (linkStatus[0] != GLES20.GL_TRUE) {
+      Log.e(TAG, "Could not link program: " +
+          GLES20.glGetProgramInfoLog(program));
+      throw new RuntimeException(GLES20.glGetProgramInfoLog(program));
+    }
+    checkNoGLES2Error();
+    return program;
+}
 
   /**
    * Class used to display stream of YUV420 frames at particular location
@@ -149,9 +203,12 @@
    */
   private static class YuvImageRenderer implements VideoRenderer.Callbacks {
     private GLSurfaceView surface;
-    private int program;
-    private FloatBuffer textureVertices;
+    private int id;
+    private int yuvProgram;
+    private int oesProgram;
     private int[] yuvTextures = { -1, -1, -1 };
+    private int oesTexture = -1;
+    private float[] stMatrix = new float[16];
 
     // Render frame queue - accessed by two threads. renderFrame() call does
     // an offer (writing I420Frame to render) and early-returns (recording
@@ -159,8 +216,13 @@
     // copies frame to texture and then removes it from a queue using poll().
     LinkedBlockingQueue<I420Frame> frameToRenderQueue;
     // Local copy of incoming video frame.
-    private I420Frame frameToRender;
-    // Flag if renderFrame() was ever called
+    private I420Frame yuvFrameToRender;
+    private I420Frame textureFrameToRender;
+    // Type of video frame used for recent frame rendering.
+    private static enum RendererType { RENDERER_YUV, RENDERER_TEXTURE };
+    private RendererType rendererType;
+    private ScalingType scalingType;
+    // Flag if renderFrame() was ever called.
     boolean seenFrame;
     // Total number of video frames received in renderFrame() call.
     private int framesReceived;
@@ -174,40 +236,68 @@
     // Time in ns spent in draw() function.
     private long drawTimeNs;
     // Time in ns spent in renderFrame() function - including copying frame
-    // data to rendering planes
+    // data to rendering planes.
     private long copyTimeNs;
-
-    // Texture Coordinates mapping the entire texture.
-    private final FloatBuffer textureCoords = directNativeFloatBuffer(
-        new float[] {
-            0, 0, 0, 1, 1, 0, 1, 1
-        });
+    // Texture vertices.
+    private float texLeft;
+    private float texRight;
+    private float texTop;
+    private float texBottom;
+    private FloatBuffer textureVertices;
+    // Texture UV coordinates offsets.
+    private float texOffsetU;
+    private float texOffsetV;
+    private FloatBuffer textureCoords;
+    // Flag if texture vertices or coordinates update is needed.
+    private boolean updateTextureProperties;
+    // Viewport dimensions.
+    private int screenWidth;
+    private int screenHeight;
+    // Video dimension.
+    private int videoWidth;
+    private int videoHeight;
 
     private YuvImageRenderer(
-        GLSurfaceView surface,
-        int x, int y, int width, int height) {
-      Log.v(TAG, "YuvImageRenderer.Create");
+        GLSurfaceView surface, int id,
+        int x, int y, int width, int height,
+        ScalingType scalingType) {
+      Log.d(TAG, "YuvImageRenderer.Create id: " + id);
       this.surface = surface;
+      this.id = id;
+      this.scalingType = scalingType;
       frameToRenderQueue = new LinkedBlockingQueue<I420Frame>(1);
       // Create texture vertices.
-      float xLeft = (x - 50) / 50.0f;
-      float yTop = (50 - y) / 50.0f;
-      float xRight = Math.min(1.0f, (x + width - 50) / 50.0f);
-      float yBottom = Math.max(-1.0f, (50 - y - height) / 50.0f);
+      texLeft = (x - 50) / 50.0f;
+      texTop = (50 - y) / 50.0f;
+      texRight = Math.min(1.0f, (x + width - 50) / 50.0f);
+      texBottom = Math.max(-1.0f, (50 - y - height) / 50.0f);
       float textureVeticesFloat[] = new float[] {
-          xLeft, yTop,
-          xLeft, yBottom,
-          xRight, yTop,
-          xRight, yBottom
+          texLeft, texTop,
+          texLeft, texBottom,
+          texRight, texTop,
+          texRight, texBottom
       };
       textureVertices = directNativeFloatBuffer(textureVeticesFloat);
+      // Create texture UV coordinates.
+      texOffsetU = 0;
+      texOffsetV = 0;
+      float textureCoordinatesFloat[] = new float[] {
+          texOffsetU, texOffsetV,               // left top
+          texOffsetU, 1.0f - texOffsetV,        // left bottom
+          1.0f - texOffsetU, texOffsetV,        // right top
+          1.0f - texOffsetU, 1.0f - texOffsetV  // right bottom
+      };
+      textureCoords = directNativeFloatBuffer(textureCoordinatesFloat);
+      updateTextureProperties = false;
     }
 
-    private void createTextures(int program) {
-      Log.v(TAG, "  YuvImageRenderer.createTextures");
-      this.program = program;
+    private void createTextures(int yuvProgram, int oesProgram) {
+      Log.d(TAG, "  YuvImageRenderer.createTextures " + id + " on GL thread:" +
+          Thread.currentThread().getId());
+      this.yuvProgram = yuvProgram;
+      this.oesProgram = oesProgram;
 
-      // Generate 3 texture ids for Y/U/V and place them into |textures|.
+      // Generate 3 texture ids for Y/U/V and place them into |yuvTextures|.
       GLES20.glGenTextures(3, yuvTextures, 0);
       for (int i = 0; i < 3; i++)  {
         GLES20.glActiveTexture(GLES20.GL_TEXTURE0 + i);
@@ -226,39 +316,139 @@
       checkNoGLES2Error();
     }
 
+    private void checkAdjustTextureCoords() {
+      if (!updateTextureProperties ||
+          scalingType == ScalingType.SCALE_FILL) {
+        return;
+      }
+      // Re-calculate texture vertices to preserve video aspect ratio.
+      float texRight = this.texRight;
+      float texLeft = this.texLeft;
+      float texTop = this.texTop;
+      float texBottom = this.texBottom;
+      float displayWidth = (texRight - texLeft) * screenWidth / 2;
+      float displayHeight = (texTop - texBottom) * screenHeight / 2;
+      if (displayWidth > 1 && displayHeight > 1 &&
+          videoWidth > 1 && videoHeight > 1) {
+        float displayAspectRatio = displayWidth / displayHeight;
+        float videoAspectRatio = (float)videoWidth / videoHeight;
+        if (scalingType == ScalingType.SCALE_ASPECT_FIT) {
+          // Need to re-adjust vertices width or height to match video AR.
+          if (displayAspectRatio > videoAspectRatio) {
+            float deltaX = (displayWidth - videoAspectRatio * displayHeight) /
+                    instance.screenWidth;
+            texRight -= deltaX;
+            texLeft += deltaX;
+          } else {
+            float deltaY = (displayHeight - displayWidth / videoAspectRatio) /
+                    instance.screenHeight;
+            texTop -= deltaY;
+            texBottom += deltaY;
+          }
+          // Re-allocate vertices buffer to adjust to video aspect ratio.
+          float textureVeticesFloat[] = new float[] {
+              texLeft, texTop,
+              texLeft, texBottom,
+              texRight, texTop,
+              texRight, texBottom
+          };
+          textureVertices = directNativeFloatBuffer(textureVeticesFloat);
+        }
+        if (scalingType == ScalingType.SCALE_ASPECT_FILL) {
+          // Need to re-adjust UV coordinates to match display AR.
+          if (displayAspectRatio > videoAspectRatio) {
+            texOffsetV = (1.0f - videoAspectRatio / displayAspectRatio) / 2.0f;
+          } else {
+            texOffsetU = (1.0f - displayAspectRatio / videoAspectRatio) / 2.0f;
+          }
+          // Re-allocate coordinates buffer to adjust to display aspect ratio.
+          float textureCoordinatesFloat[] = new float[] {
+              texOffsetU, texOffsetV,               // left top
+              texOffsetU, 1.0f - texOffsetV,        // left bottom
+              1.0f - texOffsetU, texOffsetV,        // right top
+              1.0f - texOffsetU, 1.0f - texOffsetV  // right bottom
+          };
+          textureCoords = directNativeFloatBuffer(textureCoordinatesFloat);
+        }
+      }
+      updateTextureProperties = false;
+    }
+
     private void draw() {
-      long now = System.nanoTime();
       if (!seenFrame) {
         // No frame received yet - nothing to render.
         return;
       }
+      // Check if texture vertices/coordinates adjustment is required when
+      // screen orientation changes or video frame size changes.
+      checkAdjustTextureCoords();
+
+      long now = System.nanoTime();
+
       I420Frame frameFromQueue;
       synchronized (frameToRenderQueue) {
         frameFromQueue = frameToRenderQueue.peek();
         if (frameFromQueue != null && startTimeNs == -1) {
           startTimeNs = now;
         }
-        for (int i = 0; i < 3; ++i) {
-          int w = (i == 0) ? frameToRender.width : frameToRender.width / 2;
-          int h = (i == 0) ? frameToRender.height : frameToRender.height / 2;
-          GLES20.glActiveTexture(GLES20.GL_TEXTURE0 + i);
-          GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, yuvTextures[i]);
-          if (frameFromQueue != null) {
-            GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_LUMINANCE,
-                w, h, 0, GLES20.GL_LUMINANCE, GLES20.GL_UNSIGNED_BYTE,
-                frameFromQueue.yuvPlanes[i]);
+
+        if (rendererType == RendererType.RENDERER_YUV) {
+          // YUV textures rendering.
+          GLES20.glUseProgram(yuvProgram);
+
+          for (int i = 0; i < 3; ++i) {
+            GLES20.glActiveTexture(GLES20.GL_TEXTURE0 + i);
+            GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, yuvTextures[i]);
+            if (frameFromQueue != null) {
+              int w = (i == 0) ?
+                  frameFromQueue.width : frameFromQueue.width / 2;
+              int h = (i == 0) ?
+                  frameFromQueue.height : frameFromQueue.height / 2;
+              GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_LUMINANCE,
+                  w, h, 0, GLES20.GL_LUMINANCE, GLES20.GL_UNSIGNED_BYTE,
+                  frameFromQueue.yuvPlanes[i]);
+            }
           }
+        } else {
+          // External texture rendering.
+          GLES20.glUseProgram(oesProgram);
+
+          if (frameFromQueue != null) {
+            oesTexture = frameFromQueue.textureId;
+            if (frameFromQueue.textureObject instanceof SurfaceTexture) {
+              SurfaceTexture surfaceTexture =
+                  (SurfaceTexture) frameFromQueue.textureObject;
+              surfaceTexture.updateTexImage();
+              surfaceTexture.getTransformMatrix(stMatrix);
+            }
+          }
+          GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
+          GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, oesTexture);
         }
+
         if (frameFromQueue != null) {
           frameToRenderQueue.poll();
         }
       }
-      int posLocation = GLES20.glGetAttribLocation(program, "in_pos");
+
+      if (rendererType == RendererType.RENDERER_YUV) {
+        GLES20.glUniform1i(GLES20.glGetUniformLocation(yuvProgram, "y_tex"), 0);
+        GLES20.glUniform1i(GLES20.glGetUniformLocation(yuvProgram, "u_tex"), 1);
+        GLES20.glUniform1i(GLES20.glGetUniformLocation(yuvProgram, "v_tex"), 2);
+      }
+
+      int posLocation = GLES20.glGetAttribLocation(yuvProgram, "in_pos");
+      if (posLocation == -1) {
+        throw new RuntimeException("Could not get attrib location for in_pos");
+      }
       GLES20.glEnableVertexAttribArray(posLocation);
       GLES20.glVertexAttribPointer(
           posLocation, 2, GLES20.GL_FLOAT, false, 0, textureVertices);
 
-      int texLocation = GLES20.glGetAttribLocation(program, "in_tc");
+      int texLocation = GLES20.glGetAttribLocation(yuvProgram, "in_tc");
+      if (texLocation == -1) {
+        throw new RuntimeException("Could not get attrib location for in_tc");
+      }
       GLES20.glEnableVertexAttribArray(texLocation);
       GLES20.glVertexAttribPointer(
           texLocation, 2, GLES20.GL_FLOAT, false, 0, textureCoords);
@@ -281,29 +471,41 @@
 
     private void logStatistics() {
       long timeSinceFirstFrameNs = System.nanoTime() - startTimeNs;
-      Log.v(TAG, "Frames received: " + framesReceived + ". Dropped: " +
-          framesDropped + ". Rendered: " + framesRendered);
+      Log.d(TAG, "ID: " + id + ". Type: " + rendererType +
+          ". Frames received: " + framesReceived +
+          ". Dropped: " + framesDropped + ". Rendered: " + framesRendered);
       if (framesReceived > 0 && framesRendered > 0) {
-        Log.v(TAG, "Duration: " + (int)(timeSinceFirstFrameNs / 1e6) +
+        Log.d(TAG, "Duration: " + (int)(timeSinceFirstFrameNs / 1e6) +
             " ms. FPS: " + (float)framesRendered * 1e9 / timeSinceFirstFrameNs);
-        Log.v(TAG, "Draw time: " +
+        Log.d(TAG, "Draw time: " +
             (int) (drawTimeNs / (1000 * framesRendered)) + " us. Copy time: " +
             (int) (copyTimeNs / (1000 * framesReceived)) + " us");
       }
     }
 
+    public void setScreenSize(final int screenWidth, final int screenHeight) {
+      this.screenWidth = screenWidth;
+      this.screenHeight = screenHeight;
+      updateTextureProperties = true;
+    }
+
     @Override
     public void setSize(final int width, final int height) {
-      Log.v(TAG, "YuvImageRenderer.setSize: " + width + " x " + height);
+      Log.d(TAG, "ID: " + id + ". YuvImageRenderer.setSize: " +
+          width + " x " + height);
+      videoWidth = width;
+      videoHeight = height;
       int[] strides = { width, width / 2, width / 2  };
       // Frame re-allocation need to be synchronized with copying
       // frame to textures in draw() function to avoid re-allocating
       // the frame while it is being copied.
       synchronized (frameToRenderQueue) {
-        // Clear rendering queue
+        // Clear rendering queue.
         frameToRenderQueue.poll();
-        // Re-allocate / allocate the frame
-        frameToRender = new I420Frame(width, height, strides, null);
+        // Re-allocate / allocate the frame.
+        yuvFrameToRender = new I420Frame(width, height, strides, null);
+        textureFrameToRender = new I420Frame(width, height, null, -1);
+        updateTextureProperties = true;
       }
     }
 
@@ -311,24 +513,26 @@
     public synchronized void renderFrame(I420Frame frame) {
       long now = System.nanoTime();
       framesReceived++;
-      // Check input frame parameters.
-      if (!(frame.yuvStrides[0] == frame.width &&
-          frame.yuvStrides[1] == frame.width / 2 &&
-          frame.yuvStrides[2] == frame.width / 2)) {
-        Log.e(TAG, "Incorrect strides " + frame.yuvStrides[0] + ", " +
-            frame.yuvStrides[1] + ", " + frame.yuvStrides[2]);
-        return;
-      }
       // Skip rendering of this frame if setSize() was not called.
-      if (frameToRender == null) {
+      if (yuvFrameToRender == null || textureFrameToRender == null) {
         framesDropped++;
         return;
       }
-      // Check incoming frame dimensions
-      if (frame.width != frameToRender.width ||
-          frame.height != frameToRender.height) {
-        throw new RuntimeException("Wrong frame size " +
-            frame.width + " x " + frame.height);
+      // Check input frame parameters.
+      if (frame.yuvFrame) {
+        if (!(frame.yuvStrides[0] == frame.width &&
+            frame.yuvStrides[1] == frame.width / 2 &&
+            frame.yuvStrides[2] == frame.width / 2)) {
+          Log.e(TAG, "Incorrect strides " + frame.yuvStrides[0] + ", " +
+              frame.yuvStrides[1] + ", " + frame.yuvStrides[2]);
+          return;
+        }
+        // Check incoming frame dimensions.
+        if (frame.width != yuvFrameToRender.width ||
+            frame.height != yuvFrameToRender.height) {
+          throw new RuntimeException("Wrong frame size " +
+              frame.width + " x " + frame.height);
+        }
       }
 
       if (frameToRenderQueue.size() > 0) {
@@ -336,20 +540,36 @@
         framesDropped++;
         return;
       }
-      frameToRender.copyFrom(frame);
+
+      // Create a local copy of the frame.
+      if (frame.yuvFrame) {
+        yuvFrameToRender.copyFrom(frame);
+        rendererType = RendererType.RENDERER_YUV;
+        frameToRenderQueue.offer(yuvFrameToRender);
+      } else {
+        textureFrameToRender.copyFrom(frame);
+        rendererType = RendererType.RENDERER_TEXTURE;
+        frameToRenderQueue.offer(textureFrameToRender);
+      }
       copyTimeNs += (System.nanoTime() - now);
-      frameToRenderQueue.offer(frameToRender);
       seenFrame = true;
+
+      // Request rendering.
       surface.requestRender();
     }
+
   }
 
   /** Passes GLSurfaceView to video renderer. */
   public static void setView(GLSurfaceView surface) {
-    Log.v(TAG, "VideoRendererGui.setView");
+    Log.d(TAG, "VideoRendererGui.setView");
     instance = new VideoRendererGui(surface);
   }
 
+  public static EGLContext getEGLContext() {
+    return eglContext;
+  }
+
   /**
    * Creates VideoRenderer with top left corner at (x, y) and resolution
    * (width, height). All parameters are in percentage of screen resolution.
@@ -360,6 +580,11 @@
     return new VideoRenderer(javaGuiRenderer);
   }
 
+  public static VideoRenderer.Callbacks createGuiRenderer(
+      int x, int y, int width, int height) {
+    return create(x, y, width, height);
+  }
+
   /**
    * Creates VideoRenderer.Callbacks with top left corner at (x, y) and
    * resolution (width, height). All parameters are in percentage of
@@ -379,7 +604,8 @@
           "Attempt to create yuv renderer before setting GLSurfaceView");
     }
     final YuvImageRenderer yuvImageRenderer = new YuvImageRenderer(
-        instance.surface, x, y, width, height);
+        instance.surface, instance.yuvImageRenderers.size(),
+        x, y, width, height, ScalingType.SCALE_ASPECT_FIT);
     synchronized (instance.yuvImageRenderers) {
       if (instance.onSurfaceCreatedCalled) {
         // onSurfaceCreated has already been called for VideoRendererGui -
@@ -388,7 +614,10 @@
         final CountDownLatch countDownLatch = new CountDownLatch(1);
         instance.surface.queueEvent(new Runnable() {
           public void run() {
-            yuvImageRenderer.createTextures(instance.program);
+            yuvImageRenderer.createTextures(
+                instance.yuvProgram, instance.oesProgram);
+            yuvImageRenderer.setScreenSize(
+                instance.screenWidth, instance.screenHeight);
             countDownLatch.countDown();
           }
         });
@@ -407,43 +636,40 @@
 
   @Override
   public void onSurfaceCreated(GL10 unused, EGLConfig config) {
-    Log.v(TAG, "VideoRendererGui.onSurfaceCreated");
+    Log.d(TAG, "VideoRendererGui.onSurfaceCreated");
+    // Store render EGL context
+    eglContext = EGL14.eglGetCurrentContext();
+    Log.d(TAG, "VideoRendererGui EGL Context: " + eglContext);
 
-    // Create program.
-    program = GLES20.glCreateProgram();
-    addShaderTo(GLES20.GL_VERTEX_SHADER, VERTEX_SHADER_STRING, program);
-    addShaderTo(GLES20.GL_FRAGMENT_SHADER, FRAGMENT_SHADER_STRING, program);
-
-    GLES20.glLinkProgram(program);
-    int[] result = new int[] {
-        GLES20.GL_FALSE
-    };
-    result[0] = GLES20.GL_FALSE;
-    GLES20.glGetProgramiv(program, GLES20.GL_LINK_STATUS, result, 0);
-    abortUnless(result[0] == GLES20.GL_TRUE,
-        GLES20.glGetProgramInfoLog(program));
-    GLES20.glUseProgram(program);
-
-    GLES20.glUniform1i(GLES20.glGetUniformLocation(program, "y_tex"), 0);
-    GLES20.glUniform1i(GLES20.glGetUniformLocation(program, "u_tex"), 1);
-    GLES20.glUniform1i(GLES20.glGetUniformLocation(program, "v_tex"), 2);
+    // Create YUV and OES programs.
+    yuvProgram = createProgram(VERTEX_SHADER_STRING,
+        YUV_FRAGMENT_SHADER_STRING);
+    oesProgram = createProgram(VERTEX_SHADER_STRING,
+        OES_FRAGMENT_SHADER_STRING);
 
     synchronized (yuvImageRenderers) {
       // Create textures for all images.
       for (YuvImageRenderer yuvImageRenderer : yuvImageRenderers) {
-        yuvImageRenderer.createTextures(program);
+        yuvImageRenderer.createTextures(yuvProgram, oesProgram);
       }
       onSurfaceCreatedCalled = true;
     }
     checkNoGLES2Error();
-    GLES20.glClearColor(0.0f, 0.0f, 0.3f, 1.0f);
+    GLES20.glClearColor(0.0f, 0.0f, 0.1f, 1.0f);
   }
 
   @Override
   public void onSurfaceChanged(GL10 unused, int width, int height) {
-    Log.v(TAG, "VideoRendererGui.onSurfaceChanged: " +
+    Log.d(TAG, "VideoRendererGui.onSurfaceChanged: " +
         width + " x " + height + "  ");
+    screenWidth = width;
+    screenHeight = height;
     GLES20.glViewport(0, 0, width, height);
+    synchronized (yuvImageRenderers) {
+      for (YuvImageRenderer yuvImageRenderer : yuvImageRenderers) {
+        yuvImageRenderer.setScreenSize(screenWidth, screenHeight);
+      }
+    }
   }
 
   @Override
diff --git a/app/webrtc/java/jni/peerconnection_jni.cc b/app/webrtc/java/jni/peerconnection_jni.cc
index 9b34fb5..27f69e4 100644
--- a/app/webrtc/java/jni/peerconnection_jni.cc
+++ b/app/webrtc/java/jni/peerconnection_jni.cc
@@ -77,23 +77,31 @@
 #include "third_party/libyuv/include/libyuv/convert_from.h"
 #include "third_party/libyuv/include/libyuv/video_common.h"
 #include "webrtc/base/bind.h"
+#include "webrtc/base/checks.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/base/messagequeue.h"
 #include "webrtc/base/ssladapter.h"
+#include "webrtc/common_video/interface/texture_video_frame.h"
 #include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
 #include "webrtc/system_wrappers/interface/compile_assert.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 #include "webrtc/video_engine/include/vie_base.h"
 #include "webrtc/voice_engine/include/voe_base.h"
 
-#ifdef ANDROID
+#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
+#include <android/log.h>
+#include "webrtc/modules/video_capture/video_capture_internal.h"
+#include "webrtc/modules/video_render/video_render_internal.h"
 #include "webrtc/system_wrappers/interface/logcat_trace_context.h"
+#include "webrtc/system_wrappers/interface/tick_util.h"
 using webrtc::CodecSpecificInfo;
 using webrtc::DecodedImageCallback;
 using webrtc::EncodedImage;
 using webrtc::I420VideoFrame;
 using webrtc::LogcatTraceContext;
 using webrtc::RTPFragmentationHeader;
+using webrtc::TextureVideoFrame;
+using webrtc::TickTime;
 using webrtc::VideoCodec;
 #endif
 
@@ -111,6 +119,7 @@
 using webrtc::DataChannelInterface;
 using webrtc::DataChannelObserver;
 using webrtc::IceCandidateInterface;
+using webrtc::NativeHandle;
 using webrtc::MediaConstraintsInterface;
 using webrtc::MediaSourceInterface;
 using webrtc::MediaStreamInterface;
@@ -128,33 +137,18 @@
 using webrtc::VideoTrackVector;
 using webrtc::kVideoCodecVP8;
 
-// Abort the process if |x| is false, emitting |msg|.
-#define CHECK(x, msg)                                                          \
-  if (x) {} else {                                                             \
-    LOG(LS_ERROR) << __FILE__ << ":" << __LINE__ << ": " << msg;               \
-    abort();                                                                   \
-  }
-// Abort the process if |jni| has a Java exception pending, emitting |msg|.
-#define CHECK_EXCEPTION(jni, msg)                                              \
-  if (0) {} else {                                                             \
-    if (jni->ExceptionCheck()) {                                               \
-      jni->ExceptionDescribe();                                                \
-      jni->ExceptionClear();                                                   \
-      CHECK(0, msg);                                                           \
-    }                                                                          \
-  }
+// Abort the process if |jni| has a Java exception pending.
+// This macros uses the comma operator to execute ExceptionDescribe
+// and ExceptionClear ignoring their return values and sending ""
+// to the error stream.
+#define CHECK_EXCEPTION(jni)    \
+  CHECK(!jni->ExceptionCheck()) \
+      << (jni->ExceptionDescribe(), jni->ExceptionClear(), "")
 
-// Helper that calls ptr->Release() and logs a useful message if that didn't
-// actually delete *ptr because of extra refcounts.
-#define CHECK_RELEASE(ptr)                                        \
-  do {                                                            \
-    int count = (ptr)->Release();                                 \
-    if (count != 0) {                                             \
-      LOG(LS_ERROR) << "Refcount unexpectedly not 0: " << (ptr)   \
-                    << ": " << count;                             \
-    }                                                             \
-    CHECK(!count, "Unexpected refcount");                         \
-  } while (0)
+// Helper that calls ptr->Release() and aborts the process with a useful
+// message if that didn't actually delete *ptr because of extra refcounts.
+#define CHECK_RELEASE(ptr) \
+  CHECK_EQ(0, (ptr)->Release()) << "Unexpected refcount."
 
 namespace {
 
@@ -166,18 +160,25 @@
 // were attached by the JVM because of a Java->native call.
 static pthread_key_t g_jni_ptr;
 
+#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
+// Set in PeerConnectionFactory_initializeAndroidGlobals().
+static bool factory_static_initialized = false;
+#endif
+
+
 // Return thread ID as a string.
 static std::string GetThreadId() {
   char buf[21];  // Big enough to hold a kuint64max plus terminating NULL.
-  CHECK(snprintf(buf, sizeof(buf), "%llu", syscall(__NR_gettid)) <= sizeof(buf),
-        "Thread id is bigger than uint64??");
+  CHECK_LT(snprintf(buf, sizeof(buf), "%llu", syscall(__NR_gettid)),
+           sizeof(buf))
+      << "Thread id is bigger than uint64??";
   return std::string(buf);
 }
 
 // Return the current thread's name.
 static std::string GetThreadName() {
   char name[17];
-  CHECK(prctl(PR_GET_NAME, name) == 0, "prctl(PR_GET_NAME) failed");
+  CHECK_EQ(0, prctl(PR_GET_NAME, name)) << "prctl(PR_GET_NAME) failed";
   name[16] = '\0';
   return std::string(name);
 }
@@ -187,8 +188,8 @@
   void* env = NULL;
   jint status = g_jvm->GetEnv(&env, JNI_VERSION_1_6);
   CHECK(((env != NULL) && (status == JNI_OK)) ||
-            ((env == NULL) && (status == JNI_EDETACHED)),
-        "Unexpected GetEnv return: " << status << ":" << env);
+        ((env == NULL) && (status == JNI_EDETACHED)))
+      << "Unexpected GetEnv return: " << status << ":" << env;
   return reinterpret_cast<JNIEnv*>(env);
 }
 
@@ -203,16 +204,16 @@
   if (!GetEnv())
     return;
 
-  CHECK(GetEnv() == prev_jni_ptr,
-        "Detaching from another thread: " << prev_jni_ptr << ":" << GetEnv());
+  CHECK(GetEnv() == prev_jni_ptr)
+      << "Detaching from another thread: " << prev_jni_ptr << ":" << GetEnv();
   jint status = g_jvm->DetachCurrentThread();
-  CHECK(status == JNI_OK, "Failed to detach thread: " << status);
-  CHECK(!GetEnv(), "Detaching was a successful no-op???");
+  CHECK(status == JNI_OK) << "Failed to detach thread: " << status;
+  CHECK(!GetEnv()) << "Detaching was a successful no-op???";
 }
 
 static void CreateJNIPtrKey() {
-  CHECK(!pthread_key_create(&g_jni_ptr, &ThreadDestructor),
-        "pthread_key_create");
+  CHECK(!pthread_key_create(&g_jni_ptr, &ThreadDestructor))
+      << "pthread_key_create";
 }
 
 // Return a |JNIEnv*| usable on this thread.  Attaches to |g_jvm| if necessary.
@@ -220,7 +221,8 @@
   JNIEnv* jni = GetEnv();
   if (jni)
     return jni;
-  CHECK(!pthread_getspecific(g_jni_ptr), "TLS has a JNIEnv* but not attached?");
+  CHECK(!pthread_getspecific(g_jni_ptr))
+      << "TLS has a JNIEnv* but not attached?";
 
   char* name = strdup((GetThreadName() + " - " + GetThreadId()).c_str());
   JavaVMAttachArgs args;
@@ -233,11 +235,11 @@
 #else
   JNIEnv* env = NULL;
 #endif
-  CHECK(!g_jvm->AttachCurrentThread(&env, &args), "Failed to attach thread");
+  CHECK(!g_jvm->AttachCurrentThread(&env, &args)) << "Failed to attach thread";
   free(name);
-  CHECK(env, "AttachCurrentThread handed back NULL!");
+  CHECK(env) << "AttachCurrentThread handed back NULL!";
   jni = reinterpret_cast<JNIEnv*>(env);
-  CHECK(!pthread_setspecific(g_jni_ptr, jni), "pthread_setspecific");
+  CHECK(!pthread_setspecific(g_jni_ptr, jni)) << "pthread_setspecific";
   return jni;
 }
 
@@ -269,10 +271,13 @@
     LoadClass(jni, "org/webrtc/DataChannel$Init");
     LoadClass(jni, "org/webrtc/DataChannel$State");
     LoadClass(jni, "org/webrtc/IceCandidate");
-#ifdef ANDROID
+#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
+    LoadClass(jni, "android/graphics/SurfaceTexture");
+    LoadClass(jni, "android/opengl/EGLContext");
     LoadClass(jni, "org/webrtc/MediaCodecVideoEncoder");
     LoadClass(jni, "org/webrtc/MediaCodecVideoEncoder$OutputBufferInfo");
     LoadClass(jni, "org/webrtc/MediaCodecVideoDecoder");
+    LoadClass(jni, "org/webrtc/MediaCodecVideoDecoder$DecoderOutputBufferInfo");
 #endif
     LoadClass(jni, "org/webrtc/MediaSource$State");
     LoadClass(jni, "org/webrtc/MediaStream");
@@ -289,7 +294,7 @@
   }
 
   ~ClassReferenceHolder() {
-    CHECK(classes_.empty(), "Must call FreeReferences() before dtor!");
+    CHECK(classes_.empty()) << "Must call FreeReferences() before dtor!";
   }
 
   void FreeReferences(JNIEnv* jni) {
@@ -302,20 +307,20 @@
 
   jclass GetClass(const std::string& name) {
     std::map<std::string, jclass>::iterator it = classes_.find(name);
-    CHECK(it != classes_.end(), "Unexpected GetClass() call for: " << name);
+    CHECK(it != classes_.end()) << "Unexpected GetClass() call for: " << name;
     return it->second;
   }
 
  private:
   void LoadClass(JNIEnv* jni, const std::string& name) {
     jclass localRef = jni->FindClass(name.c_str());
-    CHECK_EXCEPTION(jni, "error during FindClass: " << name);
-    CHECK(localRef, name);
+    CHECK_EXCEPTION(jni) << "error during FindClass: " << name;
+    CHECK(localRef) << name;
     jclass globalRef = reinterpret_cast<jclass>(jni->NewGlobalRef(localRef));
-    CHECK_EXCEPTION(jni, "error during NewGlobalRef: " << name);
-    CHECK(globalRef, name);
+    CHECK_EXCEPTION(jni) << "error during NewGlobalRef: " << name;
+    CHECK(globalRef) << name;
     bool inserted = classes_.insert(std::make_pair(name, globalRef)).second;
-    CHECK(inserted, "Duplicate class name: " << name);
+    CHECK(inserted) << "Duplicate class name: " << name;
   }
 
   std::map<std::string, jclass> classes_;
@@ -329,27 +334,26 @@
 jmethodID GetMethodID(
     JNIEnv* jni, jclass c, const std::string& name, const char* signature) {
   jmethodID m = jni->GetMethodID(c, name.c_str(), signature);
-  CHECK_EXCEPTION(jni,
-                  "error during GetMethodID: " << name << ", " << signature);
-  CHECK(m, name << ", " << signature);
+  CHECK_EXCEPTION(jni) << "error during GetMethodID: " << name << ", "
+                       << signature;
+  CHECK(m) << name << ", " << signature;
   return m;
 }
 
 jmethodID GetStaticMethodID(
     JNIEnv* jni, jclass c, const char* name, const char* signature) {
   jmethodID m = jni->GetStaticMethodID(c, name, signature);
-  CHECK_EXCEPTION(jni,
-                  "error during GetStaticMethodID: "
-                  << name << ", " << signature);
-  CHECK(m, name << ", " << signature);
+  CHECK_EXCEPTION(jni) << "error during GetStaticMethodID: " << name << ", "
+                       << signature;
+  CHECK(m) << name << ", " << signature;
   return m;
 }
 
 jfieldID GetFieldID(
     JNIEnv* jni, jclass c, const char* name, const char* signature) {
   jfieldID f = jni->GetFieldID(c, name, signature);
-  CHECK_EXCEPTION(jni, "error during GetFieldID");
-  CHECK(f, name << ", " << signature);
+  CHECK_EXCEPTION(jni) << "error during GetFieldID";
+  CHECK(f) << name << ", " << signature;
   return f;
 }
 
@@ -361,15 +365,15 @@
 
 jclass GetObjectClass(JNIEnv* jni, jobject object) {
   jclass c = jni->GetObjectClass(object);
-  CHECK_EXCEPTION(jni, "error during GetObjectClass");
-  CHECK(c, "");
+  CHECK_EXCEPTION(jni) << "error during GetObjectClass";
+  CHECK(c) << "GetObjectClass returned NULL";
   return c;
 }
 
 jobject GetObjectField(JNIEnv* jni, jobject object, jfieldID id) {
   jobject o = jni->GetObjectField(object, id);
-  CHECK_EXCEPTION(jni, "error during GetObjectField");
-  CHECK(o, "");
+  CHECK_EXCEPTION(jni) << "error during GetObjectField";
+  CHECK(o) << "GetObjectField returned NULL";
   return o;
 }
 
@@ -379,32 +383,32 @@
 
 jlong GetLongField(JNIEnv* jni, jobject object, jfieldID id) {
   jlong l = jni->GetLongField(object, id);
-  CHECK_EXCEPTION(jni, "error during GetLongField");
+  CHECK_EXCEPTION(jni) << "error during GetLongField";
   return l;
 }
 
 jint GetIntField(JNIEnv* jni, jobject object, jfieldID id) {
   jint i = jni->GetIntField(object, id);
-  CHECK_EXCEPTION(jni, "error during GetIntField");
+  CHECK_EXCEPTION(jni) << "error during GetIntField";
   return i;
 }
 
 bool GetBooleanField(JNIEnv* jni, jobject object, jfieldID id) {
   jboolean b = jni->GetBooleanField(object, id);
-  CHECK_EXCEPTION(jni, "error during GetBooleanField");
+  CHECK_EXCEPTION(jni) << "error during GetBooleanField";
   return b;
 }
 
 jobject NewGlobalRef(JNIEnv* jni, jobject o) {
   jobject ret = jni->NewGlobalRef(o);
-  CHECK_EXCEPTION(jni, "error during NewGlobalRef");
-  CHECK(ret, "");
+  CHECK_EXCEPTION(jni) << "error during NewGlobalRef";
+  CHECK(ret);
   return ret;
 }
 
 void DeleteGlobalRef(JNIEnv* jni, jobject o) {
   jni->DeleteGlobalRef(o);
-  CHECK_EXCEPTION(jni, "error during DeleteGlobalRef");
+  CHECK_EXCEPTION(jni) << "error during DeleteGlobalRef";
 }
 
 // Given a jweak reference, allocate a (strong) local reference scoped to the
@@ -414,12 +418,12 @@
  public:
   WeakRef(JNIEnv* jni, jweak ref)
       : jni_(jni), obj_(jni_->NewLocalRef(ref)) {
-    CHECK_EXCEPTION(jni, "error during NewLocalRef");
+    CHECK_EXCEPTION(jni) << "error during NewLocalRef";
   }
   ~WeakRef() {
     if (obj_) {
       jni_->DeleteLocalRef(obj_);
-      CHECK_EXCEPTION(jni_, "error during DeleteLocalRef");
+      CHECK_EXCEPTION(jni_) << "error during DeleteLocalRef";
     }
   }
   jobject obj() { return obj_; }
@@ -435,7 +439,7 @@
 class ScopedLocalRefFrame {
  public:
   explicit ScopedLocalRefFrame(JNIEnv* jni) : jni_(jni) {
-    CHECK(!jni_->PushLocalFrame(0), "Failed to PushLocalFrame");
+    CHECK(!jni_->PushLocalFrame(0)) << "Failed to PushLocalFrame";
   }
   ~ScopedLocalRefFrame() {
     jni_->PopLocalFrame(NULL);
@@ -478,9 +482,9 @@
       jni, state_class, "values", ("()[L" + state_class_name  + ";").c_str());
   jobjectArray state_values = static_cast<jobjectArray>(
       jni->CallStaticObjectMethod(state_class, state_values_id));
-  CHECK_EXCEPTION(jni, "error during CallStaticObjectMethod");
+  CHECK_EXCEPTION(jni) << "error during CallStaticObjectMethod";
   jobject ret = jni->GetObjectArrayElement(state_values, index);
-  CHECK_EXCEPTION(jni, "error during GetObjectArrayElement");
+  CHECK_EXCEPTION(jni) << "error during GetObjectArrayElement";
   return ret;
 }
 
@@ -488,18 +492,18 @@
 static jstring JavaStringFromStdString(JNIEnv* jni, const std::string& native) {
   UnicodeString ustr(UnicodeString::fromUTF8(native));
   jstring jstr = jni->NewString(ustr.getBuffer(), ustr.length());
-  CHECK_EXCEPTION(jni, "error during NewString");
+  CHECK_EXCEPTION(jni) << "error during NewString";
   return jstr;
 }
 
 // Given a (UTF-16) jstring return a new UTF-8 native string.
 static std::string JavaToStdString(JNIEnv* jni, const jstring& j_string) {
   const jchar* jchars = jni->GetStringChars(j_string, NULL);
-  CHECK_EXCEPTION(jni, "Error during GetStringChars");
+  CHECK_EXCEPTION(jni) << "Error during GetStringChars";
   UnicodeString ustr(jchars, jni->GetStringLength(j_string));
-  CHECK_EXCEPTION(jni, "Error during GetStringLength");
+  CHECK_EXCEPTION(jni) << "Error during GetStringLength";
   jni->ReleaseStringChars(j_string, jchars);
-  CHECK_EXCEPTION(jni, "Error during ReleaseStringChars");
+  CHECK_EXCEPTION(jni) << "Error during ReleaseStringChars";
   std::string ret;
   return ustr.toUTF8String(ret);
 }
@@ -559,7 +563,7 @@
   virtual void OnIceCandidate(const IceCandidateInterface* candidate) OVERRIDE {
     ScopedLocalRefFrame local_ref_frame(jni());
     std::string sdp;
-    CHECK(candidate->ToString(&sdp), "got so far: " << sdp);
+    CHECK(candidate->ToString(&sdp)) << "got so far: " << sdp;
     jclass candidate_class = FindClass(jni(), "org/webrtc/IceCandidate");
     jmethodID ctor = GetMethodID(jni(), candidate_class,
         "<init>", "(Ljava/lang/String;ILjava/lang/String;)V");
@@ -567,18 +571,18 @@
     jstring j_sdp = JavaStringFromStdString(jni(), sdp);
     jobject j_candidate = jni()->NewObject(
         candidate_class, ctor, j_mid, candidate->sdp_mline_index(), j_sdp);
-    CHECK_EXCEPTION(jni(), "error during NewObject");
+    CHECK_EXCEPTION(jni()) << "error during NewObject";
     jmethodID m = GetMethodID(jni(), *j_observer_class_,
                               "onIceCandidate", "(Lorg/webrtc/IceCandidate;)V");
     jni()->CallVoidMethod(*j_observer_global_, m, j_candidate);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
   virtual void OnError() OVERRIDE {
     ScopedLocalRefFrame local_ref_frame(jni());
     jmethodID m = GetMethodID(jni(), *j_observer_class_, "onError", "()V");
     jni()->CallVoidMethod(*j_observer_global_, m);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
   virtual void OnSignalingChange(
@@ -590,7 +594,7 @@
     jobject new_state_enum =
         JavaEnumFromIndex(jni(), "PeerConnection$SignalingState", new_state);
     jni()->CallVoidMethod(*j_observer_global_, m, new_state_enum);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
   virtual void OnIceConnectionChange(
@@ -602,7 +606,7 @@
     jobject new_state_enum = JavaEnumFromIndex(
         jni(), "PeerConnection$IceConnectionState", new_state);
     jni()->CallVoidMethod(*j_observer_global_, m, new_state_enum);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
   virtual void OnIceGatheringChange(
@@ -614,14 +618,14 @@
     jobject new_state_enum = JavaEnumFromIndex(
         jni(), "PeerConnection$IceGatheringState", new_state);
     jni()->CallVoidMethod(*j_observer_global_, m, new_state_enum);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
   virtual void OnAddStream(MediaStreamInterface* stream) OVERRIDE {
     ScopedLocalRefFrame local_ref_frame(jni());
     jobject j_stream = jni()->NewObject(
         *j_media_stream_class_, j_media_stream_ctor_, (jlong)stream);
-    CHECK_EXCEPTION(jni(), "error during NewObject");
+    CHECK_EXCEPTION(jni()) << "error during NewObject";
 
     AudioTrackVector audio_tracks = stream->GetAudioTracks();
     for (size_t i = 0; i < audio_tracks.size(); ++i) {
@@ -629,7 +633,7 @@
       jstring id = JavaStringFromStdString(jni(), track->id());
       jobject j_track = jni()->NewObject(
           *j_audio_track_class_, j_audio_track_ctor_, (jlong)track, id);
-      CHECK_EXCEPTION(jni(), "error during NewObject");
+      CHECK_EXCEPTION(jni()) << "error during NewObject";
       jfieldID audio_tracks_id = GetFieldID(jni(),
                                             *j_media_stream_class_,
                                             "audioTracks",
@@ -640,8 +644,8 @@
                                   "add",
                                   "(Ljava/lang/Object;)Z");
       jboolean added = jni()->CallBooleanMethod(audio_tracks, add, j_track);
-      CHECK_EXCEPTION(jni(), "error during CallBooleanMethod");
-      CHECK(added, "");
+      CHECK_EXCEPTION(jni()) << "error during CallBooleanMethod";
+      CHECK(added);
     }
 
     VideoTrackVector video_tracks = stream->GetVideoTracks();
@@ -650,7 +654,7 @@
       jstring id = JavaStringFromStdString(jni(), track->id());
       jobject j_track = jni()->NewObject(
           *j_video_track_class_, j_video_track_ctor_, (jlong)track, id);
-      CHECK_EXCEPTION(jni(), "error during NewObject");
+      CHECK_EXCEPTION(jni()) << "error during NewObject";
       jfieldID video_tracks_id = GetFieldID(jni(),
                                             *j_media_stream_class_,
                                             "videoTracks",
@@ -661,22 +665,22 @@
                                   "add",
                                   "(Ljava/lang/Object;)Z");
       jboolean added = jni()->CallBooleanMethod(video_tracks, add, j_track);
-      CHECK_EXCEPTION(jni(), "error during CallBooleanMethod");
-      CHECK(added, "");
+      CHECK_EXCEPTION(jni()) << "error during CallBooleanMethod";
+      CHECK(added);
     }
     streams_[stream] = jni()->NewWeakGlobalRef(j_stream);
-    CHECK_EXCEPTION(jni(), "error during NewWeakGlobalRef");
+    CHECK_EXCEPTION(jni()) << "error during NewWeakGlobalRef";
 
     jmethodID m = GetMethodID(jni(), *j_observer_class_, "onAddStream",
                               "(Lorg/webrtc/MediaStream;)V");
     jni()->CallVoidMethod(*j_observer_global_, m, j_stream);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
   virtual void OnRemoveStream(MediaStreamInterface* stream) OVERRIDE {
     ScopedLocalRefFrame local_ref_frame(jni());
     NativeToJavaStreamsMap::iterator it = streams_.find(stream);
-    CHECK(it != streams_.end(), "unexpected stream: " << std::hex << stream);
+    CHECK(it != streams_.end()) << "unexpected stream: " << std::hex << stream;
 
     WeakRef s(jni(), it->second);
     streams_.erase(it);
@@ -686,14 +690,14 @@
     jmethodID m = GetMethodID(jni(), *j_observer_class_, "onRemoveStream",
                               "(Lorg/webrtc/MediaStream;)V");
     jni()->CallVoidMethod(*j_observer_global_, m, s.obj());
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
   virtual void OnDataChannel(DataChannelInterface* channel) OVERRIDE {
     ScopedLocalRefFrame local_ref_frame(jni());
     jobject j_channel = jni()->NewObject(
         *j_data_channel_class_, j_data_channel_ctor_, (jlong)channel);
-    CHECK_EXCEPTION(jni(), "error during NewObject");
+    CHECK_EXCEPTION(jni()) << "error during NewObject";
 
     jmethodID m = GetMethodID(jni(), *j_observer_class_, "onDataChannel",
                               "(Lorg/webrtc/DataChannel;)V");
@@ -704,9 +708,9 @@
     // CallVoidMethod above as Java code might call back into native code and be
     // surprised to see a refcount of 2.
     int bumped_count = channel->AddRef();
-    CHECK(bumped_count == 2, "Unexpected refcount OnDataChannel");
+    CHECK(bumped_count == 2) << "Unexpected refcount OnDataChannel";
 
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
   virtual void OnRenegotiationNeeded() OVERRIDE {
@@ -714,11 +718,11 @@
     jmethodID m =
         GetMethodID(jni(), *j_observer_class_, "onRenegotiationNeeded", "()V");
     jni()->CallVoidMethod(*j_observer_global_, m);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
   void SetConstraints(ConstraintsWrapper* constraints) {
-    CHECK(!constraints_.get(), "constraints already set!");
+    CHECK(!constraints_.get()) << "constraints already set!";
     constraints_.reset(constraints);
   }
 
@@ -777,29 +781,29 @@
     jmethodID j_iterator_id = GetMethodID(jni,
         GetObjectClass(jni, j_list), "iterator", "()Ljava/util/Iterator;");
     jobject j_iterator = jni->CallObjectMethod(j_list, j_iterator_id);
-    CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+    CHECK_EXCEPTION(jni) << "error during CallObjectMethod";
     jmethodID j_has_next = GetMethodID(jni,
         GetObjectClass(jni, j_iterator), "hasNext", "()Z");
     jmethodID j_next = GetMethodID(jni,
         GetObjectClass(jni, j_iterator), "next", "()Ljava/lang/Object;");
     while (jni->CallBooleanMethod(j_iterator, j_has_next)) {
-      CHECK_EXCEPTION(jni, "error during CallBooleanMethod");
+      CHECK_EXCEPTION(jni) << "error during CallBooleanMethod";
       jobject entry = jni->CallObjectMethod(j_iterator, j_next);
-      CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+      CHECK_EXCEPTION(jni) << "error during CallObjectMethod";
       jmethodID get_key = GetMethodID(jni,
           GetObjectClass(jni, entry), "getKey", "()Ljava/lang/String;");
       jstring j_key = reinterpret_cast<jstring>(
           jni->CallObjectMethod(entry, get_key));
-      CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+      CHECK_EXCEPTION(jni) << "error during CallObjectMethod";
       jmethodID get_value = GetMethodID(jni,
           GetObjectClass(jni, entry), "getValue", "()Ljava/lang/String;");
       jstring j_value = reinterpret_cast<jstring>(
           jni->CallObjectMethod(entry, get_value));
-      CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+      CHECK_EXCEPTION(jni) << "error during CallObjectMethod";
       field->push_back(Constraint(JavaToStdString(jni, j_key),
                                   JavaToStdString(jni, j_value)));
     }
-    CHECK_EXCEPTION(jni, "error during CallBooleanMethod");
+    CHECK_EXCEPTION(jni) << "error during CallBooleanMethod";
   }
 
   Constraints mandatory_;
@@ -809,7 +813,7 @@
 static jobject JavaSdpFromNativeSdp(
     JNIEnv* jni, const SessionDescriptionInterface* desc) {
   std::string sdp;
-  CHECK(desc->ToString(&sdp), "got so far: " << sdp);
+  CHECK(desc->ToString(&sdp)) << "got so far: " << sdp;
   jstring j_description = JavaStringFromStdString(jni, sdp);
 
   jclass j_type_class = FindClass(
@@ -820,7 +824,7 @@
   jstring j_type_string = JavaStringFromStdString(jni, desc->type());
   jobject j_type = jni->CallStaticObjectMethod(
       j_type_class, j_type_from_canonical, j_type_string);
-  CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+  CHECK_EXCEPTION(jni) << "error during CallObjectMethod";
 
   jclass j_sdp_class = FindClass(jni, "org/webrtc/SessionDescription");
   jmethodID j_sdp_ctor = GetMethodID(
@@ -828,7 +832,7 @@
       "(Lorg/webrtc/SessionDescription$Type;Ljava/lang/String;)V");
   jobject j_sdp = jni->NewObject(
       j_sdp_class, j_sdp_ctor, j_type, j_description);
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_EXCEPTION(jni) << "error during NewObject";
   return j_sdp;
 }
 
@@ -849,7 +853,7 @@
     ScopedLocalRefFrame local_ref_frame(jni());
     jmethodID m = GetMethodID(jni(), *j_observer_class_, "onSetSuccess", "()V");
     jni()->CallVoidMethod(*j_observer_global_, m);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
   // Can't mark OVERRIDE because of templating.
@@ -860,7 +864,7 @@
         "(Lorg/webrtc/SessionDescription;)V");
     jobject j_sdp = JavaSdpFromNativeSdp(jni(), desc);
     jni()->CallVoidMethod(*j_observer_global_, m, j_sdp);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
  protected:
@@ -871,7 +875,7 @@
                               "(Ljava/lang/String;)V");
     jstring j_error_string = JavaStringFromStdString(jni(), error);
     jni()->CallVoidMethod(*j_observer_global_, m, j_error_string);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
   JNIEnv* jni() {
@@ -917,11 +921,11 @@
   DataChannelObserverWrapper(JNIEnv* jni, jobject j_observer)
       : j_observer_global_(jni, j_observer),
         j_observer_class_(jni, GetObjectClass(jni, j_observer)),
+        j_buffer_class_(jni, FindClass(jni, "org/webrtc/DataChannel$Buffer")),
         j_on_state_change_mid_(GetMethodID(jni, *j_observer_class_,
                                            "onStateChange", "()V")),
         j_on_message_mid_(GetMethodID(jni, *j_observer_class_, "onMessage",
                                       "(Lorg/webrtc/DataChannel$Buffer;)V")),
-        j_buffer_class_(jni, FindClass(jni, "org/webrtc/DataChannel$Buffer")),
         j_buffer_ctor_(GetMethodID(jni, *j_buffer_class_,
                                    "<init>", "(Ljava/nio/ByteBuffer;Z)V")) {
   }
@@ -931,7 +935,7 @@
   virtual void OnStateChange() OVERRIDE {
     ScopedLocalRefFrame local_ref_frame(jni());
     jni()->CallVoidMethod(*j_observer_global_, j_on_state_change_mid_);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
   virtual void OnMessage(const DataBuffer& buffer) OVERRIDE {
@@ -942,7 +946,7 @@
     jobject j_buffer = jni()->NewObject(*j_buffer_class_, j_buffer_ctor_,
                                         byte_buffer, buffer.binary);
     jni()->CallVoidMethod(*j_observer_global_, j_on_message_mid_, j_buffer);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
  private:
@@ -985,7 +989,7 @@
     jmethodID m = GetMethodID(jni(), *j_observer_class_, "onComplete",
                               "([Lorg/webrtc/StatsReport;)V");
     jni()->CallVoidMethod(*j_observer_global_, m, j_reports);
-    CHECK_EXCEPTION(jni(), "error during CallVoidMethod");
+    CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
 
  private:
@@ -1067,6 +1071,38 @@
   scoped_ptr<cricket::VideoRenderer> renderer_;
 };
 
+// Wrapper for texture object in TextureVideoFrame.
+class NativeHandleImpl : public NativeHandle {
+ public:
+  NativeHandleImpl() :
+    ref_count_(0), texture_object_(NULL), texture_id_(-1) {}
+  virtual ~NativeHandleImpl() {}
+  virtual int32_t AddRef() {
+    return ++ref_count_;
+  }
+  virtual int32_t Release() {
+    return --ref_count_;
+  }
+  virtual void* GetHandle() {
+    return texture_object_;
+  }
+  int GetTextureId() {
+    return texture_id_;
+  }
+  void SetTextureObject(void *texture_object, int texture_id) {
+    texture_object_ = reinterpret_cast<jobject>(texture_object);
+    texture_id_ = texture_id;
+  }
+  int32_t ref_count() {
+    return ref_count_;
+  }
+
+ private:
+  int32_t ref_count_;
+  jobject texture_object_;
+  int32_t texture_id_;
+};
+
 // Wrapper dispatching webrtc::VideoRendererInterface to a Java VideoRenderer
 // instance.
 class JavaVideoRendererWrapper : public VideoRendererInterface {
@@ -1080,10 +1116,13 @@
             "(Lorg/webrtc/VideoRenderer$I420Frame;)V")),
         j_frame_class_(jni,
                        FindClass(jni, "org/webrtc/VideoRenderer$I420Frame")),
-        j_frame_ctor_id_(GetMethodID(
+        j_i420_frame_ctor_id_(GetMethodID(
             jni, *j_frame_class_, "<init>", "(II[I[Ljava/nio/ByteBuffer;)V")),
+        j_texture_frame_ctor_id_(GetMethodID(
+            jni, *j_frame_class_, "<init>",
+            "(IILjava/lang/Object;I)V")),
         j_byte_buffer_class_(jni, FindClass(jni, "java/nio/ByteBuffer")) {
-    CHECK_EXCEPTION(jni, "");
+    CHECK_EXCEPTION(jni);
   }
 
   virtual ~JavaVideoRendererWrapper() {}
@@ -1091,19 +1130,25 @@
   virtual void SetSize(int width, int height) OVERRIDE {
     ScopedLocalRefFrame local_ref_frame(jni());
     jni()->CallVoidMethod(*j_callbacks_, j_set_size_id_, width, height);
-    CHECK_EXCEPTION(jni(), "");
+    CHECK_EXCEPTION(jni());
   }
 
   virtual void RenderFrame(const cricket::VideoFrame* frame) OVERRIDE {
     ScopedLocalRefFrame local_ref_frame(jni());
-    jobject j_frame = CricketToJavaFrame(frame);
-    jni()->CallVoidMethod(*j_callbacks_, j_render_frame_id_, j_frame);
-    CHECK_EXCEPTION(jni(), "");
+    if (frame->GetNativeHandle() != NULL) {
+      jobject j_frame = CricketToJavaTextureFrame(frame);
+      jni()->CallVoidMethod(*j_callbacks_, j_render_frame_id_, j_frame);
+      CHECK_EXCEPTION(jni());
+    } else {
+      jobject j_frame = CricketToJavaI420Frame(frame);
+      jni()->CallVoidMethod(*j_callbacks_, j_render_frame_id_, j_frame);
+      CHECK_EXCEPTION(jni());
+    }
   }
 
  private:
   // Return a VideoRenderer.I420Frame referring to the data in |frame|.
-  jobject CricketToJavaFrame(const cricket::VideoFrame* frame) {
+  jobject CricketToJavaI420Frame(const cricket::VideoFrame* frame) {
     jintArray strides = jni()->NewIntArray(3);
     jint* strides_array = jni()->GetIntArrayElements(strides, NULL);
     strides_array[0] = frame->GetYPitch();
@@ -1122,10 +1167,21 @@
     jni()->SetObjectArrayElement(planes, 1, u_buffer);
     jni()->SetObjectArrayElement(planes, 2, v_buffer);
     return jni()->NewObject(
-        *j_frame_class_, j_frame_ctor_id_,
+        *j_frame_class_, j_i420_frame_ctor_id_,
         frame->GetWidth(), frame->GetHeight(), strides, planes);
   }
 
+  // Return a VideoRenderer.I420Frame referring to the texture object in |frame|.
+  jobject CricketToJavaTextureFrame(const cricket::VideoFrame* frame) {
+    NativeHandleImpl* handle =
+        reinterpret_cast<NativeHandleImpl*>(frame->GetNativeHandle());
+    jobject texture_object = reinterpret_cast<jobject>(handle->GetHandle());
+    int texture_id = handle->GetTextureId();
+    return jni()->NewObject(
+        *j_frame_class_, j_texture_frame_ctor_id_,
+        frame->GetWidth(), frame->GetHeight(), texture_object, texture_id);
+  }
+
   JNIEnv* jni() {
     return AttachCurrentThreadIfNeeded();
   }
@@ -1134,16 +1190,16 @@
   jmethodID j_set_size_id_;
   jmethodID j_render_frame_id_;
   ScopedGlobalRef<jclass> j_frame_class_;
-  jmethodID j_frame_ctor_id_;
+  jmethodID j_i420_frame_ctor_id_;
+  jmethodID j_texture_frame_ctor_id_;
   ScopedGlobalRef<jclass> j_byte_buffer_class_;
 };
 
-#ifdef ANDROID
+#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
 // TODO(fischman): consider pulling MediaCodecVideoEncoder out of this file and
 // into its own .h/.cc pair, if/when the JNI helper stuff above is extracted
 // from this file.
 
-#include <android/log.h>
 //#define TRACK_BUFFER_TIMING
 #define TAG "MediaCodecVideo"
 #ifdef TRACK_BUFFER_TIMING
@@ -1169,6 +1225,14 @@
 
 // Arbitrary interval to poll the codec for new outputs.
 enum { kMediaCodecPollMs = 10 };
+// Maximum time to wait for the media codec to have an output buffer ready, in ms.
+enum { kMediaCodecTimeoutMs = 500 };
+// Interval to print codec statistics (bitrate, fps, encoding/decoding time).
+enum { kMediaCodecStatisticsIntervalMs = 3000 };
+
+static int64_t GetCurrentTimeMs() {
+  return TickTime::Now().Ticks() / 1000000LL;
+}
 
 // MediaCodecVideoEncoder is a webrtc::VideoEncoder implementation that uses
 // Android's MediaCodec SDK API behind the scenes to implement (hopefully)
@@ -1224,9 +1288,6 @@
   int32_t ReleaseOnCodecThread();
   int32_t SetRatesOnCodecThread(uint32_t new_bit_rate, uint32_t frame_rate);
 
-  // Reset parameters valid between InitEncode() & Release() (see below).
-  void ResetParameters(JNIEnv* jni);
-
   // Helper accessors for MediaCodecVideoEncoder$OutputBufferInfo members.
   int GetOutputBufferInfoIndex(JNIEnv* jni, jobject j_output_buffer_info);
   jobject GetOutputBufferInfoBuffer(JNIEnv* jni, jobject j_output_buffer_info);
@@ -1269,11 +1330,21 @@
   enum libyuv::FourCC encoder_fourcc_; // Encoder color space format.
   int last_set_bitrate_kbps_;  // Last-requested bitrate in kbps.
   int last_set_fps_;  // Last-requested frame rate.
-  int frames_received_; // Number of frames received by encoder.
-  int frames_dropped_; // Number of frames dropped by encoder.
-  int frames_in_queue_; // Number of frames in encoder queue.
-  int64_t last_input_timestamp_ms_; // Timestamp of last received yuv frame.
-  int64_t last_output_timestamp_ms_; // Timestamp of last encoded frame.
+  int64_t current_timestamp_us_;  // Current frame timestamps in us.
+  int frames_received_;  // Number of frames received by encoder.
+  int frames_dropped_;  // Number of frames dropped by encoder.
+  int frames_resolution_update_;  // Number of frames with new codec resolution.
+  int frames_in_queue_;  // Number of frames in encoder queue.
+  int64_t start_time_ms_;  // Start time for statistics.
+  int current_frames_;  // Number of frames in the current statistics interval.
+  int current_bytes_;  // Encoded bytes in the current statistics interval.
+  int current_encoding_time_ms_;  // Total encoding time in the current statistics interval, in ms.
+  int64_t last_input_timestamp_ms_;  // Timestamp of last received yuv frame.
+  int64_t last_output_timestamp_ms_;  // Timestamp of last encoded frame.
+  std::vector<int32_t> timestamps_;  // Video frames timestamp queue.
+  std::vector<int64_t> render_times_ms_;  // Video frames render time queue.
+  std::vector<int64_t> frame_rtc_times_ms_;  // Time when video frame is sent to
+                                             // encoder input.
   // Frame size in bytes fed to MediaCodec.
   int yuv_size_;
   // True only when between a callback_->Encoded() call return a positive value
@@ -1284,24 +1355,24 @@
 };
 
 MediaCodecVideoEncoder::~MediaCodecVideoEncoder() {
-  // We depend on ResetParameters() to ensure no more callbacks to us after we
-  // are deleted, so assert it here.
-  CHECK(width_ == 0, "Release() should have been called");
+  // Call Release() to ensure no more callbacks to us after we are deleted.
+  Release();
 }
 
 MediaCodecVideoEncoder::MediaCodecVideoEncoder(JNIEnv* jni)
-    : callback_(NULL),
-      codec_thread_(new Thread()),
-      j_media_codec_video_encoder_class_(
-          jni,
-          FindClass(jni, "org/webrtc/MediaCodecVideoEncoder")),
-      j_media_codec_video_encoder_(
-          jni,
-          jni->NewObject(*j_media_codec_video_encoder_class_,
-                         GetMethodID(jni,
-                                     *j_media_codec_video_encoder_class_,
-                                     "<init>",
-                                     "()V"))) {
+  : callback_(NULL),
+    inited_(false),
+    codec_thread_(new Thread()),
+    j_media_codec_video_encoder_class_(
+        jni,
+        FindClass(jni, "org/webrtc/MediaCodecVideoEncoder")),
+    j_media_codec_video_encoder_(
+        jni,
+        jni->NewObject(*j_media_codec_video_encoder_class_,
+                       GetMethodID(jni,
+                                   *j_media_codec_video_encoder_class_,
+                                   "<init>",
+                                   "()V"))) {
   ScopedLocalRefFrame local_ref_frame(jni);
   // It would be nice to avoid spinning up a new thread per MediaCodec, and
   // instead re-use e.g. the PeerConnectionFactory's |worker_thread_|, but bug
@@ -1311,9 +1382,7 @@
   // in the bug, we have a problem.  For now work around that with a dedicated
   // thread.
   codec_thread_->SetName("MediaCodecVideoEncoder", NULL);
-  CHECK(codec_thread_->Start(), "Failed to start MediaCodecVideoEncoder");
-
-  ResetParameters(jni);
+  CHECK(codec_thread_->Start()) << "Failed to start MediaCodecVideoEncoder";
 
   jclass j_output_buffer_info_class =
       FindClass(jni, "org/webrtc/MediaCodecVideoEncoder$OutputBufferInfo");
@@ -1347,7 +1416,7 @@
       GetFieldID(jni, j_output_buffer_info_class, "isKeyFrame", "Z");
   j_info_presentation_timestamp_us_field_ = GetFieldID(
       jni, j_output_buffer_info_class, "presentationTimestampUs", "J");
-  CHECK_EXCEPTION(jni, "MediaCodecVideoEncoder ctor failed");
+  CHECK_EXCEPTION(jni) << "MediaCodecVideoEncoder ctor failed";
 }
 
 int32_t MediaCodecVideoEncoder::InitEncode(
@@ -1355,7 +1424,7 @@
     int32_t /* number_of_cores */,
     uint32_t /* max_payload_size */) {
   // Factory should guard against other codecs being used with us.
-  CHECK(codec_settings->codecType == kVideoCodecVP8, "Unsupported codec");
+  CHECK(codec_settings->codecType == kVideoCodecVP8) << "Unsupported codec";
 
   return codec_thread_->Invoke<int32_t>(
       Bind(&MediaCodecVideoEncoder::InitEncodeOnCodecThread,
@@ -1407,9 +1476,12 @@
 
   // We only ever send one message to |this| directly (not through a Bind()'d
   // functor), so expect no ID/data.
-  CHECK(!msg->message_id, "Unexpected message!");
-  CHECK(!msg->pdata, "Unexpected message!");
+  CHECK(!msg->message_id) << "Unexpected message!";
+  CHECK(!msg->pdata) << "Unexpected message!";
   CheckOnCodecThread();
+  if (!inited_) {
+    return;
+  }
 
   // It would be nice to recover from a failure here if one happened, but it's
   // unclear how to signal such a failure to the app, so instead we stay silent
@@ -1419,16 +1491,16 @@
 }
 
 void MediaCodecVideoEncoder::CheckOnCodecThread() {
-  CHECK(codec_thread_ == ThreadManager::Instance()->CurrentThread(),
-        "Running on wrong thread!");
+  CHECK(codec_thread_ == ThreadManager::Instance()->CurrentThread())
+      << "Running on wrong thread!";
 }
 
 void MediaCodecVideoEncoder::ResetCodec() {
   ALOGE("ResetCodec");
   if (Release() != WEBRTC_VIDEO_CODEC_OK ||
       codec_thread_->Invoke<int32_t>(Bind(
-          &MediaCodecVideoEncoder::InitEncodeOnCodecThread, this, 0, 0, 0, 0))
-            != WEBRTC_VIDEO_CODEC_OK) {
+          &MediaCodecVideoEncoder::InitEncodeOnCodecThread, this,
+          width_, height_, 0, 0)) != WEBRTC_VIDEO_CODEC_OK) {
     // TODO(fischman): wouldn't it be nice if there was a way to gracefully
     // degrade to a SW encoder at this point?  There isn't one AFAICT :(
     // https://code.google.com/p/webrtc/issues/detail?id=2920
@@ -1440,12 +1512,13 @@
   CheckOnCodecThread();
   JNIEnv* jni = AttachCurrentThreadIfNeeded();
   ScopedLocalRefFrame local_ref_frame(jni);
-  ALOGD("InitEncodeOnCodecThread %d x %d", width, height);
 
-  if (width == 0) {
-    width = width_;
-    height = height_;
+  ALOGD("InitEncodeOnCodecThread %d x %d. Bitrate: %d kbps. Fps: %d",
+      width, height, kbps, fps);
+  if (kbps == 0) {
     kbps = last_set_bitrate_kbps_;
+  }
+  if (fps == 0) {
     fps = last_set_fps_;
   }
 
@@ -1456,9 +1529,19 @@
   yuv_size_ = width_ * height_ * 3 / 2;
   frames_received_ = 0;
   frames_dropped_ = 0;
+  frames_resolution_update_ = 0;
   frames_in_queue_ = 0;
+  current_timestamp_us_ = 0;
+  start_time_ms_ = GetCurrentTimeMs();
+  current_frames_ = 0;
+  current_bytes_ = 0;
+  current_encoding_time_ms_ = 0;
   last_input_timestamp_ms_ = -1;
   last_output_timestamp_ms_ = -1;
+  timestamps_.clear();
+  render_times_ms_.clear();
+  frame_rtc_times_ms_.clear();
+  drop_next_input_frame_ = false;
   // We enforce no extra stride/padding in the format creation step.
   jobjectArray input_buffers = reinterpret_cast<jobjectArray>(
       jni->CallObjectMethod(*j_media_codec_video_encoder_,
@@ -1467,7 +1550,7 @@
                             height_,
                             kbps,
                             fps));
-  CHECK_EXCEPTION(jni, "");
+  CHECK_EXCEPTION(jni);
   if (IsNull(jni, input_buffers))
     return WEBRTC_VIDEO_CODEC_ERROR;
 
@@ -1487,17 +1570,18 @@
       return WEBRTC_VIDEO_CODEC_ERROR;
   }
   size_t num_input_buffers = jni->GetArrayLength(input_buffers);
-  CHECK(input_buffers_.empty(), "Unexpected double InitEncode without Release");
+  CHECK(input_buffers_.empty())
+      << "Unexpected double InitEncode without Release";
   input_buffers_.resize(num_input_buffers);
   for (size_t i = 0; i < num_input_buffers; ++i) {
     input_buffers_[i] =
         jni->NewGlobalRef(jni->GetObjectArrayElement(input_buffers, i));
     int64 yuv_buffer_capacity =
         jni->GetDirectBufferCapacity(input_buffers_[i]);
-    CHECK_EXCEPTION(jni, "");
-    CHECK(yuv_buffer_capacity >= yuv_size_, "Insufficient capacity");
+    CHECK_EXCEPTION(jni);
+    CHECK(yuv_buffer_capacity >= yuv_size_) << "Insufficient capacity";
   }
-  CHECK_EXCEPTION(jni, "");
+  CHECK_EXCEPTION(jni);
 
   codec_thread_->PostDelayed(kMediaCodecPollMs, this);
   return WEBRTC_VIDEO_CODEC_OK;
@@ -1510,6 +1594,9 @@
   JNIEnv* jni = AttachCurrentThreadIfNeeded();
   ScopedLocalRefFrame local_ref_frame(jni);
 
+  if (!inited_) {
+    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+  }
   frames_received_++;
   if (!DeliverPendingOutputs(jni)) {
     ResetCodec();
@@ -1517,23 +1604,35 @@
   }
 
   if (drop_next_input_frame_) {
+    ALOGV("Encoder drop frame - failed callback.");
     drop_next_input_frame_ = false;
     return WEBRTC_VIDEO_CODEC_OK;
   }
 
-  CHECK(frame_types->size() == 1, "Unexpected stream count");
+  CHECK(frame_types->size() == 1) << "Unexpected stream count";
+  if (frame.width() != width_ || frame.height() != height_) {
+    frames_resolution_update_++;
+    ALOGD("Unexpected frame resolution change from %d x %d to %d x %d",
+        width_, height_, frame.width(), frame.height());
+    if (frames_resolution_update_ > 3) {
+      // Reset codec if we received more than 3 frames with new resolution.
+      width_ = frame.width();
+      height_ = frame.height();
+      frames_resolution_update_ = 0;
+      ResetCodec();
+    }
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+  frames_resolution_update_ = 0;
+
   bool key_frame = frame_types->front() != webrtc::kDeltaFrame;
 
-  CHECK(frame.width() == width_, "Unexpected resolution change");
-  CHECK(frame.height() == height_, "Unexpected resolution change");
-
   // Check if we accumulated too many frames in encoder input buffers
-  // so the encoder latency exceeds 100ms and drop frame if so.
-  if (frames_in_queue_ > 0 && last_input_timestamp_ms_ > 0 &&
-      last_output_timestamp_ms_ > 0) {
+  // or the encoder latency exceeds 70 ms and drop frame if so.
+  if (frames_in_queue_ > 0 && last_input_timestamp_ms_ >= 0) {
     int encoder_latency_ms = last_input_timestamp_ms_ -
         last_output_timestamp_ms_;
-    if (encoder_latency_ms > 100) {
+    if (frames_in_queue_ > 2 || encoder_latency_ms > 70) {
       ALOGV("Drop frame - encoder is behind by %d ms. Q size: %d",
           encoder_latency_ms, frames_in_queue_);
       frames_dropped_++;
@@ -1543,10 +1642,10 @@
 
   int j_input_buffer_index = jni->CallIntMethod(*j_media_codec_video_encoder_,
                                                 j_dequeue_input_buffer_method_);
-  CHECK_EXCEPTION(jni, "");
+  CHECK_EXCEPTION(jni);
   if (j_input_buffer_index == -1) {
     // Video codec falls behind - no input buffer available.
-    ALOGV("Drop frame - no input buffers available");
+    ALOGV("Encoder drop frame - no input buffers available");
     frames_dropped_++;
     return WEBRTC_VIDEO_CODEC_OK;  // TODO(fischman): see webrtc bug 2887.
   }
@@ -1556,31 +1655,38 @@
   }
 
   ALOGV("Encode frame # %d. Buffer # %d. TS: %lld.",
-      frames_received_, j_input_buffer_index, frame.render_time_ms());
+      frames_received_, j_input_buffer_index, current_timestamp_us_ / 1000);
 
   jobject j_input_buffer = input_buffers_[j_input_buffer_index];
   uint8* yuv_buffer =
       reinterpret_cast<uint8*>(jni->GetDirectBufferAddress(j_input_buffer));
-  CHECK_EXCEPTION(jni, "");
-  CHECK(yuv_buffer, "Indirect buffer??");
+  CHECK_EXCEPTION(jni);
+  CHECK(yuv_buffer) << "Indirect buffer??";
   CHECK(!libyuv::ConvertFromI420(
           frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
           frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
           frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane),
           yuv_buffer, width_,
           width_, height_,
-          encoder_fourcc_),
-      "ConvertFromI420 failed");
-  jlong timestamp_us = frame.render_time_ms() * 1000;
-  last_input_timestamp_ms_ = frame.render_time_ms();
+          encoder_fourcc_))
+      << "ConvertFromI420 failed";
+  last_input_timestamp_ms_ = current_timestamp_us_ / 1000;
   frames_in_queue_++;
+
+  // Save input image timestamps for later output
+  timestamps_.push_back(frame.timestamp());
+  render_times_ms_.push_back(frame.render_time_ms());
+  frame_rtc_times_ms_.push_back(GetCurrentTimeMs());
+
   bool encode_status = jni->CallBooleanMethod(*j_media_codec_video_encoder_,
                                               j_encode_method_,
                                               key_frame,
                                               j_input_buffer_index,
                                               yuv_size_,
-                                              timestamp_us);
-  CHECK_EXCEPTION(jni, "");
+                                              current_timestamp_us_);
+  CHECK_EXCEPTION(jni);
+  current_timestamp_us_ += 1000000 / last_set_fps_;
+
   if (!encode_status || !DeliverPendingOutputs(jni)) {
     ResetCodec();
     return WEBRTC_VIDEO_CODEC_ERROR;
@@ -1599,8 +1705,9 @@
 }
 
 int32_t MediaCodecVideoEncoder::ReleaseOnCodecThread() {
-  if (!inited_)
+  if (!inited_) {
     return WEBRTC_VIDEO_CODEC_OK;
+  }
   CheckOnCodecThread();
   JNIEnv* jni = AttachCurrentThreadIfNeeded();
   ALOGD("EncoderRelease: Frames received: %d. Frames dropped: %d.",
@@ -1610,8 +1717,9 @@
     jni->DeleteGlobalRef(input_buffers_[i]);
   input_buffers_.clear();
   jni->CallVoidMethod(*j_media_codec_video_encoder_, j_release_method_);
-  ResetParameters(jni);
-  CHECK_EXCEPTION(jni, "");
+  CHECK_EXCEPTION(jni);
+  rtc::MessageQueueManager::Clear(this);
+  inited_ = false;
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
@@ -1624,13 +1732,17 @@
   }
   JNIEnv* jni = AttachCurrentThreadIfNeeded();
   ScopedLocalRefFrame local_ref_frame(jni);
-  last_set_bitrate_kbps_ = new_bit_rate;
-  last_set_fps_ = frame_rate;
+  if (new_bit_rate > 0) {
+    last_set_bitrate_kbps_ = new_bit_rate;
+  }
+  if (frame_rate > 0) {
+    last_set_fps_ = frame_rate;
+  }
   bool ret = jni->CallBooleanMethod(*j_media_codec_video_encoder_,
                                        j_set_rates_method_,
-                                       new_bit_rate,
-                                       frame_rate);
-  CHECK_EXCEPTION(jni, "");
+                                       last_set_bitrate_kbps_,
+                                       last_set_fps_);
+  CHECK_EXCEPTION(jni);
   if (!ret) {
     ResetCodec();
     return WEBRTC_VIDEO_CODEC_ERROR;
@@ -1638,17 +1750,6 @@
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-void MediaCodecVideoEncoder::ResetParameters(JNIEnv* jni) {
-  rtc::MessageQueueManager::Clear(this);
-  width_ = 0;
-  height_ = 0;
-  yuv_size_ = 0;
-  drop_next_input_frame_ = false;
-  inited_ = false;
-  CHECK(input_buffers_.empty(),
-        "ResetParameters called while holding input_buffers_!");
-}
-
 int MediaCodecVideoEncoder::GetOutputBufferInfoIndex(
     JNIEnv* jni,
     jobject j_output_buffer_info) {
@@ -1678,9 +1779,10 @@
   while (true) {
     jobject j_output_buffer_info = jni->CallObjectMethod(
         *j_media_codec_video_encoder_, j_dequeue_output_buffer_method_);
-    CHECK_EXCEPTION(jni, "");
-    if (IsNull(jni, j_output_buffer_info))
+    CHECK_EXCEPTION(jni);
+    if (IsNull(jni, j_output_buffer_info)) {
       break;
+    }
 
     int output_buffer_index =
         GetOutputBufferInfoIndex(jni, j_output_buffer_info);
@@ -1689,31 +1791,62 @@
       return false;
     }
 
-    jlong capture_time_ms =
+    // Get frame timestamps from a queue.
+    last_output_timestamp_ms_ =
         GetOutputBufferInfoPresentationTimestampUs(jni, j_output_buffer_info) /
         1000;
-    last_output_timestamp_ms_ = capture_time_ms;
+    int32_t timestamp = timestamps_.front();
+    timestamps_.erase(timestamps_.begin());
+    int64_t render_time_ms = render_times_ms_.front();
+    render_times_ms_.erase(render_times_ms_.begin());
+    int64_t frame_encoding_time_ms = GetCurrentTimeMs() -
+        frame_rtc_times_ms_.front();
+    frame_rtc_times_ms_.erase(frame_rtc_times_ms_.begin());
     frames_in_queue_--;
-    ALOGV("Encoder got output buffer # %d. TS: %lld. Latency: %lld",
-        output_buffer_index, last_output_timestamp_ms_,
-        last_input_timestamp_ms_ - last_output_timestamp_ms_);
 
+    // Extract payload and key frame flag.
     int32_t callback_status = 0;
+    jobject j_output_buffer =
+        GetOutputBufferInfoBuffer(jni, j_output_buffer_info);
+    bool key_frame = GetOutputBufferInfoIsKeyFrame(jni, j_output_buffer_info);
+    size_t payload_size = jni->GetDirectBufferCapacity(j_output_buffer);
+    uint8* payload = reinterpret_cast<uint8_t*>(
+        jni->GetDirectBufferAddress(j_output_buffer));
+    CHECK_EXCEPTION(jni);
+
+    ALOGV("Encoder got output buffer # %d. Size: %d. TS: %lld. Latency: %lld."
+        " EncTime: %lld",
+        output_buffer_index, payload_size, last_output_timestamp_ms_,
+        last_input_timestamp_ms_ - last_output_timestamp_ms_,
+        frame_encoding_time_ms);
+
+    // Calculate and print encoding statistics - every 3 seconds.
+    current_frames_++;
+    current_bytes_ += payload_size;
+    current_encoding_time_ms_ += frame_encoding_time_ms;
+    int statistic_time_ms = GetCurrentTimeMs() - start_time_ms_;
+    if (statistic_time_ms >= kMediaCodecStatisticsIntervalMs &&
+        current_frames_ > 0) {
+      ALOGD("Encoder bitrate: %d, target: %d kbps, fps: %d,"
+          " encTime: %d for last %d ms",
+          current_bytes_ * 8 / statistic_time_ms,
+          last_set_bitrate_kbps_,
+          (current_frames_ * 1000 + statistic_time_ms / 2) / statistic_time_ms,
+          current_encoding_time_ms_ / current_frames_, statistic_time_ms);
+      start_time_ms_ = GetCurrentTimeMs();
+      current_frames_ = 0;
+      current_bytes_= 0;
+      current_encoding_time_ms_ = 0;
+    }
+
+    // Callback - return encoded frame.
     if (callback_) {
-      jobject j_output_buffer =
-          GetOutputBufferInfoBuffer(jni, j_output_buffer_info);
-      bool key_frame = GetOutputBufferInfoIsKeyFrame(jni, j_output_buffer_info);
-      size_t payload_size = jni->GetDirectBufferCapacity(j_output_buffer);
-      uint8* payload = reinterpret_cast<uint8_t*>(
-          jni->GetDirectBufferAddress(j_output_buffer));
-      CHECK_EXCEPTION(jni, "");
       scoped_ptr<webrtc::EncodedImage> image(
           new webrtc::EncodedImage(payload, payload_size, payload_size));
       image->_encodedWidth = width_;
       image->_encodedHeight = height_;
-      // Convert capture time to 90 kHz RTP timestamp.
-      image->_timeStamp = static_cast<uint32_t>(90 * capture_time_ms);
-      image->capture_time_ms_ = capture_time_ms;
+      image->_timeStamp = timestamp;
+      image->capture_time_ms_ = render_time_ms;
       image->_frameType = (key_frame ? webrtc::kKeyFrame : webrtc::kDeltaFrame);
       image->_completeFrame = true;
 
@@ -1736,19 +1869,21 @@
       callback_status = callback_->Encoded(*image, &info, &header);
     }
 
+    // Return output buffer back to the encoder.
     bool success = jni->CallBooleanMethod(*j_media_codec_video_encoder_,
                                           j_release_output_buffer_method_,
                                           output_buffer_index);
-    CHECK_EXCEPTION(jni, "");
+    CHECK_EXCEPTION(jni);
     if (!success) {
       ResetCodec();
       return false;
     }
 
-    if (callback_status > 0)
+    if (callback_status > 0) {
       drop_next_input_frame_ = true;
     // Theoretically could handle callback_status<0 here, but unclear what that
     // would mean for us.
+    }
   }
 
   return true;
@@ -1782,7 +1917,7 @@
   bool is_platform_supported = jni->CallStaticBooleanMethod(
       j_encoder_class,
       GetStaticMethodID(jni, j_encoder_class, "isPlatformSupported", "()Z"));
-  CHECK_EXCEPTION(jni, "");
+  CHECK_EXCEPTION(jni);
   if (!is_platform_supported)
     return;
 
@@ -1823,6 +1958,8 @@
   explicit MediaCodecVideoDecoder(JNIEnv* jni);
   virtual ~MediaCodecVideoDecoder();
 
+  static int SetAndroidObjects(JNIEnv* jni, jobject render_egl_context);
+
   virtual int32_t InitDecode(const VideoCodec* codecSettings,
       int32_t numberOfCores) OVERRIDE;
 
@@ -1848,13 +1985,29 @@
   int32_t InitDecodeOnCodecThread();
   int32_t ReleaseOnCodecThread();
   int32_t DecodeOnCodecThread(const EncodedImage& inputImage);
+  // Deliver any outputs pending in the MediaCodec to our |callback_| and return
+  // true on success.
+  bool DeliverPendingOutputs(JNIEnv* jni, int dequeue_timeout_us);
+
 
   bool key_frame_required_;
   bool inited_;
+  bool use_surface_;
   VideoCodec codec_;
   I420VideoFrame decoded_image_;
+  NativeHandleImpl native_handle_;
   DecodedImageCallback* callback_;
-  int frames_received_; // Number of frames received by decoder.
+  int frames_received_;  // Number of frames received by decoder.
+  int frames_decoded_;  // Number of frames decoded by decoder
+  int64_t start_time_ms_;  // Start time for statistics.
+  int current_frames_;  // Number of frames in the current statistics interval.
+  int current_bytes_;  // Encoded bytes in the current statistics interval.
+  int current_decoding_time_ms_;  // Overall decoding time in the current second
+  uint32_t max_pending_frames_;  // Maximum number of pending input frames
+  std::vector<int32_t> timestamps_;
+  std::vector<int64_t> ntp_times_ms_;
+  std::vector<int64_t> frame_rtc_times_ms_;  // Time when video frame is sent to
+                                             // decoder input.
 
   // State that is constant for the lifetime of this object once the ctor
   // returns.
@@ -1867,6 +2020,7 @@
   jmethodID j_queue_input_buffer_method_;
   jmethodID j_dequeue_output_buffer_method_;
   jmethodID j_release_output_buffer_method_;
+  // MediaCodecVideoDecoder fields.
   jfieldID j_input_buffers_field_;
   jfieldID j_output_buffers_field_;
   jfieldID j_color_format_field_;
@@ -1874,32 +2028,59 @@
   jfieldID j_height_field_;
   jfieldID j_stride_field_;
   jfieldID j_slice_height_field_;
+  jfieldID j_surface_texture_field_;
+  jfieldID j_textureID_field_;
+  // MediaCodecVideoDecoder.DecoderOutputBufferInfo fields.
+  jfieldID j_info_index_field_;
+  jfieldID j_info_offset_field_;
+  jfieldID j_info_size_field_;
+  jfieldID j_info_presentation_timestamp_us_field_;
 
   // Global references; must be deleted in Release().
   std::vector<jobject> input_buffers_;
+  jobject surface_texture_;
+
+  // Render EGL context.
+  static jobject render_egl_context_;
 };
 
-MediaCodecVideoDecoder::MediaCodecVideoDecoder(JNIEnv* jni) :
-  key_frame_required_(true),
-  inited_(false),
-  codec_thread_(new Thread()),
-  j_media_codec_video_decoder_class_(
-      jni,
-      FindClass(jni, "org/webrtc/MediaCodecVideoDecoder")),
-  j_media_codec_video_decoder_(
-      jni,
-      jni->NewObject(*j_media_codec_video_decoder_class_,
-                     GetMethodID(jni,
-                                 *j_media_codec_video_decoder_class_,
-                                 "<init>",
-                                 "()V"))) {
+jobject MediaCodecVideoDecoder::render_egl_context_ = NULL;
+
+int MediaCodecVideoDecoder::SetAndroidObjects(JNIEnv* jni,
+    jobject render_egl_context) {
+  if (render_egl_context_) {
+    jni->DeleteGlobalRef(render_egl_context_);
+  }
+  if (IsNull(jni, render_egl_context)) {
+    render_egl_context_ = NULL;
+  } else {
+    render_egl_context_ = jni->NewGlobalRef(render_egl_context);
+  }
+  ALOGD("VideoDecoder EGL context set.");
+  return 0;
+}
+
+MediaCodecVideoDecoder::MediaCodecVideoDecoder(JNIEnv* jni)
+  : key_frame_required_(true),
+    inited_(false),
+    codec_thread_(new Thread()),
+    j_media_codec_video_decoder_class_(
+        jni,
+        FindClass(jni, "org/webrtc/MediaCodecVideoDecoder")),
+          j_media_codec_video_decoder_(
+              jni,
+              jni->NewObject(*j_media_codec_video_decoder_class_,
+                   GetMethodID(jni,
+                              *j_media_codec_video_decoder_class_,
+                              "<init>",
+                              "()V"))) {
   ScopedLocalRefFrame local_ref_frame(jni);
   codec_thread_->SetName("MediaCodecVideoDecoder", NULL);
-  CHECK(codec_thread_->Start(), "Failed to start MediaCodecVideoDecoder");
+  CHECK(codec_thread_->Start()) << "Failed to start MediaCodecVideoDecoder";
 
-  j_init_decode_method_ = GetMethodID(jni,
-                                      *j_media_codec_video_decoder_class_,
-                                      "initDecode", "(II)Z");
+  j_init_decode_method_ = GetMethodID(
+      jni, *j_media_codec_video_decoder_class_, "initDecode",
+      "(IIZLandroid/opengl/EGLContext;)Z");
   j_release_method_ =
       GetMethodID(jni, *j_media_codec_video_decoder_class_, "release", "()V");
   j_dequeue_input_buffer_method_ = GetMethodID(
@@ -1907,9 +2088,10 @@
   j_queue_input_buffer_method_ = GetMethodID(
       jni, *j_media_codec_video_decoder_class_, "queueInputBuffer", "(IIJ)Z");
   j_dequeue_output_buffer_method_ = GetMethodID(
-      jni, *j_media_codec_video_decoder_class_, "dequeueOutputBuffer", "()I");
+      jni, *j_media_codec_video_decoder_class_, "dequeueOutputBuffer",
+      "(I)Lorg/webrtc/MediaCodecVideoDecoder$DecoderOutputBufferInfo;");
   j_release_output_buffer_method_ = GetMethodID(
-      jni, *j_media_codec_video_decoder_class_, "releaseOutputBuffer", "(I)Z");
+      jni, *j_media_codec_video_decoder_class_, "releaseOutputBuffer", "(IZ)Z");
 
   j_input_buffers_field_ = GetFieldID(
       jni, *j_media_codec_video_decoder_class_,
@@ -1927,12 +2109,32 @@
       jni, *j_media_codec_video_decoder_class_, "stride", "I");
   j_slice_height_field_ = GetFieldID(
       jni, *j_media_codec_video_decoder_class_, "sliceHeight", "I");
+  j_textureID_field_ = GetFieldID(
+      jni, *j_media_codec_video_decoder_class_, "textureID", "I");
+  j_surface_texture_field_ = GetFieldID(
+      jni, *j_media_codec_video_decoder_class_, "surfaceTexture",
+      "Landroid/graphics/SurfaceTexture;");
 
-  CHECK_EXCEPTION(jni, "MediaCodecVideoDecoder ctor failed");
+  jclass j_decoder_output_buffer_info_class = FindClass(jni,
+      "org/webrtc/MediaCodecVideoDecoder$DecoderOutputBufferInfo");
+  j_info_index_field_ = GetFieldID(
+      jni, j_decoder_output_buffer_info_class, "index", "I");
+  j_info_offset_field_ = GetFieldID(
+      jni, j_decoder_output_buffer_info_class, "offset", "I");
+  j_info_size_field_ = GetFieldID(
+      jni, j_decoder_output_buffer_info_class, "size", "I");
+  j_info_presentation_timestamp_us_field_ = GetFieldID(
+      jni, j_decoder_output_buffer_info_class, "presentationTimestampUs", "J");
+
+  CHECK_EXCEPTION(jni) << "MediaCodecVideoDecoder ctor failed";
+  use_surface_ = true;
+  if (render_egl_context_ == NULL)
+    use_surface_ = false;
   memset(&codec_, 0, sizeof(codec_));
 }
 
 MediaCodecVideoDecoder::~MediaCodecVideoDecoder() {
+  // Call Release() to ensure no more callbacks to us after we are deleted.
   Release();
 }
 
@@ -1954,6 +2156,7 @@
   // Always start with a complete key frame.
   key_frame_required_ = true;
   frames_received_ = 0;
+  frames_decoded_ = 0;
 
   // Call Java init.
   return codec_thread_->Invoke<int32_t>(
@@ -1964,28 +2167,50 @@
   CheckOnCodecThread();
   JNIEnv* jni = AttachCurrentThreadIfNeeded();
   ScopedLocalRefFrame local_ref_frame(jni);
-  ALOGD("InitDecodeOnCodecThread: %d x %d. FPS: %d",
+  ALOGD("InitDecodeOnCodecThread: %d x %d. fps: %d",
       codec_.width, codec_.height, codec_.maxFramerate);
 
   bool success = jni->CallBooleanMethod(*j_media_codec_video_decoder_,
                                        j_init_decode_method_,
                                        codec_.width,
-                                       codec_.height);
-  CHECK_EXCEPTION(jni, "");
-  if (!success)
+                                       codec_.height,
+                                       use_surface_,
+                                       render_egl_context_);
+  CHECK_EXCEPTION(jni);
+  if (!success) {
     return WEBRTC_VIDEO_CODEC_ERROR;
+  }
   inited_ = true;
 
+  max_pending_frames_ = 0;
+  if (use_surface_) {
+    max_pending_frames_ = 1;
+  }
+  start_time_ms_ = GetCurrentTimeMs();
+  current_frames_ = 0;
+  current_bytes_ = 0;
+  current_decoding_time_ms_ = 0;
+  timestamps_.clear();
+  ntp_times_ms_.clear();
+  frame_rtc_times_ms_.clear();
+
   jobjectArray input_buffers = (jobjectArray)GetObjectField(
       jni, *j_media_codec_video_decoder_, j_input_buffers_field_);
   size_t num_input_buffers = jni->GetArrayLength(input_buffers);
-
   input_buffers_.resize(num_input_buffers);
   for (size_t i = 0; i < num_input_buffers; ++i) {
     input_buffers_[i] =
         jni->NewGlobalRef(jni->GetObjectArrayElement(input_buffers, i));
-    CHECK_EXCEPTION(jni, "");
+    CHECK_EXCEPTION(jni);
   }
+
+  if (use_surface_) {
+    jobject surface_texture = GetObjectField(
+        jni, *j_media_codec_video_decoder_, j_surface_texture_field_);
+    surface_texture_ = jni->NewGlobalRef(surface_texture);
+  }
+  codec_thread_->PostDelayed(kMediaCodecPollMs, this);
+
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
@@ -1995,25 +2220,40 @@
 }
 
 int32_t MediaCodecVideoDecoder::ReleaseOnCodecThread() {
-  if (!inited_)
+  if (!inited_) {
     return WEBRTC_VIDEO_CODEC_OK;
+  }
   CheckOnCodecThread();
   JNIEnv* jni = AttachCurrentThreadIfNeeded();
   ALOGD("DecoderRelease: Frames received: %d.", frames_received_);
   ScopedLocalRefFrame local_ref_frame(jni);
-  for (size_t i = 0; i < input_buffers_.size(); ++i)
+  for (size_t i = 0; i < input_buffers_.size(); i++) {
     jni->DeleteGlobalRef(input_buffers_[i]);
+  }
   input_buffers_.clear();
+  if (use_surface_) {
+    // Before deleting texture object make sure it is no longer referenced
+    // by any TextureVideoFrame.
+    int32_t waitTimeoutUs = 3000000;  // 3 second wait
+    while (waitTimeoutUs > 0 && native_handle_.ref_count() > 0) {
+      ALOGD("Current Texture RefCnt: %d", native_handle_.ref_count());
+      usleep(30000);
+      waitTimeoutUs -= 30000;
+    }
+    ALOGD("TextureRefCnt: %d", native_handle_.ref_count());
+    jni->DeleteGlobalRef(surface_texture_);
+  }
   jni->CallVoidMethod(*j_media_codec_video_decoder_, j_release_method_);
-  CHECK_EXCEPTION(jni, "");
+  CHECK_EXCEPTION(jni);
+  rtc::MessageQueueManager::Clear(this);
   inited_ = false;
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 
 void MediaCodecVideoDecoder::CheckOnCodecThread() {
-  CHECK(codec_thread_ == ThreadManager::Instance()->CurrentThread(),
-        "Running on wrong thread!");
+  CHECK(codec_thread_ == ThreadManager::Instance()->CurrentThread())
+      << "Running on wrong thread!";
 }
 
 int32_t MediaCodecVideoDecoder::Decode(
@@ -2066,10 +2306,25 @@
   JNIEnv* jni = AttachCurrentThreadIfNeeded();
   ScopedLocalRefFrame local_ref_frame(jni);
 
+  // Try to drain the decoder and wait until output is not too
+  // much behind the input.
+  if (frames_received_ > frames_decoded_ + max_pending_frames_) {
+    ALOGV("Wait for output...");
+    if (!DeliverPendingOutputs(jni, kMediaCodecTimeoutMs * 1000)) {
+      Reset();
+      return WEBRTC_VIDEO_CODEC_ERROR;
+    }
+    if (frames_received_ > frames_decoded_ + max_pending_frames_) {
+      ALOGE("Output buffer dequeue timeout");
+      Reset();
+      return WEBRTC_VIDEO_CODEC_ERROR;
+    }
+  }
+
   // Get input buffer.
   int j_input_buffer_index = jni->CallIntMethod(*j_media_codec_video_decoder_,
                                                 j_dequeue_input_buffer_method_);
-  CHECK_EXCEPTION(jni, "");
+  CHECK_EXCEPTION(jni);
   if (j_input_buffer_index < 0) {
     ALOGE("dequeueInputBuffer error");
     Reset();
@@ -2080,19 +2335,26 @@
   jobject j_input_buffer = input_buffers_[j_input_buffer_index];
   uint8* buffer =
       reinterpret_cast<uint8*>(jni->GetDirectBufferAddress(j_input_buffer));
-  CHECK(buffer, "Indirect buffer??");
+  CHECK(buffer) << "Indirect buffer??";
   int64 buffer_capacity = jni->GetDirectBufferCapacity(j_input_buffer);
-  CHECK_EXCEPTION(jni, "");
+  CHECK_EXCEPTION(jni);
   if (buffer_capacity < inputImage._length) {
     ALOGE("Input frame size %d is bigger than buffer size %d.",
         inputImage._length, buffer_capacity);
     Reset();
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
-  ALOGV("Decode frame # %d. Buffer # %d. Size: %d",
+  ALOGV("Decoder frame in # %d. Buffer # %d. Size: %d",
       frames_received_, j_input_buffer_index, inputImage._length);
   memcpy(buffer, inputImage._buffer, inputImage._length);
 
+  // Save input image timestamps for later output.
+  frames_received_++;
+  current_bytes_ += inputImage._length;
+  timestamps_.push_back(inputImage._timeStamp);
+  ntp_times_ms_.push_back(inputImage.ntp_time_ms_);
+  frame_rtc_times_ms_.push_back(GetCurrentTimeMs());
+
   // Feed input to decoder.
   jlong timestamp_us = (frames_received_ * 1000000) / codec_.maxFramerate;
   bool success = jni->CallBooleanMethod(*j_media_codec_video_decoder_,
@@ -2100,33 +2362,55 @@
                                         j_input_buffer_index,
                                         inputImage._length,
                                         timestamp_us);
-  CHECK_EXCEPTION(jni, "");
+  CHECK_EXCEPTION(jni);
   if (!success) {
     ALOGE("queueInputBuffer error");
     Reset();
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
 
-  // Get output index.
-  int j_output_buffer_index =
-      jni->CallIntMethod(*j_media_codec_video_decoder_,
-                         j_dequeue_output_buffer_method_);
-  CHECK_EXCEPTION(jni, "");
-  if (j_output_buffer_index < 0) {
-    ALOGE("dequeueOutputBuffer error");
+  // Try to drain the decoder
+  if (!DeliverPendingOutputs(jni, 0)) {
+    ALOGE("DeliverPendingOutputs error");
     Reset();
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
 
-  // Extract data from Java ByteBuffer.
-  jobjectArray output_buffers = reinterpret_cast<jobjectArray>(GetObjectField(
-      jni, *j_media_codec_video_decoder_, j_output_buffers_field_));
-  jobject output_buffer =
-      jni->GetObjectArrayElement(output_buffers, j_output_buffer_index);
-  buffer_capacity = jni->GetDirectBufferCapacity(output_buffer);
-  uint8_t* payload =
-      reinterpret_cast<uint8_t*>(jni->GetDirectBufferAddress(output_buffer));
-  CHECK_EXCEPTION(jni, "");
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+bool MediaCodecVideoDecoder::DeliverPendingOutputs(
+    JNIEnv* jni, int dequeue_timeout_us) {
+  if (frames_received_ <= frames_decoded_) {
+    // No need to query for output buffers - decoder is drained.
+    return true;
+  }
+  // Get decoder output.
+  jobject j_decoder_output_buffer_info = jni->CallObjectMethod(
+      *j_media_codec_video_decoder_,
+      j_dequeue_output_buffer_method_,
+      dequeue_timeout_us);
+
+  CHECK_EXCEPTION(jni);
+  if (IsNull(jni, j_decoder_output_buffer_info)) {
+    return true;
+  }
+
+  // Extract output buffer info from Java DecoderOutputBufferInfo.
+  int output_buffer_index =
+      GetIntField(jni, j_decoder_output_buffer_info, j_info_index_field_);
+  if (output_buffer_index < 0) {
+    ALOGE("dequeueOutputBuffer error : %d", output_buffer_index);
+    Reset();
+    return false;
+  }
+  int output_buffer_offset =
+      GetIntField(jni, j_decoder_output_buffer_info, j_info_offset_field_);
+  int output_buffer_size =
+      GetIntField(jni, j_decoder_output_buffer_info, j_info_size_field_);
+  CHECK_EXCEPTION(jni);
+
+  // Get decoded video frame properties.
   int color_format = GetIntField(jni, *j_media_codec_video_decoder_,
       j_color_format_field_);
   int width = GetIntField(jni, *j_media_codec_video_decoder_, j_width_field_);
@@ -2134,52 +2418,112 @@
   int stride = GetIntField(jni, *j_media_codec_video_decoder_, j_stride_field_);
   int slice_height = GetIntField(jni, *j_media_codec_video_decoder_,
       j_slice_height_field_);
-  if (buffer_capacity < width * height * 3 / 2) {
-    ALOGE("Insufficient output buffer capacity: %d", buffer_capacity);
-    Reset();
-    return WEBRTC_VIDEO_CODEC_ERROR;
-  }
-  ALOGV("Decoder got output buffer %d x %d. %d x %d. Color: 0x%x. Size: %d",
-      width, height, stride, slice_height, color_format, buffer_capacity);
+  int texture_id = GetIntField(jni, *j_media_codec_video_decoder_,
+      j_textureID_field_);
 
-  if (color_format == COLOR_FormatYUV420Planar) {
-    decoded_image_.CreateFrame(
-        stride * slice_height, payload,
-        (stride * slice_height) / 4, payload + (stride * slice_height),
-        (stride * slice_height) / 4, payload + (5 * stride * slice_height / 4),
-        width, height,
-        stride, stride / 2, stride / 2);
-  } else {
-    // All other supported formats are nv12.
-    decoded_image_.CreateEmptyFrame(width, height, width, width / 2, width / 2);
-    libyuv::NV12ToI420(
-        payload, stride,
-        payload + stride * slice_height, stride,
-        decoded_image_.buffer(webrtc::kYPlane),
-        decoded_image_.stride(webrtc::kYPlane),
-        decoded_image_.buffer(webrtc::kUPlane),
-        decoded_image_.stride(webrtc::kUPlane),
-        decoded_image_.buffer(webrtc::kVPlane),
-        decoded_image_.stride(webrtc::kVPlane),
-        width, height);
+  // Extract data from Java ByteBuffer and create output yuv420 frame -
+  // for non surface decoding only.
+  if (!use_surface_) {
+    if (output_buffer_size < width * height * 3 / 2) {
+      ALOGE("Insufficient output buffer size: %d", output_buffer_size);
+      Reset();
+      return false;
+    }
+    jobjectArray output_buffers = reinterpret_cast<jobjectArray>(GetObjectField(
+        jni, *j_media_codec_video_decoder_, j_output_buffers_field_));
+    jobject output_buffer =
+        jni->GetObjectArrayElement(output_buffers, output_buffer_index);
+    uint8_t* payload = reinterpret_cast<uint8_t*>(jni->GetDirectBufferAddress(
+        output_buffer));
+    CHECK_EXCEPTION(jni);
+    payload += output_buffer_offset;
+
+    // Create yuv420 frame.
+    if (color_format == COLOR_FormatYUV420Planar) {
+      decoded_image_.CreateFrame(
+          stride * slice_height, payload,
+          (stride * slice_height) / 4, payload + (stride * slice_height),
+          (stride * slice_height) / 4, payload + (5 * stride * slice_height / 4),
+          width, height,
+          stride, stride / 2, stride / 2);
+    } else {
+      // All other supported formats are nv12.
+      decoded_image_.CreateEmptyFrame(width, height, width,
+          width / 2, width / 2);
+      libyuv::NV12ToI420(
+          payload, stride,
+          payload + stride * slice_height, stride,
+          decoded_image_.buffer(webrtc::kYPlane),
+          decoded_image_.stride(webrtc::kYPlane),
+          decoded_image_.buffer(webrtc::kUPlane),
+          decoded_image_.stride(webrtc::kUPlane),
+          decoded_image_.buffer(webrtc::kVPlane),
+          decoded_image_.stride(webrtc::kVPlane),
+          width, height);
+    }
   }
 
+  // Get frame timestamps from a queue.
+  int32_t timestamp = timestamps_.front();
+  timestamps_.erase(timestamps_.begin());
+  int64_t ntp_time_ms = ntp_times_ms_.front();
+  ntp_times_ms_.erase(ntp_times_ms_.begin());
+  int64_t frame_decoding_time_ms = GetCurrentTimeMs() -
+      frame_rtc_times_ms_.front();
+  frame_rtc_times_ms_.erase(frame_rtc_times_ms_.begin());
+
+  ALOGV("Decoder frame out # %d. %d x %d. %d x %d. Color: 0x%x. Size: %d."
+      " DecTime: %lld", frames_decoded_, width, height, stride, slice_height,
+      color_format, output_buffer_size, frame_decoding_time_ms);
+
   // Return output buffer back to codec.
-  success = jni->CallBooleanMethod(*j_media_codec_video_decoder_,
-                                   j_release_output_buffer_method_,
-                                   j_output_buffer_index);
-  CHECK_EXCEPTION(jni, "");
+  bool success = jni->CallBooleanMethod(
+      *j_media_codec_video_decoder_,
+      j_release_output_buffer_method_,
+      output_buffer_index,
+      use_surface_);
+  CHECK_EXCEPTION(jni);
   if (!success) {
     ALOGE("releaseOutputBuffer error");
     Reset();
-    return WEBRTC_VIDEO_CODEC_ERROR;
+    return false;
   }
 
-  // Callback.
-  decoded_image_.set_timestamp(inputImage._timeStamp);
-  decoded_image_.set_ntp_time_ms(inputImage.ntp_time_ms_);
-  frames_received_++;
-  return callback_->Decoded(decoded_image_);
+  // Calculate and print decoding statistics - every 3 seconds.
+  frames_decoded_++;
+  current_frames_++;
+  current_decoding_time_ms_ += frame_decoding_time_ms;
+  int statistic_time_ms = GetCurrentTimeMs() - start_time_ms_;
+  if (statistic_time_ms >= kMediaCodecStatisticsIntervalMs &&
+      current_frames_ > 0) {
+    ALOGD("Decoder bitrate: %d kbps, fps: %d, decTime: %d for last %d ms",
+        current_bytes_ * 8 / statistic_time_ms,
+        (current_frames_ * 1000 + statistic_time_ms / 2) / statistic_time_ms,
+        current_decoding_time_ms_ / current_frames_, statistic_time_ms);
+    start_time_ms_ = GetCurrentTimeMs();
+    current_frames_ = 0;
+    current_bytes_ = 0;
+    current_decoding_time_ms_ = 0;
+  }
+
+  // Callback - output decoded frame.
+  int32_t callback_status = WEBRTC_VIDEO_CODEC_OK;
+  if (use_surface_) {
+    native_handle_.SetTextureObject(surface_texture_, texture_id);
+    TextureVideoFrame texture_image(
+        &native_handle_, width, height, timestamp, 0);
+    texture_image.set_ntp_time_ms(ntp_time_ms);
+    callback_status = callback_->Decoded(texture_image);
+  } else {
+    decoded_image_.set_timestamp(timestamp);
+    decoded_image_.set_ntp_time_ms(ntp_time_ms);
+    callback_status = callback_->Decoded(decoded_image_);
+  }
+  if (callback_status > 0) {
+    ALOGE("callback error");
+  }
+
+  return true;
 }
 
 int32_t MediaCodecVideoDecoder::RegisterDecodeCompleteCallback(
@@ -2197,6 +2541,19 @@
 }
 
 void MediaCodecVideoDecoder::OnMessage(rtc::Message* msg) {
+  JNIEnv* jni = AttachCurrentThreadIfNeeded();
+  ScopedLocalRefFrame local_ref_frame(jni);
+  if (!inited_) {
+    return;
+  }
+  // We only ever send one message to |this| directly (not through a Bind()'d
+  // functor), so expect no ID/data.
+  CHECK(!msg->message_id) << "Unexpected message!";
+  CHECK(!msg->pdata) << "Unexpected message!";
+  CheckOnCodecThread();
+
+  DeliverPendingOutputs(jni, 0);
+  codec_thread_->PostDelayed(kMediaCodecPollMs, this);
 }
 
 class MediaCodecVideoDecoderFactory
@@ -2221,7 +2578,7 @@
   is_platform_supported_ = jni->CallStaticBooleanMethod(
       j_decoder_class,
       GetStaticMethodID(jni, j_decoder_class, "isPlatformSupported", "()Z"));
-  CHECK_EXCEPTION(jni, "");
+  CHECK_EXCEPTION(jni);
 }
 
 MediaCodecVideoDecoderFactory::~MediaCodecVideoDecoderFactory() {}
@@ -2240,7 +2597,7 @@
   delete decoder;
 }
 
-#endif  // ANDROID
+#endif  // #if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
 
 }  // anonymous namespace
 
@@ -2250,13 +2607,13 @@
   Java_org_webrtc_##name
 
 extern "C" jint JNIEXPORT JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
-  CHECK(!g_jvm, "JNI_OnLoad called more than once!");
+  CHECK(!g_jvm) << "JNI_OnLoad called more than once!";
   g_jvm = jvm;
-  CHECK(g_jvm, "JNI_OnLoad handed NULL?");
+  CHECK(g_jvm) << "JNI_OnLoad handed NULL?";
 
-  CHECK(!pthread_once(&g_jni_ptr_once, &CreateJNIPtrKey), "pthread_once");
+  CHECK(!pthread_once(&g_jni_ptr_once, &CreateJNIPtrKey)) << "pthread_once";
 
-  CHECK(rtc::InitializeSSL(), "Failed to InitializeSSL()");
+  CHECK(rtc::InitializeSSL()) << "Failed to InitializeSSL()";
 
   JNIEnv* jni;
   if (jvm->GetEnv(reinterpret_cast<void**>(&jni), JNI_VERSION_1_6) != JNI_OK)
@@ -2270,7 +2627,7 @@
   g_class_reference_holder->FreeReferences(AttachCurrentThreadIfNeeded());
   delete g_class_reference_holder;
   g_class_reference_holder = NULL;
-  CHECK(rtc::CleanupSSL(), "Failed to CleanupSSL()");
+  CHECK(rtc::CleanupSSL()) << "Failed to CleanupSSL()";
   g_jvm = NULL;
 }
 
@@ -2306,8 +2663,8 @@
 
 JOW(jlong, DataChannel_bufferedAmount)(JNIEnv* jni, jobject j_dc) {
   uint64 buffered_amount = ExtractNativeDC(jni, j_dc)->buffered_amount();
-  CHECK(buffered_amount <= std::numeric_limits<int64>::max(),
-        "buffered_amount overflowed jlong!");
+  CHECK_LE(buffered_amount, std::numeric_limits<int64>::max())
+      << "buffered_amount overflowed jlong!";
   return static_cast<jlong>(buffered_amount);
 }
 
@@ -2335,12 +2692,12 @@
   std::string path = JavaToStdString(jni, j_path);
   if (nativeLevels != webrtc::kTraceNone) {
     webrtc::Trace::set_level_filter(nativeLevels);
-#ifdef ANDROID
+#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
     if (path != "logcat:") {
 #endif
-      CHECK(webrtc::Trace::SetTraceFile(path.c_str(), false) == 0,
-            "SetTraceFile failed");
-#ifdef ANDROID
+      CHECK_EQ(0, webrtc::Trace::SetTraceFile(path.c_str(), false))
+          << "SetTraceFile failed";
+#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
     } else {
       // Intentionally leak this to avoid needing to reason about its lifecycle.
       // It keeps no state and functions only as a dispatch point.
@@ -2368,10 +2725,14 @@
   delete reinterpret_cast<cricket::VideoCapturer*>(j_p);
 }
 
-JOW(void, VideoRenderer_free)(JNIEnv*, jclass, jlong j_p) {
+JOW(void, VideoRenderer_freeGuiVideoRenderer)(JNIEnv*, jclass, jlong j_p) {
   delete reinterpret_cast<VideoRendererWrapper*>(j_p);
 }
 
+JOW(void, VideoRenderer_freeWrappedVideoRenderer)(JNIEnv*, jclass, jlong j_p) {
+  delete reinterpret_cast<JavaVideoRendererWrapper*>(j_p);
+}
+
 JOW(void, MediaStreamTrack_free)(JNIEnv*, jclass, jlong j_p) {
   CHECK_RELEASE(reinterpret_cast<MediaStreamTrackInterface*>(j_p));
 }
@@ -2414,19 +2775,28 @@
   return (jlong)new PCOJava(jni, j_observer);
 }
 
-#ifdef ANDROID
+#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
 JOW(jboolean, PeerConnectionFactory_initializeAndroidGlobals)(
     JNIEnv* jni, jclass, jobject context,
-    jboolean initialize_audio, jboolean initialize_video) {
-  CHECK(g_jvm, "JNI_OnLoad failed to run?");
+    jboolean initialize_audio, jboolean initialize_video,
+    jobject render_egl_context) {
+  CHECK(g_jvm) << "JNI_OnLoad failed to run?";
   bool failure = false;
+  if (!factory_static_initialized) {
+    if (initialize_video) {
+      failure |= webrtc::SetCaptureAndroidVM(g_jvm, context);
+      failure |= webrtc::SetRenderAndroidVM(g_jvm);
+    }
+    if (initialize_audio)
+      failure |= webrtc::VoiceEngine::SetAndroidObjects(g_jvm, jni, context);
+    factory_static_initialized = true;
+  }
   if (initialize_video)
-    failure |= webrtc::VideoEngine::SetAndroidObjects(g_jvm, context);
-  if (initialize_audio)
-    failure |= webrtc::VoiceEngine::SetAndroidObjects(g_jvm, jni, context);
+    failure |= MediaCodecVideoDecoder::SetAndroidObjects(jni,
+        render_egl_context);
   return !failure;
 }
-#endif  // ANDROID
+#endif  // defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
 
 // Helper struct for working around the fact that CreatePeerConnectionFactory()
 // comes in two flavors: either entirely automagical (constructing its own
@@ -2466,11 +2836,11 @@
   worker_thread->SetName("worker_thread", NULL);
   Thread* signaling_thread = new Thread();
   signaling_thread->SetName("signaling_thread", NULL);
-  CHECK(worker_thread->Start() && signaling_thread->Start(),
-        "Failed to start threads");
+  CHECK(worker_thread->Start() && signaling_thread->Start())
+      << "Failed to start threads";
   scoped_ptr<cricket::WebRtcVideoEncoderFactory> encoder_factory;
   scoped_ptr<cricket::WebRtcVideoDecoderFactory> decoder_factory;
-#ifdef ANDROID
+#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
   encoder_factory.reset(new MediaCodecVideoEncoderFactory());
   decoder_factory.reset(new MediaCodecVideoDecoderFactory());
 #endif
@@ -2558,15 +2928,15 @@
   jmethodID iterator_id = GetMethodID(
       jni, list_class, "iterator", "()Ljava/util/Iterator;");
   jobject iterator = jni->CallObjectMethod(j_ice_servers, iterator_id);
-  CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+  CHECK_EXCEPTION(jni) << "error during CallObjectMethod";
   jmethodID iterator_has_next = GetMethodID(
       jni, GetObjectClass(jni, iterator), "hasNext", "()Z");
   jmethodID iterator_next = GetMethodID(
       jni, GetObjectClass(jni, iterator), "next", "()Ljava/lang/Object;");
   while (jni->CallBooleanMethod(iterator, iterator_has_next)) {
-    CHECK_EXCEPTION(jni, "error during CallBooleanMethod");
+    CHECK_EXCEPTION(jni) << "error during CallBooleanMethod";
     jobject j_ice_server = jni->CallObjectMethod(iterator, iterator_next);
-    CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+    CHECK_EXCEPTION(jni) << "error during CallObjectMethod";
     jclass j_ice_server_class = GetObjectClass(jni, j_ice_server);
     jfieldID j_ice_server_uri_id =
         GetFieldID(jni, j_ice_server_class, "uri", "Ljava/lang/String;");
@@ -2586,7 +2956,7 @@
     server.password = JavaToStdString(jni, password);
     ice_servers->push_back(server);
   }
-  CHECK_EXCEPTION(jni, "error during CallBooleanMethod");
+  CHECK_EXCEPTION(jni) << "error during CallBooleanMethod";
 }
 
 JOW(jlong, PeerConnectionFactory_nativeCreatePeerConnection)(
@@ -2635,16 +3005,16 @@
   // vararg parameter as 64-bit and reading memory that doesn't belong to the
   // 32-bit parameter.
   jlong nativeChannelPtr = jlongFromPointer(channel.get());
-  CHECK(nativeChannelPtr, "Failed to create DataChannel");
+  CHECK(nativeChannelPtr) << "Failed to create DataChannel";
   jclass j_data_channel_class = FindClass(jni, "org/webrtc/DataChannel");
   jmethodID j_data_channel_ctor = GetMethodID(
       jni, j_data_channel_class, "<init>", "(J)V");
   jobject j_channel = jni->NewObject(
       j_data_channel_class, j_data_channel_ctor, nativeChannelPtr);
-  CHECK_EXCEPTION(jni, "error during NewObject");
+  CHECK_EXCEPTION(jni) << "error during NewObject";
   // Channel is now owned by Java object, and will be freed from there.
   int bumped_count = channel->AddRef();
-  CHECK(bumped_count == 2, "Unexpected refcount");
+  CHECK(bumped_count == 2) << "Unexpected refcount";
   return j_channel;
 }
 
@@ -2680,7 +3050,7 @@
       "()Ljava/lang/String;");
   jstring j_type_string = (jstring)jni->CallObjectMethod(
       j_type, j_canonical_form_id);
-  CHECK_EXCEPTION(jni, "error during CallObjectMethod");
+  CHECK_EXCEPTION(jni) << "error during CallObjectMethod";
   std::string std_type = JavaToStdString(jni, j_type_string);
 
   jfieldID j_description_id = GetFieldID(
@@ -2790,7 +3160,7 @@
   std::string device_name = JavaToStdString(jni, j_device_name);
   scoped_ptr<cricket::DeviceManagerInterface> device_manager(
       cricket::DeviceManagerFactory::Create());
-  CHECK(device_manager->Init(), "DeviceManager::Init() failed");
+  CHECK(device_manager->Init()) << "DeviceManager::Init() failed";
   cricket::Device device;
   if (!device_manager->GetVideoCaptureDevice(device_name, &device)) {
     LOG(LS_ERROR) << "GetVideoCaptureDevice failed for " << device_name;
@@ -2826,8 +3196,8 @@
 
 JOW(void, VideoSource_restart)(
     JNIEnv* jni, jclass, jlong j_p_source, jlong j_p_format) {
-  CHECK(j_p_source, "");
-  CHECK(j_p_format, "");
+  CHECK(j_p_source);
+  CHECK(j_p_format);
   scoped_ptr<cricket::VideoFormatPod> format(
       reinterpret_cast<cricket::VideoFormatPod*>(j_p_format));
   reinterpret_cast<VideoSourceInterface*>(j_p_source)->GetVideoCapturer()->
diff --git a/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java b/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java
index a6a059e..9280743 100644
--- a/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java
+++ b/app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java
@@ -27,14 +27,24 @@
 
 package org.webrtc;
 
+import android.graphics.SurfaceTexture;
 import android.media.MediaCodec;
-import android.media.MediaCodecInfo.CodecCapabilities;
 import android.media.MediaCodecInfo;
+import android.media.MediaCodecInfo.CodecCapabilities;
 import android.media.MediaCodecList;
 import android.media.MediaFormat;
+import android.opengl.EGL14;
+import android.opengl.EGLConfig;
+import android.opengl.EGLContext;
+import android.opengl.EGLDisplay;
+import android.opengl.EGLSurface;
+import android.opengl.GLES11Ext;
+import android.opengl.GLES20;
 import android.os.Build;
 import android.os.Bundle;
 import android.util.Log;
+import android.view.Surface;
+
 import java.nio.ByteBuffer;
 
 // Java-side of peerconnection_jni.cc:MediaCodecVideoDecoder.
@@ -49,7 +59,7 @@
 
   private static final String TAG = "MediaCodecVideoDecoder";
 
-  private static final int DEQUEUE_TIMEOUT = 1000000;  // 1 sec timeout.
+  private static final int DEQUEUE_INPUT_TIMEOUT = 500000;  // 500 ms timeout.
   private Thread mediaCodecThread;
   private MediaCodec mediaCodec;
   private ByteBuffer[] inputBuffers;
@@ -57,7 +67,7 @@
   private static final String VP8_MIME_TYPE = "video/x-vnd.on2.vp8";
   // List of supported HW VP8 decoders.
   private static final String[] supportedHwCodecPrefixes =
-    {"OMX.Nvidia."};
+    {"OMX.qcom.", "OMX.Nvidia." };
   // NV12 color format supported by QCOM codec, but not declared in MediaCodec -
   // see /hardware/qcom/media/mm-core/inc/OMX_QCOMExtns.h
   private static final int
@@ -74,12 +84,21 @@
   private int height;
   private int stride;
   private int sliceHeight;
+  private boolean useSurface;
+  private int textureID = -1;
+  private SurfaceTexture surfaceTexture = null;
+  private Surface surface = null;
+  private float[] stMatrix = new float[16];
+  private EGLDisplay eglDisplay = EGL14.EGL_NO_DISPLAY;
+  private EGLContext eglContext = EGL14.EGL_NO_CONTEXT;
+  private EGLSurface eglSurface = EGL14.EGL_NO_SURFACE;
+
 
   private MediaCodecVideoDecoder() { }
 
   // Helper struct for findVp8HwDecoder() below.
   private static class DecoderProperties {
-    DecoderProperties(String codecName, int colorFormat) {
+    public DecoderProperties(String codecName, int colorFormat) {
       this.codecName = codecName;
       this.colorFormat = colorFormat;
     }
@@ -107,26 +126,32 @@
         continue;  // No VP8 support in this codec; try the next one.
       }
       Log.d(TAG, "Found candidate decoder " + name);
+
+      // Check if this is supported HW decoder.
+      boolean supportedCodec = false;
+      for (String hwCodecPrefix : supportedHwCodecPrefixes) {
+        if (name.startsWith(hwCodecPrefix)) {
+          supportedCodec = true;
+          break;
+        }
+      }
+      if (!supportedCodec) {
+        continue;
+      }
+
+      // Check if codec supports either yuv420 or nv12.
       CodecCapabilities capabilities =
           info.getCapabilitiesForType(VP8_MIME_TYPE);
       for (int colorFormat : capabilities.colorFormats) {
         Log.d(TAG, "   Color: 0x" + Integer.toHexString(colorFormat));
       }
-
-      // Check if this is supported HW decoder
-      for (String hwCodecPrefix : supportedHwCodecPrefixes) {
-        if (!name.startsWith(hwCodecPrefix)) {
-          continue;
-        }
-        // Check if codec supports either yuv420 or nv12
-        for (int supportedColorFormat : supportedColorList) {
-          for (int codecColorFormat : capabilities.colorFormats) {
-            if (codecColorFormat == supportedColorFormat) {
-              // Found supported HW VP8 decoder
-              Log.d(TAG, "Found target decoder " + name +
-                  ". Color: 0x" + Integer.toHexString(codecColorFormat));
-              return new DecoderProperties(name, codecColorFormat);
-            }
+      for (int supportedColorFormat : supportedColorList) {
+        for (int codecColorFormat : capabilities.colorFormats) {
+          if (codecColorFormat == supportedColorFormat) {
+            // Found supported HW VP8 decoder.
+            Log.d(TAG, "Found target decoder " + name +
+                ". Color: 0x" + Integer.toHexString(codecColorFormat));
+            return new DecoderProperties(name, codecColorFormat);
           }
         }
       }
@@ -146,31 +171,166 @@
     }
   }
 
-  private boolean initDecode(int width, int height) {
+  private void checkEglError(String msg) {
+    int error;
+    if ((error = EGL14.eglGetError()) != EGL14.EGL_SUCCESS) {
+      Log.e(TAG, msg + ": EGL Error: 0x" + Integer.toHexString(error));
+      throw new RuntimeException(
+          msg + ": EGL error: 0x" + Integer.toHexString(error));
+    }
+  }
+
+  private void checkGlError(String msg) {
+    int error;
+    if ((error = GLES20.glGetError()) != GLES20.GL_NO_ERROR) {
+      Log.e(TAG, msg + ": GL Error: 0x" + Integer.toHexString(error));
+      throw new RuntimeException(
+          msg + ": GL Error: 0x " + Integer.toHexString(error));
+    }
+  }
+
+  private void eglSetup(EGLContext sharedContext, int width, int height) {
+    Log.d(TAG, "EGL setup");
+    if (sharedContext == null) {
+      sharedContext = EGL14.EGL_NO_CONTEXT;
+    }
+    eglDisplay = EGL14.eglGetDisplay(EGL14.EGL_DEFAULT_DISPLAY);
+    if (eglDisplay == EGL14.EGL_NO_DISPLAY) {
+      throw new RuntimeException("Unable to get EGL14 display");
+    }
+    int[] version = new int[2];
+    if (!EGL14.eglInitialize(eglDisplay, version, 0, version, 1)) {
+      throw new RuntimeException("Unable to initialize EGL14");
+    }
+
+    // Configure EGL for pbuffer and OpenGL ES 2.0.
+    int[] attribList = {
+      EGL14.EGL_RED_SIZE, 8,
+      EGL14.EGL_GREEN_SIZE, 8,
+      EGL14.EGL_BLUE_SIZE, 8,
+      EGL14.EGL_RENDERABLE_TYPE, EGL14.EGL_OPENGL_ES2_BIT,
+      EGL14.EGL_SURFACE_TYPE, EGL14.EGL_PBUFFER_BIT,
+      EGL14.EGL_NONE
+    };
+    EGLConfig[] configs = new EGLConfig[1];
+    int[] numConfigs = new int[1];
+    if (!EGL14.eglChooseConfig(eglDisplay, attribList, 0, configs, 0,
+        configs.length, numConfigs, 0)) {
+      throw new RuntimeException("Unable to find RGB888 EGL config");
+    }
+
+    // Configure context for OpenGL ES 2.0.
+    int[] attrib_list = {
+      EGL14.EGL_CONTEXT_CLIENT_VERSION, 2,
+      EGL14.EGL_NONE
+    };
+    eglContext = EGL14.eglCreateContext(eglDisplay, configs[0], sharedContext,
+        attrib_list, 0);
+    checkEglError("eglCreateContext");
+    if (eglContext == null) {
+      throw new RuntimeException("Null EGL context");
+    }
+
+    // Create a pbuffer surface.
+    int[] surfaceAttribs = {
+      EGL14.EGL_WIDTH, width,
+      EGL14.EGL_HEIGHT, height,
+      EGL14.EGL_NONE
+    };
+    eglSurface = EGL14.eglCreatePbufferSurface(eglDisplay, configs[0],
+        surfaceAttribs, 0);
+    checkEglError("eglCreatePbufferSurface");
+    if (eglSurface == null) {
+      throw new RuntimeException("EGL surface was null");
+    }
+  }
+
+  private void eglRelease() {
+    Log.d(TAG, "EGL release");
+    if (eglDisplay != EGL14.EGL_NO_DISPLAY) {
+      EGL14.eglDestroySurface(eglDisplay, eglSurface);
+      EGL14.eglDestroyContext(eglDisplay, eglContext);
+      EGL14.eglReleaseThread();
+      EGL14.eglTerminate(eglDisplay);
+    }
+    eglDisplay = EGL14.EGL_NO_DISPLAY;
+    eglContext = EGL14.EGL_NO_CONTEXT;
+    eglSurface = EGL14.EGL_NO_SURFACE;
+  }
+
+
+  private void makeCurrent() {
+    if (!EGL14.eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) {
+      throw new RuntimeException("eglMakeCurrent failed");
+    }
+  }
+
+  private boolean initDecode(int width, int height, boolean useSurface,
+      EGLContext sharedContext) {
     if (mediaCodecThread != null) {
       throw new RuntimeException("Forgot to release()?");
     }
+    if (useSurface && sharedContext == null) {
+      throw new RuntimeException("No shared EGL context.");
+    }
     DecoderProperties properties = findVp8HwDecoder();
     if (properties == null) {
       throw new RuntimeException("Cannot find HW VP8 decoder");
     }
     Log.d(TAG, "Java initDecode: " + width + " x " + height +
-        ". Color: 0x" + Integer.toHexString(properties.colorFormat));
+        ". Color: 0x" + Integer.toHexString(properties.colorFormat) +
+        ". Use Surface: " + useSurface);
+    if (sharedContext != null) {
+      Log.d(TAG, "Decoder shared EGL Context: " + sharedContext);
+    }
     mediaCodecThread = Thread.currentThread();
     try {
+      Surface decodeSurface = null;
       this.width = width;
       this.height = height;
+      this.useSurface = useSurface;
       stride = width;
       sliceHeight = height;
+
+      if (useSurface) {
+        // Create shared EGL context.
+        eglSetup(sharedContext, width, height);
+        makeCurrent();
+
+        // Create output surface
+        int[] textures = new int[1];
+        GLES20.glGenTextures(1, textures, 0);
+        checkGlError("glGenTextures");
+        textureID = textures[0];
+        GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, textureID);
+        checkGlError("glBindTexture mTextureID");
+
+        GLES20.glTexParameterf(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
+            GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_NEAREST);
+        GLES20.glTexParameterf(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
+            GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR);
+        GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
+            GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_CLAMP_TO_EDGE);
+        GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
+            GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_CLAMP_TO_EDGE);
+        checkGlError("glTexParameter");
+        Log.d(TAG, "Video decoder TextureID = " + textureID);
+        surfaceTexture = new SurfaceTexture(textureID);
+        surface = new Surface(surfaceTexture);
+        decodeSurface = surface;
+      }
+
       MediaFormat format =
           MediaFormat.createVideoFormat(VP8_MIME_TYPE, width, height);
-      format.setInteger(MediaFormat.KEY_COLOR_FORMAT, properties.colorFormat);
+      if (!useSurface) {
+        format.setInteger(MediaFormat.KEY_COLOR_FORMAT, properties.colorFormat);
+      }
       Log.d(TAG, "  Format: " + format);
       mediaCodec = MediaCodec.createByCodecName(properties.codecName);
       if (mediaCodec == null) {
         return false;
       }
-      mediaCodec.configure(format, null, null, 0);
+      mediaCodec.configure(format, decodeSurface, null, 0);
       mediaCodec.start();
       colorFormat = properties.colorFormat;
       outputBuffers = mediaCodec.getOutputBuffers();
@@ -195,6 +355,19 @@
     }
     mediaCodec = null;
     mediaCodecThread = null;
+    if (useSurface) {
+      surface.release();
+      surface = null;
+      surfaceTexture = null;
+      if (textureID >= 0) {
+        int[] textures = new int[1];
+        textures[0] = textureID;
+        Log.d(TAG, "Delete video decoder TextureID " + textureID);
+        GLES20.glDeleteTextures(1, textures, 0);
+        checkGlError("glDeleteTextures");
+      }
+      eglRelease();
+    }
   }
 
   // Dequeue an input buffer and return its index, -1 if no input buffer is
@@ -202,7 +375,7 @@
   private int dequeueInputBuffer() {
     checkOnMediaCodecThread();
     try {
-      return mediaCodec.dequeueInputBuffer(DEQUEUE_TIMEOUT);
+      return mediaCodec.dequeueInputBuffer(DEQUEUE_INPUT_TIMEOUT);
     } catch (IllegalStateException e) {
       Log.e(TAG, "dequeueIntputBuffer failed", e);
       return -2;
@@ -224,23 +397,40 @@
     }
   }
 
+  // Helper struct for dequeueOutputBuffer() below.
+  private static class DecoderOutputBufferInfo {
+    public DecoderOutputBufferInfo(
+        int index, int offset, int size, long presentationTimestampUs) {
+      this.index = index;
+      this.offset = offset;
+      this.size = size;
+      this.presentationTimestampUs = presentationTimestampUs;
+    }
+
+    private final int index;
+    private final int offset;
+    private final int size;
+    private final long presentationTimestampUs;
+  }
+
   // Dequeue and return an output buffer index, -1 if no output
   // buffer available or -2 if error happened.
-  private int dequeueOutputBuffer() {
+  private DecoderOutputBufferInfo dequeueOutputBuffer(int dequeueTimeoutUs) {
     checkOnMediaCodecThread();
     try {
       MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
-      int result = mediaCodec.dequeueOutputBuffer(info, DEQUEUE_TIMEOUT);
+      int result = mediaCodec.dequeueOutputBuffer(info, dequeueTimeoutUs);
       while (result == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED ||
           result == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
         if (result == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
           outputBuffers = mediaCodec.getOutputBuffers();
+          Log.d(TAG, "Output buffers changed: " + outputBuffers.length);
         } else if (result == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
           MediaFormat format = mediaCodec.getOutputFormat();
           Log.d(TAG, "Format changed: " + format.toString());
           width = format.getInteger(MediaFormat.KEY_WIDTH);
           height = format.getInteger(MediaFormat.KEY_HEIGHT);
-          if (format.containsKey(MediaFormat.KEY_COLOR_FORMAT)) {
+          if (!useSurface && format.containsKey(MediaFormat.KEY_COLOR_FORMAT)) {
             colorFormat = format.getInteger(MediaFormat.KEY_COLOR_FORMAT);
             Log.d(TAG, "Color: 0x" + Integer.toHexString(colorFormat));
             // Check if new color space is supported.
@@ -253,7 +443,7 @@
             }
             if (!validColorFormat) {
               Log.e(TAG, "Non supported color format");
-              return -2;
+              return new DecoderOutputBufferInfo(-1, 0, 0, -1);
             }
           }
           if (format.containsKey("stride")) {
@@ -267,21 +457,28 @@
           stride = Math.max(width, stride);
           sliceHeight = Math.max(height, sliceHeight);
         }
-        result = mediaCodec.dequeueOutputBuffer(info, DEQUEUE_TIMEOUT);
+        result = mediaCodec.dequeueOutputBuffer(info, dequeueTimeoutUs);
       }
-      return result;
+      if (result >= 0) {
+        return new DecoderOutputBufferInfo(result, info.offset, info.size,
+            info.presentationTimeUs);
+      }
+      return null;
     } catch (IllegalStateException e) {
       Log.e(TAG, "dequeueOutputBuffer failed", e);
-      return -2;
+      return new DecoderOutputBufferInfo(-1, 0, 0, -1);
     }
   }
 
   // Release a dequeued output buffer back to the codec for re-use.  Return
   // false if the codec is no longer operable.
-  private boolean releaseOutputBuffer(int index) {
+  private boolean releaseOutputBuffer(int index, boolean render) {
     checkOnMediaCodecThread();
     try {
-      mediaCodec.releaseOutputBuffer(index, false);
+      if (!useSurface) {
+        render = false;
+      }
+      mediaCodec.releaseOutputBuffer(index, render);
       return true;
     } catch (IllegalStateException e) {
       Log.e(TAG, "releaseOutputBuffer failed", e);
diff --git a/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java b/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java
index 45b8d6a..cf11573 100644
--- a/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java
+++ b/app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java
@@ -78,7 +78,7 @@
 
   // Helper struct for findVp8HwEncoder() below.
   private static class EncoderProperties {
-    EncoderProperties(String codecName, int colorFormat) {
+    public EncoderProperties(String codecName, int colorFormat) {
       this.codecName = codecName;
       this.colorFormat = colorFormat;
     }
@@ -106,26 +106,33 @@
         continue;  // No VP8 support in this codec; try the next one.
       }
       Log.d(TAG, "Found candidate encoder " + name);
+
+      // Check if this is supported HW encoder.
+      boolean supportedCodec = false;
+      for (String hwCodecPrefix : supportedHwCodecPrefixes) {
+        if (name.startsWith(hwCodecPrefix)) {
+          supportedCodec = true;
+          break;
+        }
+      }
+      if (!supportedCodec) {
+        continue;
+      }
+
       CodecCapabilities capabilities =
           info.getCapabilitiesForType(VP8_MIME_TYPE);
       for (int colorFormat : capabilities.colorFormats) {
         Log.d(TAG, "   Color: 0x" + Integer.toHexString(colorFormat));
       }
 
-      // Check if this is supported HW encoder
-      for (String hwCodecPrefix : supportedHwCodecPrefixes) {
-        if (!name.startsWith(hwCodecPrefix)) {
-          continue;
-        }
-        // Check if codec supports either yuv420 or nv12
-        for (int supportedColorFormat : supportedColorList) {
-          for (int codecColorFormat : capabilities.colorFormats) {
-            if (codecColorFormat == supportedColorFormat) {
-              // Found supported HW VP8 encoder
-              Log.d(TAG, "Found target encoder " + name +
-                  ". Color: 0x" + Integer.toHexString(codecColorFormat));
-              return new EncoderProperties(name, codecColorFormat);
-            }
+      // Check if codec supports either yuv420 or nv12.
+      for (int supportedColorFormat : supportedColorList) {
+        for (int codecColorFormat : capabilities.colorFormats) {
+          if (codecColorFormat == supportedColorFormat) {
+            // Found supported HW VP8 encoder.
+            Log.d(TAG, "Found target encoder " + name +
+                ". Color: 0x" + Integer.toHexString(codecColorFormat));
+            return new EncoderProperties(name, codecColorFormat);
           }
         }
       }
@@ -137,15 +144,6 @@
     return findVp8HwEncoder() != null;
   }
 
-  private static int bitRate(int kbps) {
-    // webrtc "kilo" means 1000, not 1024.  Apparently.
-    // (and the price for overshooting is frame-dropping as webrtc enforces its
-    // bandwidth estimation, which is unpleasant).
-    // Since the HW encoder in the N5 overshoots, we clamp to a bit less than
-    // the requested rate.  Sad but true.  Bug 3194.
-    return kbps * 950;
-  }
-
   private void checkOnMediaCodecThread() {
     if (mediaCodecThread.getId() != Thread.currentThread().getId()) {
       throw new RuntimeException(
@@ -170,7 +168,7 @@
     try {
       MediaFormat format =
           MediaFormat.createVideoFormat(VP8_MIME_TYPE, width, height);
-      format.setInteger(MediaFormat.KEY_BIT_RATE, bitRate(kbps));
+      format.setInteger(MediaFormat.KEY_BIT_RATE, 1000 * kbps);
       format.setInteger("bitrate-mode", VIDEO_ControlRateConstant);
       format.setInteger(MediaFormat.KEY_COLOR_FORMAT, properties.colorFormat);
       // Default WebRTC settings
@@ -241,7 +239,7 @@
     Log.v(TAG, "setRates: " + kbps + " kbps. Fps: " + frameRateIgnored);
     try {
       Bundle params = new Bundle();
-      params.putInt(MediaCodec.PARAMETER_KEY_VIDEO_BITRATE, bitRate(kbps));
+      params.putInt(MediaCodec.PARAMETER_KEY_VIDEO_BITRATE, 1000 * kbps);
       mediaCodec.setParameters(params);
       return true;
     } catch (IllegalStateException e) {
diff --git a/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java b/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java
index 441f37b..f9f96e7 100644
--- a/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java
+++ b/app/webrtc/java/src/org/webrtc/PeerConnectionFactory.java
@@ -46,8 +46,12 @@
   // Callers may specify either |initializeAudio| or |initializeVideo| as false
   // to skip initializing the respective engine (and avoid the need for the
   // respective permissions).
+  // |renderEGLContext| can be provided to support HW video decoding to
+  // texture and will be used to create a shared EGL context on video
+  // decoding thread.
   public static native boolean initializeAndroidGlobals(
-      Object context, boolean initializeAudio, boolean initializeVideo);
+      Object context, boolean initializeAudio, boolean initializeVideo,
+      Object renderEGLContext);
 
   public PeerConnectionFactory() {
     nativeFactory = nativeCreatePeerConnectionFactory();
diff --git a/app/webrtc/java/src/org/webrtc/VideoRenderer.java b/app/webrtc/java/src/org/webrtc/VideoRenderer.java
index 4cc341a..19df137 100644
--- a/app/webrtc/java/src/org/webrtc/VideoRenderer.java
+++ b/app/webrtc/java/src/org/webrtc/VideoRenderer.java
@@ -44,6 +44,9 @@
     public final int height;
     public final int[] yuvStrides;
     public final ByteBuffer[] yuvPlanes;
+    public final boolean yuvFrame;
+    public Object textureObject;
+    public int textureId;
 
     /**
      * Construct a frame of the given dimensions with the specified planar
@@ -62,25 +65,72 @@
         yuvPlanes[2] = ByteBuffer.allocateDirect(yuvStrides[2] * height);
       }
       this.yuvPlanes = yuvPlanes;
+      this.yuvFrame = true;
+    }
+
+    /**
+     * Construct a texture frame of the given dimensions with data in SurfaceTexture
+     */
+    public I420Frame(
+        int width, int height, Object textureObject, int textureId) {
+      this.width = width;
+      this.height = height;
+      this.yuvStrides = null;
+      this.yuvPlanes = null;
+      this.textureObject = textureObject;
+      this.textureId = textureId;
+      this.yuvFrame = false;
     }
 
     /**
      * Copy the planes out of |source| into |this| and return |this|.  Calling
-     * this with mismatched frame dimensions is a programming error and will
-     * likely crash.
+     * this with mismatched frame dimensions or frame type is a programming
+     * error and will likely crash.
      */
     public I420Frame copyFrom(I420Frame source) {
-      if (!Arrays.equals(yuvStrides, source.yuvStrides) ||
-          width != source.width || height != source.height) {
-        throw new RuntimeException("Mismatched dimensions!  Source: " +
+      if (source.yuvFrame && yuvFrame) {
+        if (!Arrays.equals(yuvStrides, source.yuvStrides) ||
+            width != source.width || height != source.height) {
+          throw new RuntimeException("Mismatched dimensions!  Source: " +
+              source.toString() + ", destination: " + toString());
+        }
+        copyPlane(source.yuvPlanes[0], yuvPlanes[0]);
+        copyPlane(source.yuvPlanes[1], yuvPlanes[1]);
+        copyPlane(source.yuvPlanes[2], yuvPlanes[2]);
+        return this;
+      } else if (!source.yuvFrame && !yuvFrame) {
+        textureObject = source.textureObject;
+        textureId = source.textureId;
+        return this;
+      } else {
+        throw new RuntimeException("Mismatched frame types!  Source: " +
             source.toString() + ", destination: " + toString());
       }
-      copyPlane(source.yuvPlanes[0], yuvPlanes[0]);
-      copyPlane(source.yuvPlanes[1], yuvPlanes[1]);
-      copyPlane(source.yuvPlanes[2], yuvPlanes[2]);
-      return this;
     }
 
+    public I420Frame copyFrom(byte[] yuvData) {
+        if (yuvData.length < width * height * 3 / 2) {
+          throw new RuntimeException("Wrong arrays size: " + yuvData.length);
+        }
+        if (!yuvFrame) {
+          throw new RuntimeException("Can not feed yuv data to texture frame");
+        }
+        int planeSize = width * height;
+        ByteBuffer[] planes = new ByteBuffer[3];
+        planes[0] = ByteBuffer.wrap(yuvData, 0, planeSize);
+        planes[1] = ByteBuffer.wrap(yuvData, planeSize, planeSize / 4);
+        planes[2] = ByteBuffer.wrap(yuvData, planeSize + planeSize / 4,
+            planeSize / 4);
+        for (int i = 0; i < 3; i++) {
+          yuvPlanes[i].position(0);
+          yuvPlanes[i].put(planes[i]);
+          yuvPlanes[i].position(0);
+          yuvPlanes[i].limit(yuvPlanes[i].capacity());
+        }
+        return this;
+      }
+
+
     @Override
     public String toString() {
       return width + "x" + height + ":" + yuvStrides[0] + ":" + yuvStrides[1] +
@@ -126,11 +176,16 @@
   }
 
   public void dispose() {
-    free(nativeVideoRenderer);
+    if (callbacks == null) {
+      freeGuiVideoRenderer(nativeVideoRenderer);
+    } else {
+      freeWrappedVideoRenderer(nativeVideoRenderer);
+    }
   }
 
   private static native long nativeCreateGuiVideoRenderer(int x, int y);
   private static native long nativeWrapVideoRenderer(Callbacks callbacks);
 
-  private static native void free(long nativeVideoRenderer);
+  private static native void freeGuiVideoRenderer(long nativeVideoRenderer);
+  private static native void freeWrappedVideoRenderer(long nativeVideoRenderer);
 }
diff --git a/app/webrtc/jsepsessiondescription.cc b/app/webrtc/jsepsessiondescription.cc
index 23a7cdd..c318d91 100644
--- a/app/webrtc/jsepsessiondescription.cc
+++ b/app/webrtc/jsepsessiondescription.cc
@@ -57,11 +57,19 @@
 const char SessionDescriptionInterface::kAnswer[] = "answer";
 
 const int JsepSessionDescription::kDefaultVideoCodecId = 100;
-const int JsepSessionDescription::kDefaultVideoCodecFramerate = 30;
+// This is effectively a max value of the frame rate. 30 is default from camera.
+const int JsepSessionDescription::kDefaultVideoCodecFramerate = 60;
 const char JsepSessionDescription::kDefaultVideoCodecName[] = "VP8";
 // Used as default max video codec size before we have it in signaling.
-const int JsepSessionDescription::kMaxVideoCodecWidth = 3840;
-const int JsepSessionDescription::kMaxVideoCodecHeight = 2160;
+#if defined(ANDROID)
+// Limit default max video codec size for Android to avoid
+// HW VP8 codec initialization failure for resolution higher than 720p.
+const int JsepSessionDescription::kMaxVideoCodecWidth = 1280;
+const int JsepSessionDescription::kMaxVideoCodecHeight = 720;
+#else
+const int JsepSessionDescription::kMaxVideoCodecWidth = 1920;
+const int JsepSessionDescription::kMaxVideoCodecHeight = 1080;
+#endif
 const int JsepSessionDescription::kDefaultVideoCodecPreference = 1;
 
 SessionDescriptionInterface* CreateSessionDescription(const std::string& type,
diff --git a/app/webrtc/mediaconstraintsinterface.cc b/app/webrtc/mediaconstraintsinterface.cc
index d59716e..a7b88aa 100644
--- a/app/webrtc/mediaconstraintsinterface.cc
+++ b/app/webrtc/mediaconstraintsinterface.cc
@@ -95,6 +95,8 @@
     "googSuspendBelowMinBitrate";
 const char MediaConstraintsInterface::kNumUnsignalledRecvStreams[] =
     "googNumUnsignalledRecvStreams";
+const char MediaConstraintsInterface::kCombinedAudioVideoBwe[] =
+    "googCombinedAudioVideoBwe";
 const char MediaConstraintsInterface::kScreencastMinBitrate[] =
     "googScreencastMinBitrate";
 // TODO(ronghuawu): Remove once cpu overuse detection is stable.
diff --git a/app/webrtc/mediaconstraintsinterface.h b/app/webrtc/mediaconstraintsinterface.h
index d74ffab..045da79 100644
--- a/app/webrtc/mediaconstraintsinterface.h
+++ b/app/webrtc/mediaconstraintsinterface.h
@@ -117,6 +117,8 @@
       // googSuspendBelowMinBitrate
   static const char kNumUnsignalledRecvStreams[];
       // googNumUnsignalledRecvStreams
+  // Constraint to enable combined audio+video bandwidth estimation.
+  static const char kCombinedAudioVideoBwe[];  // googCombinedAudioVideoBwe
   static const char kScreencastMinBitrate[];  // googScreencastMinBitrate
   static const char kCpuOveruseDetection[];  // googCpuOveruseDetection
   static const char kCpuUnderuseThreshold[];  // googCpuUnderuseThreshold
diff --git a/app/webrtc/mediastreamsignaling.cc b/app/webrtc/mediastreamsignaling.cc
index c4a8281..df51ba1 100644
--- a/app/webrtc/mediastreamsignaling.cc
+++ b/app/webrtc/mediastreamsignaling.cc
@@ -51,9 +51,9 @@
 using rtc::scoped_ptr;
 using rtc::scoped_refptr;
 
-static bool ParseConstraints(
+static bool ParseConstraintsForAnswer(
     const MediaConstraintsInterface* constraints,
-    cricket::MediaSessionOptions* options, bool is_answer) {
+    cricket::MediaSessionOptions* options) {
   bool value;
   size_t mandatory_constraints_satisfied = 0;
 
@@ -82,7 +82,7 @@
     // kOfferToReceiveVideo defaults to false according to spec. But
     // if it is an answer and video is offered, we should still accept video
     // per default.
-    options->has_video |= is_answer;
+    options->has_video = true;
   }
 
   if (FindConstraint(constraints,
@@ -133,6 +133,55 @@
       (value <= Options::kMaxOfferToReceiveMedia);
 }
 
+// Add the stream and RTP data channel info to |session_options|.
+static void SetStreams(
+    cricket::MediaSessionOptions* session_options,
+    rtc::scoped_refptr<StreamCollection> streams,
+    const MediaStreamSignaling::RtpDataChannels& rtp_data_channels) {
+  session_options->streams.clear();
+  if (streams != NULL) {
+    for (size_t i = 0; i < streams->count(); ++i) {
+      MediaStreamInterface* stream = streams->at(i);
+
+      AudioTrackVector audio_tracks(stream->GetAudioTracks());
+
+      // For each audio track in the stream, add it to the MediaSessionOptions.
+      for (size_t j = 0; j < audio_tracks.size(); ++j) {
+        scoped_refptr<MediaStreamTrackInterface> track(audio_tracks[j]);
+        session_options->AddStream(
+            cricket::MEDIA_TYPE_AUDIO, track->id(), stream->label());
+      }
+
+      VideoTrackVector video_tracks(stream->GetVideoTracks());
+
+      // For each video track in the stream, add it to the MediaSessionOptions.
+      for (size_t j = 0; j < video_tracks.size(); ++j) {
+        scoped_refptr<MediaStreamTrackInterface> track(video_tracks[j]);
+        session_options->AddStream(
+            cricket::MEDIA_TYPE_VIDEO, track->id(), stream->label());
+      }
+    }
+  }
+
+  // Check for data channels.
+  MediaStreamSignaling::RtpDataChannels::const_iterator data_channel_it =
+      rtp_data_channels.begin();
+  for (; data_channel_it != rtp_data_channels.end(); ++data_channel_it) {
+    const DataChannel* channel = data_channel_it->second;
+    if (channel->state() == DataChannel::kConnecting ||
+        channel->state() == DataChannel::kOpen) {
+      // |streamid| and |sync_label| are both set to the DataChannel label
+      // here so they can be signaled the same way as MediaStreams and Tracks.
+      // For MediaStreams, the sync_label is the MediaStream label and the
+      // track label is the same as |streamid|.
+      const std::string& streamid = channel->label();
+      const std::string& sync_label = channel->label();
+      session_options->AddStream(
+          cricket::MEDIA_TYPE_DATA, streamid, sync_label);
+    }
+  }
+}
+
 // Factory class for creating remote MediaStreams and MediaStreamTracks.
 class RemoteMediaStreamFactory {
  public:
@@ -192,8 +241,6 @@
                                                           channel_manager)),
       last_allocated_sctp_even_sid_(-2),
       last_allocated_sctp_odd_sid_(-1) {
-  options_.has_video = false;
-  options_.has_audio = false;
 }
 
 MediaStreamSignaling::~MediaStreamSignaling() {
@@ -279,12 +326,13 @@
     LOG(LS_ERROR) << "Failed to create DataChannel from the OPEN message.";
     return false;
   }
-  sctp_data_channels_.push_back(channel);
+
   stream_observer_->OnAddDataChannel(channel);
   return true;
 }
 
 void MediaStreamSignaling::RemoveSctpDataChannel(int sid) {
+  ASSERT(sid >= 0);
   for (SctpDataChannels::iterator iter = sctp_data_channels_.begin();
        iter != sctp_data_channels_.end();
        ++iter) {
@@ -377,40 +425,38 @@
     return false;
   }
 
-  UpdateSessionOptions();
+  session_options->has_audio = false;
+  session_options->has_video = false;
+  SetStreams(session_options, local_streams_, rtp_data_channels_);
 
-  // |options.has_audio| and |options.has_video| can only change from false to
-  // true, but never change from true to false. This is to make sure
-  // CreateOffer / CreateAnswer doesn't remove a media content
-  // description that has been created.
-  if (rtc_options.offer_to_receive_audio > 0) {
-    options_.has_audio = true;
+  // If |offer_to_receive_[audio/video]| is undefined, respect the flags set
+  // from SetStreams. Otherwise, overwrite it based on |rtc_options|.
+  if (rtc_options.offer_to_receive_audio != RTCOfferAnswerOptions::kUndefined) {
+    session_options->has_audio = rtc_options.offer_to_receive_audio > 0;
   }
-  if (rtc_options.offer_to_receive_video > 0) {
-    options_.has_video = true;
+  if (rtc_options.offer_to_receive_video != RTCOfferAnswerOptions::kUndefined) {
+    session_options->has_video = rtc_options.offer_to_receive_video > 0;
   }
-  options_.vad_enabled = rtc_options.voice_activity_detection;
-  options_.transport_options.ice_restart = rtc_options.ice_restart;
-  options_.bundle_enabled = rtc_options.use_rtp_mux;
 
-  options_.bundle_enabled = EvaluateNeedForBundle(options_);
-  *session_options = options_;
+  session_options->vad_enabled = rtc_options.voice_activity_detection;
+  session_options->transport_options.ice_restart = rtc_options.ice_restart;
+  session_options->bundle_enabled = rtc_options.use_rtp_mux;
+
+  session_options->bundle_enabled = EvaluateNeedForBundle(*session_options);
   return true;
 }
 
 bool MediaStreamSignaling::GetOptionsForAnswer(
     const MediaConstraintsInterface* constraints,
     cricket::MediaSessionOptions* options) {
-  UpdateSessionOptions();
+  options->has_audio = false;
+  options->has_video = false;
+  SetStreams(options, local_streams_, rtp_data_channels_);
 
-  // Copy the |options_| to not let the flag MediaSessionOptions::has_audio and
-  // MediaSessionOptions::has_video affect subsequent offers.
-  cricket::MediaSessionOptions current_options = options_;
-  if (!ParseConstraints(constraints, &current_options, true)) {
+  if (!ParseConstraintsForAnswer(constraints, options)) {
     return false;
   }
-  current_options.bundle_enabled = EvaluateNeedForBundle(current_options);
-  *options = current_options;
+  options->bundle_enabled = EvaluateNeedForBundle(*options);
   return true;
 }
 
@@ -545,54 +591,6 @@
   }
 }
 
-void MediaStreamSignaling::UpdateSessionOptions() {
-  options_.streams.clear();
-  if (local_streams_ != NULL) {
-    for (size_t i = 0; i < local_streams_->count(); ++i) {
-      MediaStreamInterface* stream = local_streams_->at(i);
-
-      AudioTrackVector audio_tracks(stream->GetAudioTracks());
-      if (!audio_tracks.empty()) {
-        options_.has_audio = true;
-      }
-
-      // For each audio track in the stream, add it to the MediaSessionOptions.
-      for (size_t j = 0; j < audio_tracks.size(); ++j) {
-        scoped_refptr<MediaStreamTrackInterface> track(audio_tracks[j]);
-        options_.AddStream(cricket::MEDIA_TYPE_AUDIO, track->id(),
-                           stream->label());
-      }
-
-      VideoTrackVector video_tracks(stream->GetVideoTracks());
-      if (!video_tracks.empty()) {
-        options_.has_video = true;
-      }
-      // For each video track in the stream, add it to the MediaSessionOptions.
-      for (size_t j = 0; j < video_tracks.size(); ++j) {
-        scoped_refptr<MediaStreamTrackInterface> track(video_tracks[j]);
-        options_.AddStream(cricket::MEDIA_TYPE_VIDEO, track->id(),
-                           stream->label());
-      }
-    }
-  }
-
-  // Check for data channels.
-  RtpDataChannels::const_iterator data_channel_it = rtp_data_channels_.begin();
-  for (; data_channel_it != rtp_data_channels_.end(); ++data_channel_it) {
-    const DataChannel* channel = data_channel_it->second;
-    if (channel->state() == DataChannel::kConnecting ||
-        channel->state() == DataChannel::kOpen) {
-      // |streamid| and |sync_label| are both set to the DataChannel label
-      // here so they can be signaled the same way as MediaStreams and Tracks.
-      // For MediaStreams, the sync_label is the MediaStream label and the
-      // track label is the same as |streamid|.
-      const std::string& streamid = channel->label();
-      const std::string& sync_label = channel->label();
-      options_.AddStream(cricket::MEDIA_TYPE_DATA, streamid, sync_label);
-    }
-  }
-}
-
 void MediaStreamSignaling::UpdateRemoteStreamsList(
     const cricket::StreamParamsVec& streams,
     cricket::MediaType media_type,
diff --git a/app/webrtc/mediastreamsignaling.h b/app/webrtc/mediastreamsignaling.h
index 7f17971..d4b1be8 100644
--- a/app/webrtc/mediastreamsignaling.h
+++ b/app/webrtc/mediastreamsignaling.h
@@ -160,6 +160,9 @@
 
 class MediaStreamSignaling : public sigslot::has_slots<> {
  public:
+  typedef std::map<std::string, rtc::scoped_refptr<DataChannel> >
+      RtpDataChannels;
+
   MediaStreamSignaling(rtc::Thread* signaling_thread,
                        MediaStreamSignalingObserver* stream_observer,
                        cricket::ChannelManager* channel_manager);
@@ -289,8 +292,6 @@
   };
   typedef std::vector<TrackInfo> TrackInfos;
 
-  void UpdateSessionOptions();
-
   // Makes sure a MediaStream Track is created for each StreamParam in
   // |streams|. |media_type| is the type of the |streams| and can be either
   // audio or video.
@@ -378,7 +379,6 @@
   RemotePeerInfo remote_info_;
   rtc::Thread* signaling_thread_;
   DataChannelFactory* data_channel_factory_;
-  cricket::MediaSessionOptions options_;
   MediaStreamSignalingObserver* stream_observer_;
   rtc::scoped_refptr<StreamCollection> local_streams_;
   rtc::scoped_refptr<StreamCollection> remote_streams_;
@@ -392,8 +392,6 @@
   int last_allocated_sctp_even_sid_;
   int last_allocated_sctp_odd_sid_;
 
-  typedef std::map<std::string, rtc::scoped_refptr<DataChannel> >
-      RtpDataChannels;
   typedef std::vector<rtc::scoped_refptr<DataChannel> > SctpDataChannels;
 
   RtpDataChannels rtp_data_channels_;
diff --git a/app/webrtc/mediastreamsignaling_unittest.cc b/app/webrtc/mediastreamsignaling_unittest.cc
index fa83646..84f67b9 100644
--- a/app/webrtc/mediastreamsignaling_unittest.cc
+++ b/app/webrtc/mediastreamsignaling_unittest.cc
@@ -261,14 +261,20 @@
 class FakeDataChannelFactory : public webrtc::DataChannelFactory {
  public:
   FakeDataChannelFactory(FakeDataChannelProvider* provider,
-                         cricket::DataChannelType dct)
-      : provider_(provider), type_(dct) {}
+                         cricket::DataChannelType dct,
+                         webrtc::MediaStreamSignaling* media_stream_signaling)
+      : provider_(provider),
+        type_(dct),
+        media_stream_signaling_(media_stream_signaling) {}
 
   virtual rtc::scoped_refptr<webrtc::DataChannel> CreateDataChannel(
       const std::string& label,
       const webrtc::InternalDataChannelInit* config) {
     last_init_ = *config;
-    return webrtc::DataChannel::Create(provider_, type_, label, *config);
+    rtc::scoped_refptr<webrtc::DataChannel> data_channel =
+        webrtc::DataChannel::Create(provider_, type_, label, *config);
+    media_stream_signaling_->AddDataChannel(data_channel);
+    return data_channel;
   }
 
   const webrtc::InternalDataChannelInit& last_init() const {
@@ -278,6 +284,7 @@
  private:
   FakeDataChannelProvider* provider_;
   cricket::DataChannelType type_;
+  webrtc::MediaStreamSignaling* media_stream_signaling_;
   webrtc::InternalDataChannelInit last_init_;
 };
 
@@ -775,8 +782,10 @@
   RTCOfferAnswerOptions default_rtc_options;
   EXPECT_TRUE(signaling_->GetOptionsForOffer(default_rtc_options,
                                              &updated_offer_options));
-  EXPECT_TRUE(updated_offer_options.has_audio);
-  EXPECT_TRUE(updated_offer_options.has_video);
+  // By default, |has_audio| or |has_video| are false if there is no media
+  // track.
+  EXPECT_FALSE(updated_offer_options.has_audio);
+  EXPECT_FALSE(updated_offer_options.has_video);
 }
 
 // This test verifies that the remote MediaStreams corresponding to a received
@@ -1263,7 +1272,8 @@
 // message.
 TEST_F(MediaStreamSignalingTest, CreateDataChannelFromOpenMessage) {
   FakeDataChannelFactory fake_factory(data_channel_provider_.get(),
-                                      cricket::DCT_SCTP);
+                                      cricket::DCT_SCTP,
+                                      signaling_.get());
   signaling_->SetDataChannelFactory(&fake_factory);
   webrtc::DataChannelInit config;
   config.id = 1;
@@ -1283,7 +1293,8 @@
   AddDataChannel(cricket::DCT_SCTP, "a", -1);
 
   FakeDataChannelFactory fake_factory(data_channel_provider_.get(),
-                                      cricket::DCT_SCTP);
+                                      cricket::DCT_SCTP,
+                                      signaling_.get());
   signaling_->SetDataChannelFactory(&fake_factory);
   webrtc::DataChannelInit config;
   config.id = 0;
@@ -1312,3 +1323,24 @@
   signaling_->OnRemoteSctpDataChannelClosed(config.id);
   EXPECT_EQ(webrtc::DataChannelInterface::kClosed, data_channel->state());
 }
+
+// Verifies that DataChannel added from OPEN message is added to
+// MediaStreamSignaling only once (webrtc issue 3778).
+TEST_F(MediaStreamSignalingTest, DataChannelFromOpenMessageAddedOnce) {
+  FakeDataChannelFactory fake_factory(data_channel_provider_.get(),
+                                      cricket::DCT_SCTP,
+                                      signaling_.get());
+  signaling_->SetDataChannelFactory(&fake_factory);
+  webrtc::DataChannelInit config;
+  config.id = 1;
+  rtc::Buffer payload;
+  webrtc::WriteDataChannelOpenMessage("a", config, &payload);
+  cricket::ReceiveDataParams params;
+  params.ssrc = config.id;
+  EXPECT_TRUE(signaling_->AddDataChannelFromOpenMessage(params, payload));
+  EXPECT_TRUE(signaling_->HasDataChannels());
+
+  // Removes the DataChannel and verifies that no DataChannel is left.
+  signaling_->RemoveSctpDataChannel(config.id);
+  EXPECT_FALSE(signaling_->HasDataChannels());
+}
diff --git a/app/webrtc/objc/RTCEAGLVideoView.m b/app/webrtc/objc/RTCEAGLVideoView.m
index 5365d98..faacef6 100644
--- a/app/webrtc/objc/RTCEAGLVideoView.m
+++ b/app/webrtc/objc/RTCEAGLVideoView.m
@@ -173,6 +173,7 @@
     return;
   }
   [_videoTrack removeRenderer:_videoRenderer];
+  self.i420Frame = nil;
   _videoTrack = videoTrack;
   [_videoTrack addRenderer:_videoRenderer];
   // TODO(tkchin): potentially handle changes in track state - e.g. render
@@ -191,11 +192,9 @@
 // This method is called when the GLKView's content is dirty and needs to be
 // redrawn. This occurs on main thread.
 - (void)glkView:(GLKView*)view drawInRect:(CGRect)rect {
-  if (self.i420Frame) {
-    // The renderer will draw the frame to the framebuffer corresponding to the
-    // one used by |view|.
-    [_glRenderer drawFrame:self.i420Frame];
-  }
+  // The renderer will draw the frame to the framebuffer corresponding to the
+  // one used by |view|.
+  [_glRenderer drawFrame:self.i420Frame];
 }
 
 #pragma mark - Private
diff --git a/app/webrtc/objc/RTCNSGLVideoView.m b/app/webrtc/objc/RTCNSGLVideoView.m
index 39f3678..292e792 100644
--- a/app/webrtc/objc/RTCNSGLVideoView.m
+++ b/app/webrtc/objc/RTCNSGLVideoView.m
@@ -116,6 +116,9 @@
   if (_videoTrack) {
     [_videoTrack removeRenderer:_videoRenderer];
     CVDisplayLinkStop(_displayLink);
+    // Clear contents.
+    self.i420Frame = nil;
+    [self drawFrame];
   }
   _videoTrack = videoTrack;
   if (_videoTrack) {
@@ -144,7 +147,7 @@
 
 - (void)drawFrame {
   RTCI420Frame* i420Frame = self.i420Frame;
-  if (i420Frame && self.glRenderer.lastDrawnFrame != i420Frame) {
+  if (self.glRenderer.lastDrawnFrame != i420Frame) {
     // This method may be called from CVDisplayLink callback which isn't on the
     // main thread so we have to lock the GL context before drawing.
     CGLLockContext([[self openGLContext] CGLContextObj]);
diff --git a/app/webrtc/objc/RTCOpenGLVideoRenderer.mm b/app/webrtc/objc/RTCOpenGLVideoRenderer.mm
index 9ee0216..5a24cf0 100644
--- a/app/webrtc/objc/RTCOpenGLVideoRenderer.mm
+++ b/app/webrtc/objc/RTCOpenGLVideoRenderer.mm
@@ -205,16 +205,18 @@
     return NO;
   }
   [self ensureGLContext];
-  if (![self updateTextureSizesForFrame:frame] ||
-      ![self updateTextureDataForFrame:frame]) {
-    return NO;
-  }
   glClear(GL_COLOR_BUFFER_BIT);
+  if (frame) {
+    if (![self updateTextureSizesForFrame:frame] ||
+        ![self updateTextureDataForFrame:frame]) {
+      return NO;
+    }
 #if !TARGET_OS_IPHONE
-  glBindVertexArray(_vertexArray);
+    glBindVertexArray(_vertexArray);
 #endif
-  glBindBuffer(GL_ARRAY_BUFFER, _vertexBuffer);
-  glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
+    glBindBuffer(GL_ARRAY_BUFFER, _vertexBuffer);
+    glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
+  }
 #if !TARGET_OS_IPHONE
   [_context flushBuffer];
 #endif
@@ -238,7 +240,6 @@
   }
   glUseProgram(_program);
   glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
-  glClearColor(0, 0, 0, 1);
   _isInitialized = YES;
 }
 
diff --git a/app/webrtc/objc/RTCVideoRenderer.mm b/app/webrtc/objc/RTCVideoRenderer.mm
index de03a1e..4cfe43a 100644
--- a/app/webrtc/objc/RTCVideoRenderer.mm
+++ b/app/webrtc/objc/RTCVideoRenderer.mm
@@ -30,10 +30,6 @@
 #endif
 
 #import "RTCVideoRenderer+Internal.h"
-
-#if TARGET_OS_IPHONE
-#import "RTCEAGLVideoView+Internal.h"
-#endif
 #import "RTCI420Frame+Internal.h"
 
 namespace webrtc {
@@ -62,9 +58,6 @@
 
 @implementation RTCVideoRenderer {
   rtc::scoped_ptr<webrtc::RTCVideoRendererAdapter> _adapter;
-#if TARGET_OS_IPHONE
-  RTCEAGLVideoView* _videoView;
-#endif
 }
 
 - (instancetype)initWithDelegate:(id<RTCVideoRendererDelegate>)delegate {
@@ -75,22 +68,6 @@
   return self;
 }
 
-#if TARGET_OS_IPHONE
-// TODO(tkchin): remove shim for deprecated method.
-- (instancetype)initWithView:(UIView*)view {
-  if (self = [super init]) {
-    _videoView = [[RTCEAGLVideoView alloc] initWithFrame:view.bounds];
-    _videoView.autoresizingMask =
-        UIViewAutoresizingFlexibleHeight | UIViewAutoresizingFlexibleWidth;
-    _videoView.translatesAutoresizingMaskIntoConstraints = YES;
-    [view addSubview:_videoView];
-    self.delegate = _videoView;
-    _adapter.reset(new webrtc::RTCVideoRendererAdapter(self));
-  }
-  return self;
-}
-#endif
-
 @end
 
 @implementation RTCVideoRenderer (Internal)
diff --git a/app/webrtc/objc/public/RTCVideoRenderer.h b/app/webrtc/objc/public/RTCVideoRenderer.h
index f78746c..37977ce 100644
--- a/app/webrtc/objc/public/RTCVideoRenderer.h
+++ b/app/webrtc/objc/public/RTCVideoRenderer.h
@@ -55,12 +55,6 @@
 // of frames.
 - (instancetype)initWithDelegate:(id<RTCVideoRendererDelegate>)delegate;
 
-#if TARGET_OS_IPHONE
-// DEPRECATED. See https://code.google.com/p/webrtc/issues/detail?id=3341 for
-// details.
-- (instancetype)initWithView:(UIView*)view;
-#endif
-
 #ifndef DOXYGEN_SHOULD_SKIP_THIS
 // Disallow init and don't add to documentation
 - (id)init __attribute__((
diff --git a/app/webrtc/peerconnection.cc b/app/webrtc/peerconnection.cc
index 201269a..d939f98 100644
--- a/app/webrtc/peerconnection.cc
+++ b/app/webrtc/peerconnection.cc
@@ -39,6 +39,7 @@
 #include "talk/session/media/channelmanager.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/base/stringencode.h"
+#include "webrtc/system_wrappers/interface/field_trial.h"
 
 namespace {
 
@@ -353,10 +354,14 @@
                             cricket::PORTALLOCATOR_ENABLE_SHARED_UFRAG |
                             cricket::PORTALLOCATOR_ENABLE_SHARED_SOCKET;
   bool value;
+  // If IPv6 flag was specified, we'll not override it by experiment.
   if (FindConstraint(
-        constraints,
-        MediaConstraintsInterface::kEnableIPv6,
-        &value, NULL) && value) {
+          constraints, MediaConstraintsInterface::kEnableIPv6, &value, NULL)) {
+    if (value) {
+      portallocator_flags |= cricket::PORTALLOCATOR_ENABLE_IPV6;
+    }
+  } else if (webrtc::field_trial::FindFullName("WebRTC-IPv6Default") ==
+             "Enabled") {
     portallocator_flags |= cricket::PORTALLOCATOR_ENABLE_IPV6;
   }
 
@@ -508,10 +513,6 @@
     return;
   }
   RTCOfferAnswerOptions options;
-  // Defaults to receiving audio and not receiving video.
-  options.offer_to_receive_audio =
-      RTCOfferAnswerOptions::kOfferToReceiveMediaTrue;
-  options.offer_to_receive_video = 0;
 
   bool value;
   size_t mandatory_constraints = 0;
@@ -672,7 +673,7 @@
       }
     }
   }
-  return session_->UpdateIce(config.type);
+  return session_->SetIceTransports(config.type);
 }
 
 bool PeerConnection::AddIceCandidate(
diff --git a/app/webrtc/peerconnection_unittest.cc b/app/webrtc/peerconnection_unittest.cc
index ca022c6..977fc11 100644
--- a/app/webrtc/peerconnection_unittest.cc
+++ b/app/webrtc/peerconnection_unittest.cc
@@ -90,6 +90,7 @@
 // warnings.
 #if !defined(THREAD_SANITIZER)
 static const int kMaxWaitForStatsMs = 3000;
+static const int kMaxWaitForRembMs = 5000;
 #endif
 static const int kMaxWaitForFramesMs = 10000;
 static const int kEndAudioFrameCount = 3;
@@ -154,11 +155,11 @@
   }
 
   void AddMediaStream(bool audio, bool video) {
-    std::string label = kStreamLabelBase +
+    std::string stream_label = kStreamLabelBase +
         rtc::ToString<int>(
             static_cast<int>(peer_connection_->local_streams()->count()));
     rtc::scoped_refptr<webrtc::MediaStreamInterface> stream =
-        peer_connection_factory_->CreateLocalMediaStream(label);
+        peer_connection_factory_->CreateLocalMediaStream(stream_label);
 
     if (audio && can_receive_audio()) {
       FakeConstraints constraints;
@@ -169,13 +170,13 @@
           peer_connection_factory_->CreateAudioSource(&constraints);
       // TODO(perkj): Test audio source when it is implemented. Currently audio
       // always use the default input.
+      std::string label = stream_label + kAudioTrackLabelBase;
       rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
-          peer_connection_factory_->CreateAudioTrack(kAudioTrackLabelBase,
-                                                     source));
+          peer_connection_factory_->CreateAudioTrack(label, source));
       stream->AddTrack(audio_track);
     }
     if (video && can_receive_video()) {
-      stream->AddTrack(CreateLocalVideoTrack(label));
+      stream->AddTrack(CreateLocalVideoTrack(stream_label));
     }
 
     EXPECT_TRUE(peer_connection_->AddStream(stream, NULL));
@@ -368,6 +369,16 @@
     return observer->BytesSent();
   }
 
+  int GetAvailableReceivedBandwidthStats() {
+    rtc::scoped_refptr<MockStatsObserver>
+        observer(new rtc::RefCountedObject<MockStatsObserver>());
+    EXPECT_TRUE(peer_connection_->GetStats(
+        observer, NULL, PeerConnectionInterface::kStatsOutputLevelStandard));
+    EXPECT_TRUE_WAIT(observer->called(), kMaxWaitMs);
+    int bw = observer->AvailableReceiveBandwidth();
+    return bw;
+  }
+
   int rendered_width() {
     EXPECT_FALSE(fake_video_renderers_.empty());
     return fake_video_renderers_.empty() ? 1 :
@@ -447,6 +458,12 @@
   webrtc::PeerConnectionInterface* pc() {
     return peer_connection_.get();
   }
+  void StopVideoCapturers() {
+    for (std::vector<cricket::VideoCapturer*>::iterator it =
+        video_capturers_.begin(); it != video_capturers_.end(); ++it) {
+      (*it)->Stop();
+    }
+  }
 
  protected:
   explicit PeerConnectionTestClientBase(const std::string& id)
@@ -464,9 +481,8 @@
     if (!allocator_factory_) {
       return false;
     }
-    audio_thread_.Start();
     fake_audio_capture_module_ = FakeAudioCaptureModule::Create(
-        &audio_thread_);
+        rtc::Thread::Current());
 
     if (fake_audio_capture_module_ == NULL) {
       return false;
@@ -529,21 +545,17 @@
     FakeConstraints source_constraints = video_constraints_;
     source_constraints.SetMandatoryMaxFrameRate(10);
 
+    cricket::FakeVideoCapturer* fake_capturer =
+        new webrtc::FakePeriodicVideoCapturer();
+    video_capturers_.push_back(fake_capturer);
     rtc::scoped_refptr<webrtc::VideoSourceInterface> source =
         peer_connection_factory_->CreateVideoSource(
-            new webrtc::FakePeriodicVideoCapturer(),
-            &source_constraints);
+            fake_capturer, &source_constraints);
     std::string label = stream_label + kVideoTrackLabelBase;
     return peer_connection_factory_->CreateVideoTrack(label, source);
   }
 
   std::string id_;
-  // Separate thread for executing |fake_audio_capture_module_| tasks. Audio
-  // processing must not be performed on the same thread as signaling due to
-  // signaling time constraints and relative complexity of the audio pipeline.
-  // This is consistent with the video pipeline that us a a separate thread for
-  // encoding and decoding.
-  rtc::Thread audio_thread_;
 
   rtc::scoped_refptr<webrtc::PortAllocatorFactoryInterface>
       allocator_factory_;
@@ -569,6 +581,10 @@
 
   // For remote peer communication.
   MessageReceiver* signaling_message_receiver_;
+
+  // Store references to the video capturers we've created, so that we can stop
+  // them, if required.
+  std::vector<cricket::VideoCapturer*> video_capturers_;
 };
 
 class JsepTestClient
@@ -591,7 +607,7 @@
   }
   virtual void Negotiate(bool audio, bool video) {
     rtc::scoped_ptr<SessionDescriptionInterface> offer;
-    EXPECT_TRUE(DoCreateOffer(offer.use()));
+    ASSERT_TRUE(DoCreateOffer(offer.use()));
 
     if (offer->description()->GetContentByName("audio")) {
       offer->description()->GetContentByName("audio")->rejected = !audio;
@@ -1019,6 +1035,30 @@
     }
   }
 
+  // Wait until 'size' bytes of audio have been seen by the receiver, on the
+  // first audio stream.
+  void WaitForAudioData(int size) {
+    const int kMaxWaitForAudioDataMs = 10000;
+
+    StreamCollectionInterface* local_streams =
+        initializing_client()->local_streams();
+    ASSERT_GT(local_streams->count(), 0u);
+    ASSERT_GT(local_streams->at(0)->GetAudioTracks().size(), 0u);
+    MediaStreamTrackInterface* local_audio_track =
+        local_streams->at(0)->GetAudioTracks()[0];
+
+    // Wait until *any* audio has been received.
+    EXPECT_TRUE_WAIT(
+        receiving_client()->GetBytesReceivedStats(local_audio_track) > 0,
+        kMaxWaitForAudioDataMs);
+
+    // Wait until 'size' number of bytes have been received.
+    size += receiving_client()->GetBytesReceivedStats(local_audio_track);
+    EXPECT_TRUE_WAIT(
+        receiving_client()->GetBytesReceivedStats(local_audio_track) > size,
+        kMaxWaitForAudioDataMs);
+  }
+
   SignalingClass* initializing_client() { return initiating_client_.get(); }
   SignalingClass* receiving_client() { return receiving_client_.get(); }
 
@@ -1314,6 +1354,7 @@
 
   // Unregister the existing observer.
   receiving_client()->data_channel()->UnregisterObserver();
+
   std::string data = "hello world";
   SendRtpData(initializing_client()->data_channel(), data);
 
@@ -1437,4 +1478,69 @@
   EnableVideoDecoderFactory();
   LocalP2PTest();
 }
+
+// Test receive bandwidth stats with only audio enabled at receiver.
+TEST_F(JsepPeerConnectionP2PTestClient, ReceivedBweStatsAudio) {
+  ASSERT_TRUE(CreateTestClients());
+  receiving_client()->SetReceiveAudioVideo(true, false);
+  LocalP2PTest();
+
+  // Wait until we have received some audio data. Following REMB should be zero.
+  WaitForAudioData(10000);
+  EXPECT_EQ_WAIT(
+      receiving_client()->GetAvailableReceivedBandwidthStats(), 0,
+      kMaxWaitForRembMs);
+}
+
+// Test receive bandwidth stats with combined BWE.
+TEST_F(JsepPeerConnectionP2PTestClient, ReceivedBweStatsCombined) {
+  FakeConstraints setup_constraints;
+  setup_constraints.AddOptional(
+      MediaConstraintsInterface::kCombinedAudioVideoBwe, true);
+  ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
+  initializing_client()->AddMediaStream(true, true);
+  initializing_client()->AddMediaStream(false, true);
+  initializing_client()->AddMediaStream(false, true);
+  initializing_client()->AddMediaStream(false, true);
+  LocalP2PTest();
+
+  // Run until a non-zero bw is reported.
+  EXPECT_TRUE_WAIT(receiving_client()->GetAvailableReceivedBandwidthStats() > 0,
+                   kMaxWaitForRembMs);
+
+  // Halt video capturers, then run until we have gotten some audio. Following
+  // REMB should be non-zero.
+  initializing_client()->StopVideoCapturers();
+  WaitForAudioData(10000);
+  EXPECT_TRUE_WAIT(
+      receiving_client()->GetAvailableReceivedBandwidthStats() > 0,
+      kMaxWaitForRembMs);
+}
+
+// Test receive bandwidth stats with 1 video, 3 audio streams but no combined
+// BWE.
+TEST_F(JsepPeerConnectionP2PTestClient, ReceivedBweStatsNotCombined) {
+  FakeConstraints setup_constraints;
+  setup_constraints.AddOptional(
+      MediaConstraintsInterface::kCombinedAudioVideoBwe, false);
+  ASSERT_TRUE(CreateTestClients(&setup_constraints, &setup_constraints));
+  initializing_client()->AddMediaStream(true, true);
+  initializing_client()->AddMediaStream(false, true);
+  initializing_client()->AddMediaStream(false, true);
+  initializing_client()->AddMediaStream(false, true);
+  LocalP2PTest();
+
+  // Run until a non-zero bw is reported.
+  EXPECT_TRUE_WAIT(receiving_client()->GetAvailableReceivedBandwidthStats() > 0,
+                   kMaxWaitForRembMs);
+
+  // Halt video capturers, then run until we have gotten some audio. Following
+  // REMB should be zero.
+  initializing_client()->StopVideoCapturers();
+  WaitForAudioData(10000);
+  EXPECT_EQ_WAIT(
+      receiving_client()->GetAvailableReceivedBandwidthStats(), 0,
+      kMaxWaitForRembMs);
+}
+
 #endif // if !defined(THREAD_SANITIZER)
diff --git a/app/webrtc/peerconnectionfactory.cc b/app/webrtc/peerconnectionfactory.cc
index 5dccba8..862ceda 100644
--- a/app/webrtc/peerconnectionfactory.cc
+++ b/app/webrtc/peerconnectionfactory.cc
@@ -41,6 +41,7 @@
 #include "talk/media/webrtc/webrtcmediaengine.h"
 #include "talk/media/webrtc/webrtcvideodecoderfactory.h"
 #include "talk/media/webrtc/webrtcvideoencoderfactory.h"
+#include "webrtc/base/bind.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 
 using rtc::scoped_refptr;
diff --git a/app/webrtc/statstypes.h b/app/webrtc/statstypes.h
index 8eae1ad..656b83c 100644
--- a/app/webrtc/statstypes.h
+++ b/app/webrtc/statstypes.h
@@ -36,6 +36,7 @@
 #include <vector>
 
 #include "webrtc/base/basictypes.h"
+#include "webrtc/base/common.h"
 #include "webrtc/base/stringencode.h"
 
 namespace webrtc {
diff --git a/app/webrtc/test/fakeaudiocapturemodule.cc b/app/webrtc/test/fakeaudiocapturemodule.cc
index ff45f14..2ad3f0f 100644
--- a/app/webrtc/test/fakeaudiocapturemodule.cc
+++ b/app/webrtc/test/fakeaudiocapturemodule.cc
@@ -94,13 +94,6 @@
   return frames_received_;
 }
 
-int32_t FakeAudioCaptureModule::Version(char* /*version*/,
-                                        uint32_t& /*remaining_buffer_in_bytes*/,
-                                        uint32_t& /*position*/) const {
-  ASSERT(false);
-  return 0;
-}
-
 int32_t FakeAudioCaptureModule::TimeUntilNextProcess() {
   const uint32 current_time = rtc::Time();
   if (current_time < last_process_time_ms_) {
@@ -325,12 +318,6 @@
   return 0;
 }
 
-int32_t FakeAudioCaptureModule::SpeakerIsAvailable(bool* available) {
-  // No speaker, just dropping audio. Return success.
-  *available = true;
-  return 0;
-}
-
 int32_t FakeAudioCaptureModule::InitSpeaker() {
   // No speaker, just playing from file. Return success.
   return 0;
@@ -341,12 +328,6 @@
   return 0;
 }
 
-int32_t FakeAudioCaptureModule::MicrophoneIsAvailable(bool* available) {
-  // No microphone, just playing from file. Return success.
-  *available = true;
-  return 0;
-}
-
 int32_t FakeAudioCaptureModule::InitMicrophone() {
   // No microphone, just playing from file. Return success.
   return 0;
diff --git a/app/webrtc/test/fakeaudiocapturemodule.h b/app/webrtc/test/fakeaudiocapturemodule.h
index aec3e5e..79b72b6 100644
--- a/app/webrtc/test/fakeaudiocapturemodule.h
+++ b/app/webrtc/test/fakeaudiocapturemodule.h
@@ -76,133 +76,132 @@
   // Only functions called by PeerConnection are implemented, the rest do
   // nothing and return success. If a function is not expected to be called by
   // PeerConnection an assertion is triggered if it is in fact called.
-  virtual int32_t Version(char* version,
-                          uint32_t& remaining_buffer_in_bytes,
-                          uint32_t& position) const;
-  virtual int32_t TimeUntilNextProcess();
-  virtual int32_t Process();
-  virtual int32_t ChangeUniqueId(const int32_t id);
+  virtual int32_t TimeUntilNextProcess() OVERRIDE;
+  virtual int32_t Process() OVERRIDE;
+  virtual int32_t ChangeUniqueId(const int32_t id) OVERRIDE;
 
-  virtual int32_t ActiveAudioLayer(AudioLayer* audio_layer) const;
+  virtual int32_t ActiveAudioLayer(AudioLayer* audio_layer) const OVERRIDE;
 
-  virtual ErrorCode LastError() const;
+  virtual ErrorCode LastError() const OVERRIDE;
   virtual int32_t RegisterEventObserver(
-      webrtc::AudioDeviceObserver* event_callback);
+      webrtc::AudioDeviceObserver* event_callback) OVERRIDE;
 
   // Note: Calling this method from a callback may result in deadlock.
-  virtual int32_t RegisterAudioCallback(webrtc::AudioTransport* audio_callback);
+  virtual int32_t RegisterAudioCallback(
+      webrtc::AudioTransport* audio_callback) OVERRIDE;
 
-  virtual int32_t Init();
-  virtual int32_t Terminate();
-  virtual bool Initialized() const;
+  virtual int32_t Init() OVERRIDE;
+  virtual int32_t Terminate() OVERRIDE;
+  virtual bool Initialized() const OVERRIDE;
 
-  virtual int16_t PlayoutDevices();
-  virtual int16_t RecordingDevices();
-  virtual int32_t PlayoutDeviceName(uint16_t index,
-                                    char name[webrtc::kAdmMaxDeviceNameSize],
-                                    char guid[webrtc::kAdmMaxGuidSize]);
-  virtual int32_t RecordingDeviceName(uint16_t index,
-                                      char name[webrtc::kAdmMaxDeviceNameSize],
-                                      char guid[webrtc::kAdmMaxGuidSize]);
+  virtual int16_t PlayoutDevices() OVERRIDE;
+  virtual int16_t RecordingDevices() OVERRIDE;
+  virtual int32_t PlayoutDeviceName(
+      uint16_t index,
+      char name[webrtc::kAdmMaxDeviceNameSize],
+      char guid[webrtc::kAdmMaxGuidSize]) OVERRIDE;
+  virtual int32_t RecordingDeviceName(
+      uint16_t index,
+      char name[webrtc::kAdmMaxDeviceNameSize],
+      char guid[webrtc::kAdmMaxGuidSize]) OVERRIDE;
 
-  virtual int32_t SetPlayoutDevice(uint16_t index);
-  virtual int32_t SetPlayoutDevice(WindowsDeviceType device);
-  virtual int32_t SetRecordingDevice(uint16_t index);
-  virtual int32_t SetRecordingDevice(WindowsDeviceType device);
+  virtual int32_t SetPlayoutDevice(uint16_t index) OVERRIDE;
+  virtual int32_t SetPlayoutDevice(WindowsDeviceType device) OVERRIDE;
+  virtual int32_t SetRecordingDevice(uint16_t index) OVERRIDE;
+  virtual int32_t SetRecordingDevice(WindowsDeviceType device) OVERRIDE;
 
-  virtual int32_t PlayoutIsAvailable(bool* available);
-  virtual int32_t InitPlayout();
-  virtual bool PlayoutIsInitialized() const;
-  virtual int32_t RecordingIsAvailable(bool* available);
-  virtual int32_t InitRecording();
-  virtual bool RecordingIsInitialized() const;
+  virtual int32_t PlayoutIsAvailable(bool* available) OVERRIDE;
+  virtual int32_t InitPlayout() OVERRIDE;
+  virtual bool PlayoutIsInitialized() const OVERRIDE;
+  virtual int32_t RecordingIsAvailable(bool* available) OVERRIDE;
+  virtual int32_t InitRecording() OVERRIDE;
+  virtual bool RecordingIsInitialized() const OVERRIDE;
 
-  virtual int32_t StartPlayout();
-  virtual int32_t StopPlayout();
-  virtual bool Playing() const;
-  virtual int32_t StartRecording();
-  virtual int32_t StopRecording();
-  virtual bool Recording() const;
+  virtual int32_t StartPlayout() OVERRIDE;
+  virtual int32_t StopPlayout() OVERRIDE;
+  virtual bool Playing() const OVERRIDE;
+  virtual int32_t StartRecording() OVERRIDE;
+  virtual int32_t StopRecording() OVERRIDE;
+  virtual bool Recording() const OVERRIDE;
 
-  virtual int32_t SetAGC(bool enable);
-  virtual bool AGC() const;
+  virtual int32_t SetAGC(bool enable) OVERRIDE;
+  virtual bool AGC() const OVERRIDE;
 
   virtual int32_t SetWaveOutVolume(uint16_t volume_left,
-                                   uint16_t volume_right);
+                                   uint16_t volume_right) OVERRIDE;
   virtual int32_t WaveOutVolume(uint16_t* volume_left,
-                                uint16_t* volume_right) const;
+                                uint16_t* volume_right) const OVERRIDE;
 
-  virtual int32_t SpeakerIsAvailable(bool* available);
-  virtual int32_t InitSpeaker();
-  virtual bool SpeakerIsInitialized() const;
-  virtual int32_t MicrophoneIsAvailable(bool* available);
-  virtual int32_t InitMicrophone();
-  virtual bool MicrophoneIsInitialized() const;
+  virtual int32_t InitSpeaker() OVERRIDE;
+  virtual bool SpeakerIsInitialized() const OVERRIDE;
+  virtual int32_t InitMicrophone() OVERRIDE;
+  virtual bool MicrophoneIsInitialized() const OVERRIDE;
 
-  virtual int32_t SpeakerVolumeIsAvailable(bool* available);
-  virtual int32_t SetSpeakerVolume(uint32_t volume);
-  virtual int32_t SpeakerVolume(uint32_t* volume) const;
-  virtual int32_t MaxSpeakerVolume(uint32_t* max_volume) const;
-  virtual int32_t MinSpeakerVolume(uint32_t* min_volume) const;
-  virtual int32_t SpeakerVolumeStepSize(uint16_t* step_size) const;
+  virtual int32_t SpeakerVolumeIsAvailable(bool* available) OVERRIDE;
+  virtual int32_t SetSpeakerVolume(uint32_t volume) OVERRIDE;
+  virtual int32_t SpeakerVolume(uint32_t* volume) const OVERRIDE;
+  virtual int32_t MaxSpeakerVolume(uint32_t* max_volume) const OVERRIDE;
+  virtual int32_t MinSpeakerVolume(uint32_t* min_volume) const OVERRIDE;
+  virtual int32_t SpeakerVolumeStepSize(uint16_t* step_size) const OVERRIDE;
 
-  virtual int32_t MicrophoneVolumeIsAvailable(bool* available);
-  virtual int32_t SetMicrophoneVolume(uint32_t volume);
-  virtual int32_t MicrophoneVolume(uint32_t* volume) const;
-  virtual int32_t MaxMicrophoneVolume(uint32_t* max_volume) const;
+  virtual int32_t MicrophoneVolumeIsAvailable(bool* available) OVERRIDE;
+  virtual int32_t SetMicrophoneVolume(uint32_t volume) OVERRIDE;
+  virtual int32_t MicrophoneVolume(uint32_t* volume) const OVERRIDE;
+  virtual int32_t MaxMicrophoneVolume(uint32_t* max_volume) const OVERRIDE;
 
-  virtual int32_t MinMicrophoneVolume(uint32_t* min_volume) const;
-  virtual int32_t MicrophoneVolumeStepSize(uint16_t* step_size) const;
+  virtual int32_t MinMicrophoneVolume(uint32_t* min_volume) const OVERRIDE;
+  virtual int32_t MicrophoneVolumeStepSize(uint16_t* step_size) const OVERRIDE;
 
-  virtual int32_t SpeakerMuteIsAvailable(bool* available);
-  virtual int32_t SetSpeakerMute(bool enable);
-  virtual int32_t SpeakerMute(bool* enabled) const;
+  virtual int32_t SpeakerMuteIsAvailable(bool* available) OVERRIDE;
+  virtual int32_t SetSpeakerMute(bool enable) OVERRIDE;
+  virtual int32_t SpeakerMute(bool* enabled) const OVERRIDE;
 
-  virtual int32_t MicrophoneMuteIsAvailable(bool* available);
-  virtual int32_t SetMicrophoneMute(bool enable);
-  virtual int32_t MicrophoneMute(bool* enabled) const;
+  virtual int32_t MicrophoneMuteIsAvailable(bool* available) OVERRIDE;
+  virtual int32_t SetMicrophoneMute(bool enable) OVERRIDE;
+  virtual int32_t MicrophoneMute(bool* enabled) const OVERRIDE;
 
-  virtual int32_t MicrophoneBoostIsAvailable(bool* available);
-  virtual int32_t SetMicrophoneBoost(bool enable);
-  virtual int32_t MicrophoneBoost(bool* enabled) const;
+  virtual int32_t MicrophoneBoostIsAvailable(bool* available) OVERRIDE;
+  virtual int32_t SetMicrophoneBoost(bool enable) OVERRIDE;
+  virtual int32_t MicrophoneBoost(bool* enabled) const OVERRIDE;
 
-  virtual int32_t StereoPlayoutIsAvailable(bool* available) const;
-  virtual int32_t SetStereoPlayout(bool enable);
-  virtual int32_t StereoPlayout(bool* enabled) const;
-  virtual int32_t StereoRecordingIsAvailable(bool* available) const;
-  virtual int32_t SetStereoRecording(bool enable);
-  virtual int32_t StereoRecording(bool* enabled) const;
-  virtual int32_t SetRecordingChannel(const ChannelType channel);
-  virtual int32_t RecordingChannel(ChannelType* channel) const;
+  virtual int32_t StereoPlayoutIsAvailable(bool* available) const OVERRIDE;
+  virtual int32_t SetStereoPlayout(bool enable) OVERRIDE;
+  virtual int32_t StereoPlayout(bool* enabled) const OVERRIDE;
+  virtual int32_t StereoRecordingIsAvailable(bool* available) const OVERRIDE;
+  virtual int32_t SetStereoRecording(bool enable) OVERRIDE;
+  virtual int32_t StereoRecording(bool* enabled) const OVERRIDE;
+  virtual int32_t SetRecordingChannel(const ChannelType channel) OVERRIDE;
+  virtual int32_t RecordingChannel(ChannelType* channel) const OVERRIDE;
 
   virtual int32_t SetPlayoutBuffer(const BufferType type,
-                                   uint16_t size_ms = 0);
+                                   uint16_t size_ms = 0) OVERRIDE;
   virtual int32_t PlayoutBuffer(BufferType* type,
-                                uint16_t* size_ms) const;
-  virtual int32_t PlayoutDelay(uint16_t* delay_ms) const;
-  virtual int32_t RecordingDelay(uint16_t* delay_ms) const;
+                                uint16_t* size_ms) const OVERRIDE;
+  virtual int32_t PlayoutDelay(uint16_t* delay_ms) const OVERRIDE;
+  virtual int32_t RecordingDelay(uint16_t* delay_ms) const OVERRIDE;
 
-  virtual int32_t CPULoad(uint16_t* load) const;
+  virtual int32_t CPULoad(uint16_t* load) const OVERRIDE;
 
   virtual int32_t StartRawOutputFileRecording(
-      const char pcm_file_name_utf8[webrtc::kAdmMaxFileNameSize]);
-  virtual int32_t StopRawOutputFileRecording();
+      const char pcm_file_name_utf8[webrtc::kAdmMaxFileNameSize]) OVERRIDE;
+  virtual int32_t StopRawOutputFileRecording() OVERRIDE;
   virtual int32_t StartRawInputFileRecording(
-      const char pcm_file_name_utf8[webrtc::kAdmMaxFileNameSize]);
-  virtual int32_t StopRawInputFileRecording();
+      const char pcm_file_name_utf8[webrtc::kAdmMaxFileNameSize]) OVERRIDE;
+  virtual int32_t StopRawInputFileRecording() OVERRIDE;
 
-  virtual int32_t SetRecordingSampleRate(const uint32_t samples_per_sec);
-  virtual int32_t RecordingSampleRate(uint32_t* samples_per_sec) const;
-  virtual int32_t SetPlayoutSampleRate(const uint32_t samples_per_sec);
-  virtual int32_t PlayoutSampleRate(uint32_t* samples_per_sec) const;
+  virtual int32_t SetRecordingSampleRate(
+      const uint32_t samples_per_sec) OVERRIDE;
+  virtual int32_t RecordingSampleRate(uint32_t* samples_per_sec) const OVERRIDE;
+  virtual int32_t SetPlayoutSampleRate(const uint32_t samples_per_sec) OVERRIDE;
+  virtual int32_t PlayoutSampleRate(uint32_t* samples_per_sec) const OVERRIDE;
 
-  virtual int32_t ResetAudioDevice();
-  virtual int32_t SetLoudspeakerStatus(bool enable);
-  virtual int32_t GetLoudspeakerStatus(bool* enabled) const;
+  virtual int32_t ResetAudioDevice() OVERRIDE;
+  virtual int32_t SetLoudspeakerStatus(bool enable) OVERRIDE;
+  virtual int32_t GetLoudspeakerStatus(bool* enabled) const OVERRIDE;
   // End of functions inherited from webrtc::AudioDeviceModule.
 
   // The following function is inherited from rtc::MessageHandler.
-  virtual void OnMessage(rtc::Message* msg);
+  virtual void OnMessage(rtc::Message* msg) OVERRIDE;
 
  protected:
   // The constructor is protected because the class needs to be created as a
diff --git a/app/webrtc/test/fakeaudiocapturemodule_unittest.cc b/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
index 9e63c1c..ddacc38 100644
--- a/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
+++ b/app/webrtc/test/fakeaudiocapturemodule_unittest.cc
@@ -145,11 +145,6 @@
 TEST_F(FakeAdmTest, PlayoutTest) {
   EXPECT_EQ(0, fake_audio_capture_module_->RegisterAudioCallback(this));
 
-  bool speaker_available = false;
-  EXPECT_EQ(0, fake_audio_capture_module_->SpeakerIsAvailable(
-      &speaker_available));
-  EXPECT_TRUE(speaker_available);
-
   bool stereo_available = false;
   EXPECT_EQ(0,
             fake_audio_capture_module_->StereoPlayoutIsAvailable(
@@ -182,11 +177,6 @@
 TEST_F(FakeAdmTest, RecordTest) {
   EXPECT_EQ(0, fake_audio_capture_module_->RegisterAudioCallback(this));
 
-  bool microphone_available = false;
-  EXPECT_EQ(0, fake_audio_capture_module_->MicrophoneIsAvailable(
-      &microphone_available));
-  EXPECT_TRUE(microphone_available);
-
   bool stereo_available = false;
   EXPECT_EQ(0, fake_audio_capture_module_->StereoRecordingIsAvailable(
       &stereo_available));
diff --git a/app/webrtc/test/mockpeerconnectionobservers.h b/app/webrtc/test/mockpeerconnectionobservers.h
index 174b80b..0570d40 100644
--- a/app/webrtc/test/mockpeerconnectionobservers.h
+++ b/app/webrtc/test/mockpeerconnectionobservers.h
@@ -133,31 +133,37 @@
   size_t number_of_reports() const { return reports_.size(); }
 
   int AudioOutputLevel() {
-    return GetSsrcStatsValue(
-        webrtc::StatsReport::kStatsValueNameAudioOutputLevel);
+    return GetStatsValue(StatsReport::kStatsReportTypeSsrc,
+                         StatsReport::kStatsValueNameAudioOutputLevel);
   }
 
   int AudioInputLevel() {
-    return GetSsrcStatsValue(
-        webrtc::StatsReport::kStatsValueNameAudioInputLevel);
+    return GetStatsValue(StatsReport::kStatsReportTypeSsrc,
+                         StatsReport::kStatsValueNameAudioInputLevel);
   }
 
   int BytesReceived() {
-    return GetSsrcStatsValue(
-        webrtc::StatsReport::kStatsValueNameBytesReceived);
+    return GetStatsValue(StatsReport::kStatsReportTypeSsrc,
+                         StatsReport::kStatsValueNameBytesReceived);
   }
 
   int BytesSent() {
-    return GetSsrcStatsValue(webrtc::StatsReport::kStatsValueNameBytesSent);
+    return GetStatsValue(StatsReport::kStatsReportTypeSsrc,
+                         StatsReport::kStatsValueNameBytesSent);
+  }
+
+  int AvailableReceiveBandwidth() {
+    return GetStatsValue(StatsReport::kStatsReportTypeBwe,
+                         StatsReport::kStatsValueNameAvailableReceiveBandwidth);
   }
 
  private:
-  int GetSsrcStatsValue(StatsReport::StatsValueName name) {
+  int GetStatsValue(const std::string& type, StatsReport::StatsValueName name) {
     if (reports_.empty()) {
       return 0;
     }
     for (size_t i = 0; i < reports_.size(); ++i) {
-      if (reports_[i].type != StatsReport::kStatsReportTypeSsrc)
+      if (reports_[i].type != type)
         continue;
       webrtc::StatsReport::Values::const_iterator it =
           reports_[i].values.begin();
diff --git a/app/webrtc/test/peerconnectiontestwrapper.cc b/app/webrtc/test/peerconnectiontestwrapper.cc
index 8a4f45c..24932b8 100644
--- a/app/webrtc/test/peerconnectiontestwrapper.cc
+++ b/app/webrtc/test/peerconnectiontestwrapper.cc
@@ -75,9 +75,8 @@
     return false;
   }
 
-  audio_thread_.Start();
   fake_audio_capture_module_ = FakeAudioCaptureModule::Create(
-      &audio_thread_);
+      rtc::Thread::Current());
   if (fake_audio_capture_module_ == NULL) {
     return false;
   }
diff --git a/app/webrtc/test/peerconnectiontestwrapper.h b/app/webrtc/test/peerconnectiontestwrapper.h
index f3477ce..d4a0e4e 100644
--- a/app/webrtc/test/peerconnectiontestwrapper.h
+++ b/app/webrtc/test/peerconnectiontestwrapper.h
@@ -111,7 +111,6 @@
       bool video, const webrtc::FakeConstraints& video_constraints);
 
   std::string name_;
-  rtc::Thread audio_thread_;
   rtc::scoped_refptr<webrtc::PortAllocatorFactoryInterface>
       allocator_factory_;
   rtc::scoped_refptr<webrtc::PeerConnectionInterface> peer_connection_;
diff --git a/app/webrtc/videosource.cc b/app/webrtc/videosource.cc
index 8770e6d..589341d 100644
--- a/app/webrtc/videosource.cc
+++ b/app/webrtc/videosource.cc
@@ -28,6 +28,7 @@
 #include "talk/app/webrtc/videosource.h"
 
 #include <vector>
+#include <cstdlib>
 
 #include "talk/app/webrtc/mediaconstraintsinterface.h"
 #include "talk/session/media/channelmanager.h"
@@ -254,11 +255,15 @@
 
   std::vector<cricket::VideoFormat>::const_iterator it = formats.begin();
   std::vector<cricket::VideoFormat>::const_iterator best_it = formats.begin();
-  int best_diff = abs(default_area - it->width* it->height);
+  int best_diff_area = std::abs(default_area - it->width * it->height);
+  int64 best_diff_interval = kDefaultFormat.interval;
   for (; it != formats.end(); ++it) {
-    int diff = abs(default_area - it->width* it->height);
-    if (diff < best_diff) {
-      best_diff = diff;
+    int diff_area = std::abs(default_area - it->width * it->height);
+    int64 diff_interval = std::abs(kDefaultFormat.interval - it->interval);
+    if (diff_area < best_diff_area ||
+        (diff_area == best_diff_area && diff_interval < best_diff_interval)) {
+      best_diff_area = diff_area;
+      best_diff_interval = diff_interval;
       best_it = it;
     }
   }
diff --git a/app/webrtc/webrtcsdp.cc b/app/webrtc/webrtcsdp.cc
index 792a091..5887409 100644
--- a/app/webrtc/webrtcsdp.cc
+++ b/app/webrtc/webrtcsdp.cc
@@ -71,6 +71,7 @@
 using cricket::kCodecParamSctpProtocol;
 using cricket::kCodecParamSctpStreams;
 using cricket::kCodecParamMaxAverageBitrate;
+using cricket::kCodecParamMaxPlaybackRate;
 using cricket::kCodecParamAssociatedPayloadType;
 using cricket::kWildcardPayloadType;
 using cricket::MediaContentDescription;
@@ -155,6 +156,9 @@
 static const char kAttributeRtcpFb[] = "rtcp-fb";
 static const char kAttributeSendRecv[] = "sendrecv";
 static const char kAttributeInactive[] = "inactive";
+// draft-ietf-mmusic-sctp-sdp-07
+// a=sctp-port
+static const char kAttributeSctpPort[] = "sctp-port";
 
 // Experimental flags
 static const char kAttributeXGoogleFlag[] = "x-google-flag";
@@ -1100,6 +1104,26 @@
   return true;
 }
 
+bool ParseSctpPort(const std::string& line,
+                   int* sctp_port,
+                   SdpParseError* error) {
+  // draft-ietf-mmusic-sctp-sdp-07
+  // a=sctp-port
+  std::vector<std::string> fields;
+  rtc::split(line.substr(kLinePrefixLength),
+                   kSdpDelimiterSpace, &fields);
+  const size_t expected_min_fields = 2;
+  if (fields.size() < expected_min_fields) {
+    return ParseFailedExpectMinFieldNum(line, expected_min_fields, error);
+  }
+  if (!rtc::FromString(fields[1], sctp_port)) {
+    return ParseFailed(line,
+                       "Invalid sctp port value.",
+                       error);
+  }
+  return true;
+}
+
 bool ParseExtmap(const std::string& line, RtpHeaderExtension* extmap,
                  SdpParseError* error) {
   // RFC 5285
@@ -1511,7 +1535,8 @@
     kCodecParamStereo, kCodecParamUseInbandFec, kCodecParamStartBitrate,
     kCodecParamMaxBitrate, kCodecParamMinBitrate, kCodecParamMaxQuantization,
     kCodecParamSctpProtocol, kCodecParamSctpStreams,
-    kCodecParamMaxAverageBitrate, kCodecParamAssociatedPayloadType
+    kCodecParamMaxAverageBitrate, kCodecParamMaxPlaybackRate,
+    kCodecParamAssociatedPayloadType
   };
   for (size_t i = 0; i < ARRAY_SIZE(kFmtpParams); ++i) {
     if (_stricmp(name.c_str(), kFmtpParams[i]) == 0) {
@@ -1563,6 +1588,24 @@
   }
 }
 
+bool AddSctpDataCodec(DataContentDescription* media_desc,
+                      int sctp_port) {
+  if (media_desc->HasCodec(cricket::kGoogleSctpDataCodecId)) {
+    return ParseFailed("",
+                       "Can't have multiple sctp port attributes.",
+                       NULL);
+  }
+  // Add the SCTP Port number as a pseudo-codec "port" parameter
+  cricket::DataCodec codec_port(
+      cricket::kGoogleSctpDataCodecId, cricket::kGoogleSctpDataCodecName,
+      0);
+  codec_port.SetParam(cricket::kCodecParamPort, sctp_port);
+  LOG(INFO) << "AddSctpDataCodec: Got SCTP Port Number "
+            << sctp_port;
+  media_desc->AddCodec(codec_port);
+  return true;
+}
+
 bool GetMinValue(const std::vector<int>& values, int* value) {
   if (values.empty()) {
     return false;
@@ -2129,18 +2172,20 @@
 
     // <fmt>
     std::vector<int> codec_preference;
-    for (size_t j = 3 ; j < fields.size(); ++j) {
-      // TODO(wu): Remove when below bug is fixed.
-      // https://bugzilla.mozilla.org/show_bug.cgi?id=996329
-      if (fields[j] == "" && j == fields.size() - 1) {
-        continue;
-      }
+    if (!is_sctp) {
+      for (size_t j = 3 ; j < fields.size(); ++j) {
+        // TODO(wu): Remove when below bug is fixed.
+        // https://bugzilla.mozilla.org/show_bug.cgi?id=996329
+        if (fields[j] == "" && j == fields.size() - 1) {
+          continue;
+        }
 
-      int pl = 0;
-      if (!GetValueFromString(line, fields[j], &pl, error)) {
-        return false;
+        int pl = 0;
+        if (!GetValueFromString(line, fields[j], &pl, error)) {
+          return false;
+        }
+        codec_preference.push_back(pl);
       }
-      codec_preference.push_back(pl);
     }
 
     // Make a temporary TransportDescription based on |session_td|.
@@ -2167,26 +2212,20 @@
                     codec_preference, pos, &content_name,
                     &transport, candidates, error));
     } else if (HasAttribute(line, kMediaTypeData)) {
-      DataContentDescription* desc =
+      DataContentDescription* data_desc =
           ParseContentDescription<DataContentDescription>(
                     message, cricket::MEDIA_TYPE_DATA, mline_index, protocol,
                     codec_preference, pos, &content_name,
                     &transport, candidates, error);
+      content.reset(data_desc);
 
-      if (desc && protocol == cricket::kMediaProtocolDtlsSctp) {
-        // Add the SCTP Port number as a pseudo-codec "port" parameter
-        cricket::DataCodec codec_port(
-            cricket::kGoogleSctpDataCodecId, cricket::kGoogleSctpDataCodecName,
-            0);
-        codec_port.SetParam(cricket::kCodecParamPort, fields[3]);
-        LOG(INFO) << "ParseMediaDescription: Got SCTP Port Number "
-                  << fields[3];
-        ASSERT(!desc->HasCodec(cricket::kGoogleSctpDataCodecId));
-        desc->AddCodec(codec_port);
+      int p;
+      if (data_desc && protocol == cricket::kMediaProtocolDtlsSctp &&
+          rtc::FromString(fields[3], &p)) {
+        if (!AddSctpDataCodec(data_desc, p))
+          return false;
       }
 
-      content.reset(desc);
-
       // We should always use the default bandwidth for RTP-based data
       // channels.  Don't allow SDP to set the bandwidth, because that
       // would give JS the opportunity to "break the Internet".
@@ -2518,6 +2557,15 @@
       if (!ParseDtlsSetup(line, &(transport->connection_role), error)) {
         return false;
       }
+    } else if (HasAttribute(line, kAttributeSctpPort)) {
+      int sctp_port;
+      if (!ParseSctpPort(line, &sctp_port, error)) {
+        return false;
+      }
+      if (!AddSctpDataCodec(static_cast<DataContentDescription*>(media_desc),
+                            sctp_port)) {
+        return false;
+      }
     } else if (is_rtp) {
       //
       // RTP specific attrubtes
diff --git a/app/webrtc/webrtcsdp_unittest.cc b/app/webrtc/webrtcsdp_unittest.cc
index 6a22e38..560d5da 100644
--- a/app/webrtc/webrtcsdp_unittest.cc
+++ b/app/webrtc/webrtcsdp_unittest.cc
@@ -284,6 +284,16 @@
     "a=mid:data_content_name\r\n"
     "a=sctpmap:5000 webrtc-datachannel 1024\r\n";
 
+// draft-ietf-mmusic-sctp-sdp-07
+static const char kSdpSctpDataChannelStringWithSctpPort[] =
+    "m=application 1 DTLS/SCTP webrtc-datachannel\r\n"
+    "a=fmtp:webrtc-datachannel max-message-size=100000\r\n"
+    "a=sctp-port 5000\r\n"
+    "c=IN IP4 0.0.0.0\r\n"
+    "a=ice-ufrag:ufrag_data\r\n"
+    "a=ice-pwd:pwd_data\r\n"
+    "a=mid:data_content_name\r\n";
+
 static const char kSdpSctpDataChannelWithCandidatesString[] =
     "m=application 2345 DTLS/SCTP 5000\r\n"
     "c=IN IP4 74.125.127.126\r\n"
@@ -2023,6 +2033,36 @@
   EXPECT_TRUE(CompareSessionDescription(jdesc, jdesc_output));
 }
 
+TEST_F(WebRtcSdpTest, DeserializeSdpWithSctpDataChannelsWithSctpPort) {
+  AddSctpDataChannel();
+  JsepSessionDescription jdesc(kDummyString);
+  ASSERT_TRUE(jdesc.Initialize(desc_.Copy(), kSessionId, kSessionVersion));
+
+  std::string sdp_with_data = kSdpString;
+  sdp_with_data.append(kSdpSctpDataChannelStringWithSctpPort);
+  JsepSessionDescription jdesc_output(kDummyString);
+
+  EXPECT_TRUE(SdpDeserialize(sdp_with_data, &jdesc_output));
+  EXPECT_TRUE(CompareSessionDescription(jdesc, jdesc_output));
+}
+
+// Test to check the behaviour if sctp-port is specified
+// on the m= line and in a=sctp-port.
+TEST_F(WebRtcSdpTest, DeserializeSdpWithMultiSctpPort) {
+  AddSctpDataChannel();
+  JsepSessionDescription jdesc(kDummyString);
+  ASSERT_TRUE(jdesc.Initialize(desc_.Copy(), kSessionId, kSessionVersion));
+
+  std::string sdp_with_data = kSdpString;
+  // Append m= attributes
+  sdp_with_data.append(kSdpSctpDataChannelString);
+  // Append a=sctp-port attribute
+  sdp_with_data.append("a=sctp-port 5000\r\n");
+  JsepSessionDescription jdesc_output(kDummyString);
+
+  EXPECT_FALSE(SdpDeserialize(sdp_with_data, &jdesc_output));
+}
+
 // For crbug/344475.
 TEST_F(WebRtcSdpTest, DeserializeSdpWithCorruptedSctpDataChannels) {
   std::string sdp_with_data = kSdpString;
@@ -2071,6 +2111,19 @@
 
   EXPECT_TRUE(SdpDeserialize(sdp_with_data, &jdesc_output));
   EXPECT_TRUE(CompareSessionDescription(jdesc, jdesc_output));
+
+  // We need to test the deserialized JsepSessionDescription from
+  // kSdpSctpDataChannelStringWithSctpPort for
+  // draft-ietf-mmusic-sctp-sdp-07
+  // a=sctp-port
+  sdp_with_data = kSdpString;
+  sdp_with_data.append(kSdpSctpDataChannelStringWithSctpPort);
+  rtc::replace_substrs(default_portstr, strlen(default_portstr),
+                             unusual_portstr, strlen(unusual_portstr),
+                             &sdp_with_data);
+
+  EXPECT_TRUE(SdpDeserialize(sdp_with_data, &jdesc_output));
+  EXPECT_TRUE(CompareSessionDescription(jdesc, jdesc_output));
 }
 
 TEST_F(WebRtcSdpTest, DeserializeSdpWithRtpDataChannelsAndBandwidth) {
diff --git a/app/webrtc/webrtcsession.cc b/app/webrtc/webrtcsession.cc
index 6d57afb..13e2128 100644
--- a/app/webrtc/webrtcsession.cc
+++ b/app/webrtc/webrtcsession.cc
@@ -224,30 +224,28 @@
   cricket::StreamParams stream_out;
   const cricket::ContentInfo* audio_info =
       cricket::GetFirstAudioContent(session_description);
-  if (!audio_info) {
-    return false;
-  }
-  const cricket::MediaContentDescription* audio_content =
-      static_cast<const cricket::MediaContentDescription*>(
-          audio_info->description);
+  if (audio_info) {
+    const cricket::MediaContentDescription* audio_content =
+        static_cast<const cricket::MediaContentDescription*>(
+            audio_info->description);
 
-  if (cricket::GetStreamBySsrc(audio_content->streams(), ssrc, &stream_out)) {
-    *track_id = stream_out.id;
-    return true;
+    if (cricket::GetStreamBySsrc(audio_content->streams(), ssrc, &stream_out)) {
+      *track_id = stream_out.id;
+      return true;
+    }
   }
 
   const cricket::ContentInfo* video_info =
       cricket::GetFirstVideoContent(session_description);
-  if (!video_info) {
-    return false;
-  }
-  const cricket::MediaContentDescription* video_content =
-      static_cast<const cricket::MediaContentDescription*>(
-          video_info->description);
+  if (video_info) {
+    const cricket::MediaContentDescription* video_content =
+        static_cast<const cricket::MediaContentDescription*>(
+            video_info->description);
 
-  if (cricket::GetStreamBySsrc(video_content->streams(), ssrc, &stream_out)) {
-    *track_id = stream_out.id;
-    return true;
+    if (cricket::GetStreamBySsrc(video_content->streams(), ssrc, &stream_out)) {
+      *track_id = stream_out.id;
+      return true;
+    }
   }
   return false;
 }
@@ -389,6 +387,22 @@
   }
 }
 
+uint32 ConvertIceTransportTypeToCandidateFilter(
+    PeerConnectionInterface::IceTransportsType type) {
+  switch (type) {
+    case PeerConnectionInterface::kNone:
+        return cricket::CF_NONE;
+    case PeerConnectionInterface::kRelay:
+        return cricket::CF_RELAY;
+    case PeerConnectionInterface::kNoHost:
+        return (cricket::CF_ALL & ~cricket::CF_HOST);
+    case PeerConnectionInterface::kAll:
+        return cricket::CF_ALL;
+    default: ASSERT(false);
+  }
+  return cricket::CF_NONE;
+}
+
 // Help class used to remember if a a remote peer has requested ice restart by
 // by sending a description with new ice ufrag and password.
 class IceRestartAnswerLatch {
@@ -471,14 +485,16 @@
 }
 
 WebRtcSession::~WebRtcSession() {
-  if (voice_channel_.get()) {
-    SignalVoiceChannelDestroyed();
-    channel_manager_->DestroyVoiceChannel(voice_channel_.release());
-  }
+  // Destroy video_channel_ first since it may have a pointer to the
+  // voice_channel_.
   if (video_channel_.get()) {
     SignalVideoChannelDestroyed();
     channel_manager_->DestroyVideoChannel(video_channel_.release());
   }
+  if (voice_channel_.get()) {
+    SignalVoiceChannelDestroyed();
+    channel_manager_->DestroyVoiceChannel(voice_channel_.release());
+  }
   if (data_channel_.get()) {
     SignalDataChannelDestroyed();
     channel_manager_->DestroyDataChannel(data_channel_.release());
@@ -610,6 +626,10 @@
         cricket::VideoOptions::HIGH);
   }
 
+  SetOptionFromOptionalConstraint(constraints,
+      MediaConstraintsInterface::kCombinedAudioVideoBwe,
+      &audio_options_.combined_audio_video_bwe);
+
   const cricket::VideoCodec default_codec(
       JsepSessionDescription::kDefaultVideoCodecId,
       JsepSessionDescription::kDefaultVideoCodecName,
@@ -636,7 +656,8 @@
   if (options.disable_encryption) {
     webrtc_session_desc_factory_->SetSdesPolicy(cricket::SEC_DISABLED);
   }
-
+  port_allocator()->set_candidate_filter(
+      ConvertIceTransportTypeToCandidateFilter(ice_transport));
   return true;
 }
 
@@ -742,6 +763,7 @@
   if (!UpdateSessionState(action, cricket::CS_LOCAL, err_desc)) {
     return false;
   }
+
   // Kick starting the ice candidates allocation.
   StartCandidatesAllocation();
 
@@ -903,8 +925,10 @@
   return UseCandidate(candidate);
 }
 
-bool WebRtcSession::UpdateIce(PeerConnectionInterface::IceTransportsType type) {
-  return false;
+bool WebRtcSession::SetIceTransports(
+    PeerConnectionInterface::IceTransportsType type) {
+  return port_allocator()->set_candidate_filter(
+        ConvertIceTransportTypeToCandidateFilter(type));
 }
 
 bool WebRtcSession::GetLocalTrackIdBySsrc(uint32 ssrc, std::string* track_id) {
@@ -1425,16 +1449,8 @@
 
 void WebRtcSession::RemoveUnusedChannelsAndTransports(
     const SessionDescription* desc) {
-  const cricket::ContentInfo* voice_info =
-      cricket::GetFirstAudioContent(desc);
-  if ((!voice_info || voice_info->rejected) && voice_channel_) {
-    mediastream_signaling_->OnAudioChannelClose();
-    SignalVoiceChannelDestroyed();
-    const std::string content_name = voice_channel_->content_name();
-    channel_manager_->DestroyVoiceChannel(voice_channel_.release());
-    DestroyTransportProxy(content_name);
-  }
-
+  // Destroy video_channel_ first since it may have a pointer to the
+  // voice_channel_.
   const cricket::ContentInfo* video_info =
       cricket::GetFirstVideoContent(desc);
   if ((!video_info || video_info->rejected) && video_channel_) {
@@ -1445,6 +1461,16 @@
     DestroyTransportProxy(content_name);
   }
 
+  const cricket::ContentInfo* voice_info =
+      cricket::GetFirstAudioContent(desc);
+  if ((!voice_info || voice_info->rejected) && voice_channel_) {
+    mediastream_signaling_->OnAudioChannelClose();
+    SignalVoiceChannelDestroyed();
+    const std::string content_name = voice_channel_->content_name();
+    channel_manager_->DestroyVoiceChannel(voice_channel_.release());
+    DestroyTransportProxy(content_name);
+  }
+
   const cricket::ContentInfo* data_info =
       cricket::GetFirstDataContent(desc);
   if ((!data_info || data_info->rejected) && data_channel_) {
diff --git a/app/webrtc/webrtcsession.h b/app/webrtc/webrtcsession.h
index d1e5645..86ae435 100644
--- a/app/webrtc/webrtcsession.h
+++ b/app/webrtc/webrtcsession.h
@@ -160,7 +160,7 @@
                             std::string* err_desc);
   bool ProcessIceMessage(const IceCandidateInterface* ice_candidate);
 
-  bool UpdateIce(PeerConnectionInterface::IceTransportsType type);
+  bool SetIceTransports(PeerConnectionInterface::IceTransportsType type);
 
   const SessionDescriptionInterface* local_description() const {
     return local_desc_.get();
diff --git a/app/webrtc/webrtcsession_unittest.cc b/app/webrtc/webrtcsession_unittest.cc
index 206f320..7aa87fb 100644
--- a/app/webrtc/webrtcsession_unittest.cc
+++ b/app/webrtc/webrtcsession_unittest.cc
@@ -108,6 +108,8 @@
 static const char kStunAddrHost[] = "99.99.99.1";
 static const SocketAddress kTurnUdpIntAddr("99.99.99.4", 3478);
 static const SocketAddress kTurnUdpExtAddr("99.99.99.6", 0);
+static const char kTurnUsername[] = "test";
+static const char kTurnPassword[] = "test";
 
 static const char kSessionVersion[] = "1";
 
@@ -510,7 +512,7 @@
   }
 
   void VerifyAnswerFromNonCryptoOffer() {
-    // Create a SDP without Crypto.
+    // Create an SDP without Crypto.
     cricket::MediaSessionOptions options;
     options.has_video = true;
     JsepSessionDescription* offer(
@@ -1083,6 +1085,18 @@
     }
   }
 
+  void ConfigureAllocatorWithTurn() {
+    cricket::RelayServerConfig relay_server(cricket::RELAY_TURN);
+    cricket::RelayCredentials credentials(kTurnUsername, kTurnPassword);
+    relay_server.credentials = credentials;
+    relay_server.ports.push_back(cricket::ProtocolAddress(
+        kTurnUdpIntAddr, cricket::PROTO_UDP, false));
+    allocator_->AddRelay(relay_server);
+    allocator_->set_step_delay(cricket::kMinimumStepDelay);
+    allocator_->set_flags(cricket::PORTALLOCATOR_DISABLE_TCP |
+                          cricket::PORTALLOCATOR_ENABLE_BUNDLE);
+  }
+
   cricket::FakeMediaEngine* media_engine_;
   cricket::FakeDataEngine* data_engine_;
   cricket::FakeDeviceManager* device_manager_;
@@ -1162,6 +1176,53 @@
   EXPECT_EQ(6u, observer_.mline_1_candidates_.size());
 }
 
+// Test session delivers no candidates gathered when constraint set to "none".
+TEST_F(WebRtcSessionTest, TestIceTransportsNone) {
+  AddInterface(rtc::SocketAddress(kClientAddrHost1, kClientAddrPort));
+  SetIceTransportType(PeerConnectionInterface::kNone);
+  Init(NULL);
+  mediastream_signaling_.SendAudioVideoStream1();
+  InitiateCall();
+  EXPECT_TRUE_WAIT(observer_.oncandidatesready_, kIceCandidatesTimeout);
+  EXPECT_EQ(0u, observer_.mline_0_candidates_.size());
+  EXPECT_EQ(0u, observer_.mline_1_candidates_.size());
+}
+
+// Test session delivers only relay candidates gathered when constraint set to
+// "relay".
+TEST_F(WebRtcSessionTest, TestIceTransportsRelay) {
+  AddInterface(rtc::SocketAddress(kClientAddrHost1, kClientAddrPort));
+  ConfigureAllocatorWithTurn();
+  SetIceTransportType(PeerConnectionInterface::kRelay);
+  Init(NULL);
+  mediastream_signaling_.SendAudioVideoStream1();
+  InitiateCall();
+  EXPECT_TRUE_WAIT(observer_.oncandidatesready_, kIceCandidatesTimeout);
+  EXPECT_EQ(2u, observer_.mline_0_candidates_.size());
+  EXPECT_EQ(2u, observer_.mline_1_candidates_.size());
+  for (size_t i = 0; i < observer_.mline_0_candidates_.size(); ++i) {
+    EXPECT_EQ(cricket::RELAY_PORT_TYPE,
+              observer_.mline_0_candidates_[i].type());
+  }
+  for (size_t i = 0; i < observer_.mline_1_candidates_.size(); ++i) {
+    EXPECT_EQ(cricket::RELAY_PORT_TYPE,
+              observer_.mline_1_candidates_[i].type());
+  }
+}
+
+// Test session delivers all candidates gathered when constraint set to "all".
+TEST_F(WebRtcSessionTest, TestIceTransportsAll) {
+  AddInterface(rtc::SocketAddress(kClientAddrHost1, kClientAddrPort));
+  SetIceTransportType(PeerConnectionInterface::kAll);
+  Init(NULL);
+  mediastream_signaling_.SendAudioVideoStream1();
+  InitiateCall();
+  EXPECT_TRUE_WAIT(observer_.oncandidatesready_, kIceCandidatesTimeout);
+  // Host + STUN. By default the allocator is not configured to gather relay
+  // candidates.
+  EXPECT_EQ(4u, observer_.mline_0_candidates_.size());
+  EXPECT_EQ(4u, observer_.mline_1_candidates_.size());
+}
+
 TEST_F(WebRtcSessionTest, SetSdpFailedOnInvalidSdp) {
   Init(NULL);
   SessionDescriptionInterface* offer = NULL;
@@ -1211,14 +1272,13 @@
             rtc::FromString<uint64>(offer->session_version()));
 
   SetLocalDescriptionWithoutError(offer);
+  EXPECT_EQ(0u, video_channel_->send_streams().size());
+  EXPECT_EQ(0u, voice_channel_->send_streams().size());
 
   mediastream_signaling_.SendAudioVideoStream2();
   answer = CreateRemoteAnswer(session_->local_description());
   SetRemoteDescriptionWithoutError(answer);
 
-  EXPECT_EQ(0u, video_channel_->send_streams().size());
-  EXPECT_EQ(0u, voice_channel_->send_streams().size());
-
   // Make sure the receive streams have not changed.
   ASSERT_EQ(1u, video_channel_->recv_streams().size());
   EXPECT_TRUE(kVideoTrack2 == video_channel_->recv_streams()[0].id);
@@ -1992,13 +2052,21 @@
 
   const cricket::ContentInfo* content =
       cricket::GetFirstAudioContent(offer->description());
-
   EXPECT_TRUE(content != NULL);
+
   content = cricket::GetFirstVideoContent(offer->description());
   EXPECT_TRUE(content != NULL);
 
-  // TODO(perkj): Should the direction be set to SEND_ONLY if
-  // The constraints is set to not receive audio or video but a track is added?
+  // Sets constraints to false and verifies that audio/video contents are
+  // removed.
+  options.offer_to_receive_audio = 0;
+  options.offer_to_receive_video = 0;
+  offer.reset(CreateOffer(options));
+
+  content = cricket::GetFirstAudioContent(offer->description());
+  EXPECT_TRUE(content == NULL);
+  content = cricket::GetFirstVideoContent(offer->description());
+  EXPECT_TRUE(content == NULL);
 }
 
 // Test that an answer can not be created if the last remote description is not
@@ -2037,8 +2105,7 @@
   Init(NULL);
   // Create a remote offer with audio only.
   cricket::MediaSessionOptions options;
-  options.has_audio = true;
-  options.has_video = false;
+
   rtc::scoped_ptr<JsepSessionDescription> offer(
       CreateRemoteOffer(options));
   ASSERT_TRUE(cricket::GetFirstVideoContent(offer->description()) == NULL);
@@ -2174,7 +2241,6 @@
   SessionDescriptionInterface* offer = CreateOffer();
 
   cricket::MediaSessionOptions options;
-  options.has_video = false;
   SessionDescriptionInterface* answer = CreateRemoteAnswer(offer, options);
 
   // SetLocalDescription and SetRemoteDescriptions takes ownership of offer
@@ -2887,7 +2953,6 @@
 TEST_F(WebRtcSessionTest, TestCreateAnswerWithNewUfragAndPassword) {
   Init(NULL);
   cricket::MediaSessionOptions options;
-  options.has_audio = true;
   options.has_video = true;
   rtc::scoped_ptr<JsepSessionDescription> offer(
       CreateRemoteOffer(options));
@@ -2919,7 +2984,6 @@
 TEST_F(WebRtcSessionTest, TestCreateAnswerWithOldUfragAndPassword) {
   Init(NULL);
   cricket::MediaSessionOptions options;
-  options.has_audio = true;
   options.has_video = true;
   rtc::scoped_ptr<JsepSessionDescription> offer(
       CreateRemoteOffer(options));
@@ -2985,7 +3049,6 @@
 TEST_F(WebRtcSessionTest, SetSdpFailedOnSessionError) {
   Init(NULL);
   cricket::MediaSessionOptions options;
-  options.has_audio = true;
   options.has_video = true;
 
   cricket::BaseSession::Error error_code = cricket::BaseSession::ERROR_CONTENT;
@@ -3311,6 +3374,26 @@
   SetAndVerifyNumUnsignalledRecvStreams(-1, 0);
 }
 
+TEST_F(WebRtcSessionTest, TestCombinedAudioVideoBweConstraint) {
+  constraints_.reset(new FakeConstraints());
+  constraints_->AddOptional(
+      webrtc::MediaConstraintsInterface::kCombinedAudioVideoBwe,
+      true);
+  Init(NULL);
+  mediastream_signaling_.SendAudioVideoStream1();
+  SessionDescriptionInterface* offer = CreateOffer();
+
+  SetLocalDescriptionWithoutError(offer);
+
+  voice_channel_ = media_engine_->GetVoiceChannel(0);
+
+  ASSERT_TRUE(voice_channel_ != NULL);
+  cricket::AudioOptions audio_options;
+  EXPECT_TRUE(voice_channel_->GetOptions(&audio_options));
+  EXPECT_TRUE(
+      audio_options.combined_audio_video_bwe.GetWithDefaultIfUnset(false));
+}
+
 // Tests that we can renegotiate new media content with ICE candidates in the
 // new remote SDP.
 TEST_F(WebRtcSessionTest, TestRenegotiateNewMediaWithCandidatesInSdp) {
diff --git a/build/common.gypi b/build/common.gypi
index 57b21af..7ee4224 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -91,6 +91,7 @@
               # LateBindingSymbolTable::TableInfo from
               # latebindingsymboltable.cc.def and remove below flag.
               '-Wno-address-of-array-temporary',
+              '-Wthread-safety',
             ],
           }],
         ],
diff --git a/examples/android/src/org/appspot/apprtc/AppRTCDemoActivity.java b/examples/android/src/org/appspot/apprtc/AppRTCDemoActivity.java
index 213da7b..468ce22 100644
--- a/examples/android/src/org/appspot/apprtc/AppRTCDemoActivity.java
+++ b/examples/android/src/org/appspot/apprtc/AppRTCDemoActivity.java
@@ -77,7 +77,6 @@
 public class AppRTCDemoActivity extends Activity
     implements AppRTCClient.IceServersObserver {
   private static final String TAG = "AppRTCDemoActivity";
-  private static boolean factoryStaticInitialized;
   private PeerConnectionFactory factory;
   private VideoSource videoSource;
   private boolean videoSourceStopped;
@@ -133,13 +132,6 @@
     hudView.setVisibility(View.INVISIBLE);
     addContentView(hudView, hudLayout);
 
-    if (!factoryStaticInitialized) {
-      abortUnless(PeerConnectionFactory.initializeAndroidGlobals(
-          this, true, true),
-        "Failed to initializeAndroidGlobals");
-      factoryStaticInitialized = true;
-    }
-
     AudioManager audioManager =
         ((AudioManager) getSystemService(AUDIO_SERVICE));
     // TODO(fischman): figure out how to do this Right(tm) and remove the
@@ -282,6 +274,9 @@
 
   @Override
   public void onIceServers(List<PeerConnection.IceServer> iceServers) {
+    abortUnless(PeerConnectionFactory.initializeAndroidGlobals(
+      this, true, true, VideoRendererGui.getEGLContext()),
+        "Failed to initializeAndroidGlobals");
     factory = new PeerConnectionFactory();
 
     MediaConstraints pcConstraints = appRtcClient.pcConstraints();
diff --git a/examples/call/callclient.cc b/examples/call/callclient.cc
index 2c8a6bc..b31e24a 100644
--- a/examples/call/callclient.cc
+++ b/examples/call/callclient.cc
@@ -889,10 +889,8 @@
     AddSession(call_->InitiateSession(jid, media_client_->jid(), options));
   }
   media_client_->SetFocus(call_);
-  if (call_->has_video() && render_) {
-    if (!options.is_muc) {
-      call_->SetLocalRenderer(local_renderer_);
-    }
+  if (call_->has_video() && render_ && !options.is_muc) {
+    // TODO(pthatcher): Hookup local_render_ to the local capturer.
   }
   if (options.is_muc) {
     const std::string& nick = mucs_[jid]->local_jid().resource();
@@ -1086,7 +1084,7 @@
   call_->AcceptSession(session, options);
   media_client_->SetFocus(call_);
   if (call_->has_video() && render_) {
-    call_->SetLocalRenderer(local_renderer_);
+    // TODO(pthatcher): Hookup local_render_ to the local capturer.
     RenderAllStreams(call_, session, true);
   }
   SetupAcceptedCall();
diff --git a/examples/objc/AppRTCDemo/ios/APPRTCViewController.m b/examples/objc/AppRTCDemo/ios/APPRTCViewController.m
index a4a0bd3..8042762 100644
--- a/examples/objc/AppRTCDemo/ios/APPRTCViewController.m
+++ b/examples/objc/AppRTCDemo/ios/APPRTCViewController.m
@@ -63,6 +63,18 @@
 
 - (void)viewDidLoad {
   [super viewDidLoad];
+
+  self.remoteVideoView =
+      [[RTCEAGLVideoView alloc] initWithFrame:self.blackView.bounds];
+  self.remoteVideoView.delegate = self;
+  self.remoteVideoView.transform = CGAffineTransformMakeScale(-1, 1);
+  [self.blackView addSubview:self.remoteVideoView];
+
+  self.localVideoView =
+      [[RTCEAGLVideoView alloc] initWithFrame:self.blackView.bounds];
+  self.localVideoView.delegate = self;
+  [self.blackView addSubview:self.localVideoView];
+
   self.statusBarOrientation =
       [UIApplication sharedApplication].statusBarOrientation;
   self.roomInput.delegate = self;
@@ -181,25 +193,13 @@
   self.instructionsView.hidden = NO;
   self.logView.hidden = YES;
   self.logView.text = nil;
+  self.localVideoView.videoTrack = nil;
+  self.remoteVideoView.videoTrack = nil;
   self.blackView.hidden = YES;
-  [self.remoteVideoView removeFromSuperview];
-  self.remoteVideoView = nil;
-  [self.localVideoView removeFromSuperview];
-  self.localVideoView = nil;
 }
 
 - (void)setupCaptureSession {
   self.blackView.hidden = NO;
-  self.remoteVideoView =
-      [[RTCEAGLVideoView alloc] initWithFrame:self.blackView.bounds];
-  self.remoteVideoView.delegate = self;
-  self.remoteVideoView.transform = CGAffineTransformMakeScale(-1, 1);
-  [self.blackView addSubview:self.remoteVideoView];
-
-  self.localVideoView =
-      [[RTCEAGLVideoView alloc] initWithFrame:self.blackView.bounds];
-  self.localVideoView.delegate = self;
-  [self.blackView addSubview:self.localVideoView];
   [self updateVideoViewLayout];
 }
 
diff --git a/examples/peerconnection/server/utils.h b/examples/peerconnection/server/utils.h
index d05a2c3..5320d5a 100644
--- a/examples/peerconnection/server/utils.h
+++ b/examples/peerconnection/server/utils.h
@@ -29,18 +29,7 @@
 #define TALK_EXAMPLES_PEERCONNECTION_SERVER_UTILS_H_
 #pragma once
 
-#ifndef assert
-#ifndef WIN32
 #include <assert.h>
-#else
-#ifndef NDEBUG
-#define assert(expr)  ((void)((expr) ? true : __debugbreak()))
-#else
-#define assert(expr)  ((void)0)
-#endif  // NDEBUG
-#endif  // WIN32
-#endif  // assert
-
 #include <string>
 
 #ifndef ARRAYSIZE
diff --git a/libjingle.gyp b/libjingle.gyp
index f4f9bf9..335a788 100755
--- a/libjingle.gyp
+++ b/libjingle.gyp
@@ -303,27 +303,13 @@
       'dependencies': [
         '<(DEPTH)/third_party/expat/expat.gyp:expat',
         '<(DEPTH)/third_party/jsoncpp/jsoncpp.gyp:jsoncpp',
-        '<(webrtc_root)/base/base.gyp:webrtc_base',
+        '<(webrtc_root)/base/base.gyp:rtc_base',
       ],
       'export_dependent_settings': [
         '<(DEPTH)/third_party/expat/expat.gyp:expat',
         '<(DEPTH)/third_party/jsoncpp/jsoncpp.gyp:jsoncpp',
       ],
       'sources': [
-        'xmllite/qname.cc',
-        'xmllite/qname.h',
-        'xmllite/xmlbuilder.cc',
-        'xmllite/xmlbuilder.h',
-        'xmllite/xmlconstants.cc',
-        'xmllite/xmlconstants.h',
-        'xmllite/xmlelement.cc',
-        'xmllite/xmlelement.h',
-        'xmllite/xmlnsstack.cc',
-        'xmllite/xmlnsstack.h',
-        'xmllite/xmlparser.cc',
-        'xmllite/xmlparser.h',
-        'xmllite/xmlprinter.cc',
-        'xmllite/xmlprinter.h',
         'xmpp/asyncsocket.h',
         'xmpp/chatroommodule.h',
         'xmpp/chatroommoduleimpl.cc',
@@ -400,48 +386,6 @@
       ],
     },  # target libjingle
     {
-      'target_name': 'libjingle_sound',
-      'type': 'static_library',
-      'dependencies': [
-        'libjingle',
-      ],
-      'sources': [
-        'sound/automaticallychosensoundsystem.h',
-        'sound/nullsoundsystem.cc',
-        'sound/nullsoundsystem.h',
-        'sound/nullsoundsystemfactory.cc',
-        'sound/nullsoundsystemfactory.h',
-        'sound/platformsoundsystem.cc',
-        'sound/platformsoundsystem.h',
-        'sound/platformsoundsystemfactory.cc',
-        'sound/platformsoundsystemfactory.h',
-        'sound/sounddevicelocator.h',
-        'sound/soundinputstreaminterface.h',
-        'sound/soundoutputstreaminterface.h',
-        'sound/soundsystemfactory.h',
-        'sound/soundsysteminterface.cc',
-        'sound/soundsysteminterface.h',
-        'sound/soundsystemproxy.cc',
-        'sound/soundsystemproxy.h',
-      ],
-      'conditions': [
-        ['OS=="linux"', {
-          'sources': [
-            'sound/alsasoundsystem.cc',
-            'sound/alsasoundsystem.h',
-            'sound/alsasymboltable.cc',
-            'sound/alsasymboltable.h',
-            'sound/linuxsoundsystem.cc',
-            'sound/linuxsoundsystem.h',
-            'sound/pulseaudiosoundsystem.cc',
-            'sound/pulseaudiosoundsystem.h',
-            'sound/pulseaudiosymboltable.cc',
-            'sound/pulseaudiosymboltable.h',
-          ],
-        }],
-      ],
-    },  # target libjingle_sound
-    {
       'target_name': 'libjingle_media',
       'type': 'static_library',
       'include_dirs': [
@@ -452,14 +396,14 @@
       'dependencies': [
         '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
         '<(DEPTH)/third_party/usrsctp/usrsctp.gyp:usrsctplib',
-        '<(webrtc_root)/modules/modules.gyp:video_capture_module',
         '<(webrtc_root)/modules/modules.gyp:video_render_module',
         '<(webrtc_root)/webrtc.gyp:webrtc',
         '<(webrtc_root)/voice_engine/voice_engine.gyp:voice_engine',
+        '<(webrtc_root)/sound/sound.gyp:rtc_sound',
         '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
         '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:field_trial_default',
+        '<(webrtc_root)/libjingle/xmllite/xmllite.gyp:rtc_xmllite',
         'libjingle',
-        'libjingle_sound',
       ],
       'direct_dependent_settings': {
         'include_dirs': [
@@ -485,8 +429,6 @@
         'media/base/filemediaengine.cc',
         'media/base/filemediaengine.h',
         'media/base/hybriddataengine.h',
-        'media/base/hybridvideoengine.cc',
-        'media/base/hybridvideoengine.h',
         'media/base/mediachannel.h',
         'media/base/mediacommon.h',
         'media/base/mediaengine.cc',
@@ -556,6 +498,17 @@
         'media/webrtc/webrtcvoiceengine.h',
       ],
       'conditions': [
+        ['build_with_chromium==1', {
+          'dependencies': [
+            '<(webrtc_root)/modules/modules.gyp:video_capture_module_impl',
+            '<(webrtc_root)/modules/modules.gyp:video_render_module_impl',
+          ],
+        }, {
+          'dependencies': [
+            '<(webrtc_root)/modules/modules.gyp:video_capture_module_internal_impl',
+            '<(webrtc_root)/modules/modules.gyp:video_render_module_internal_impl',
+          ],
+        }],
         ['OS=="linux"', {
           'sources': [
             'media/devices/gtkvideorenderer.cc',
@@ -631,6 +584,7 @@
           'link_settings': {
             'xcode_settings': {
               'OTHER_LDFLAGS': [
+                '-weak_framework AVFoundation',
                 '-framework Cocoa',
                 '-framework CoreAudio',
                 '-framework CoreVideo',
diff --git a/libjingle_tests.gyp b/libjingle_tests.gyp
index 76f22ea..44adec3 100755
--- a/libjingle_tests.gyp
+++ b/libjingle_tests.gyp
@@ -33,7 +33,7 @@
       'type': 'static_library',
       'dependencies': [
         '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
-        '<(webrtc_root)/base/base_tests.gyp:webrtc_base_tests_utils',
+        '<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
         '<@(libjingle_tests_additional_deps)',
       ],
       'direct_dependent_settings': {
@@ -74,18 +74,13 @@
       'type': 'executable',
       'includes': [ 'build/ios_tests.gypi', ],
       'dependencies': [
-        '<(webrtc_root)/base/base.gyp:webrtc_base',
-        '<(webrtc_root)/base/base_tests.gyp:webrtc_base_tests_utils',
+        '<(webrtc_root)/base/base.gyp:rtc_base',
+        '<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
+        '<(webrtc_root)/libjingle/xmllite/xmllite.gyp:rtc_xmllite',
         'libjingle.gyp:libjingle',
         'libjingle_unittest_main',
       ],
       'sources': [
-        'xmllite/qname_unittest.cc',
-        'xmllite/xmlbuilder_unittest.cc',
-        'xmllite/xmlelement_unittest.cc',
-        'xmllite/xmlnsstack_unittest.cc',
-        'xmllite/xmlparser_unittest.cc',
-        'xmllite/xmlprinter_unittest.cc',
         'xmpp/fakexmppclient.h',
         'xmpp/hangoutpubsubclient_unittest.cc',
         'xmpp/jid_unittest.cc',
@@ -104,22 +99,10 @@
       ],  # sources
     },  # target libjingle_unittest
     {
-      'target_name': 'libjingle_sound_unittest',
-      'type': 'executable',
-      'dependencies': [
-        '<(webrtc_root)/base/base_tests.gyp:webrtc_base_tests_utils',
-        'libjingle.gyp:libjingle_sound',
-        'libjingle_unittest_main',
-      ],
-      'sources': [
-        'sound/automaticallychosensoundsystem_unittest.cc',
-      ],
-    },  # target libjingle_sound_unittest
-    {
       'target_name': 'libjingle_media_unittest',
       'type': 'executable',
       'dependencies': [
-        '<(webrtc_root)/base/base_tests.gyp:webrtc_base_tests_utils',
+        '<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
         'libjingle.gyp:libjingle_media',
         'libjingle_unittest_main',
       ],
@@ -157,9 +140,8 @@
         'media/sctp/sctpdataengine_unittest.cc',
         'media/webrtc/webrtcpassthroughrender_unittest.cc',
         'media/webrtc/webrtcvideocapturer_unittest.cc',
-        # Omitted because depends on non-open-source testdata files.
-        # 'media/base/videoframe_unittest.h',
-        # 'media/webrtc/webrtcvideoframe_unittest.cc',
+        'media/base/videoframe_unittest.h',
+        'media/webrtc/webrtcvideoframe_unittest.cc',
 
         # Disabled because some tests fail.
         # TODO(ronghuawu): Reenable these tests.
@@ -171,6 +153,11 @@
       ],
       'conditions': [
         ['OS=="win"', {
+          'dependencies': [
+            '<(DEPTH)/net/third_party/nss/ssl.gyp:libssl',
+            '<(DEPTH)/third_party/nss/nss.gyp:nspr',
+            '<(DEPTH)/third_party/nss/nss.gyp:nss',
+          ],
           'msvs_settings': {
             'VCLinkerTool': {
               'AdditionalDependencies': [
@@ -194,7 +181,7 @@
       'type': 'executable',
       'dependencies': [
         '<(DEPTH)/third_party/libsrtp/libsrtp.gyp:libsrtp',
-        '<(webrtc_root)/base/base_tests.gyp:webrtc_base_tests_utils',
+        '<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
         'libjingle.gyp:libjingle',
         'libjingle.gyp:libjingle_p2p',
         'libjingle_unittest_main',
@@ -253,7 +240,7 @@
       'type': 'executable',
       'dependencies': [
         '<(DEPTH)/testing/gmock.gyp:gmock',
-        '<(webrtc_root)/base/base_tests.gyp:webrtc_base_tests_utils',
+        '<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
         'libjingle.gyp:libjingle',
         'libjingle.gyp:libjingle_p2p',
         'libjingle.gyp:libjingle_peerconnection',
@@ -386,7 +373,7 @@
           'type': 'executable',
           'includes': [ 'build/ios_tests.gypi', ],
           'dependencies': [
-            '<(webrtc_root)/base/base_tests.gyp:webrtc_base_tests_utils',
+            '<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
             'libjingle.gyp:libjingle_peerconnection_objc',
           ],
           'sources': [
@@ -486,20 +473,6 @@
           ],
         },
         {
-          'target_name': 'libjingle_sound_unittest_run',
-          'type': 'none',
-          'dependencies': [
-            'libjingle_sound_unittest',
-          ],
-          'includes': [
-            'build/isolate.gypi',
-            'libjingle_sound_unittest.isolate',
-          ],
-          'sources': [
-            'libjingle_sound_unittest.isolate',
-          ],
-        },
-        {
           'target_name': 'libjingle_unittest_run',
           'type': 'none',
           'dependencies': [
diff --git a/media/base/constants.cc b/media/base/constants.cc
index cd10ef7..19a960f 100644
--- a/media/base/constants.cc
+++ b/media/base/constants.cc
@@ -59,6 +59,7 @@
 const char kCodecParamStereo[] = "stereo";
 const char kCodecParamUseInbandFec[] = "useinbandfec";
 const char kCodecParamMaxAverageBitrate[] = "maxaveragebitrate";
+const char kCodecParamMaxPlaybackRate[] = "maxplaybackrate";
 
 const char kCodecParamSctpProtocol[] = "protocol";
 const char kCodecParamSctpStreams[] = "streams";
@@ -72,6 +73,7 @@
 const int kOpusDefaultSPropStereo = 0;
 const int kOpusDefaultStereo = 0;
 const int kOpusDefaultUseInbandFec = 0;
+const int kOpusDefaultMaxPlaybackRate = 48000;
 
 const int kPreferredMaxPTime = 60;
 const int kPreferredMinPTime = 10;
diff --git a/media/base/constants.h b/media/base/constants.h
index 5ac1be2..5168acb 100644
--- a/media/base/constants.h
+++ b/media/base/constants.h
@@ -62,6 +62,7 @@
 extern const char kCodecParamStereo[];
 extern const char kCodecParamUseInbandFec[];
 extern const char kCodecParamMaxAverageBitrate[];
+extern const char kCodecParamMaxPlaybackRate[];
 extern const char kCodecParamSctpProtocol[];
 extern const char kCodecParamSctpStreams[];
 
@@ -79,6 +80,8 @@
 extern const int kOpusDefaultSPropStereo;
 extern const int kOpusDefaultStereo;
 extern const int kOpusDefaultUseInbandFec;
+extern const int kOpusDefaultMaxPlaybackRate;
+
 // Prefered values in this code base. Note that they may differ from the default
 // values in http://tools.ietf.org/html/draft-spittka-payload-rtp-opus-03
 // Only frames larger or equal to 10 ms are currently supported in this code
diff --git a/media/base/executablehelpers.h b/media/base/executablehelpers.h
new file mode 100644
index 0000000..2dde010
--- /dev/null
+++ b/media/base/executablehelpers.h
@@ -0,0 +1,100 @@
+/*
+ * libjingle
+ * Copyright 2014 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_MEDIA_BASE_EXECUTABLEHELPERS_H_
+#define TALK_MEDIA_BASE_EXECUTABLEHELPERS_H_
+
+#ifdef OSX
+#include <mach-o/dyld.h>
+#endif
+
+#include <string>
+
+#include "webrtc/base/logging.h"
+#include "webrtc/base/pathutils.h"
+
+namespace rtc {
+
+// Returns the path to the running executable or an empty path.
+// TODO(thorcarpenter): Consolidate with FluteClient::get_executable_dir.
+inline Pathname GetExecutablePath() {
+  const int32 kMaxExePathSize = 255;
+#ifdef WIN32
+  TCHAR exe_path_buffer[kMaxExePathSize];
+  DWORD copied_length = GetModuleFileName(NULL,  // NULL = Current process
+                                          exe_path_buffer, kMaxExePathSize);
+  if (0 == copied_length) {
+    LOG(LS_ERROR) << "Copied length is zero";
+    return rtc::Pathname();
+  }
+  if (kMaxExePathSize == copied_length) {
+    LOG(LS_ERROR) << "Buffer too small";
+    return rtc::Pathname();
+  }
+#ifdef UNICODE
+  std::wstring wdir(exe_path_buffer);
+  std::string dir_tmp(wdir.begin(), wdir.end());
+  rtc::Pathname path(dir_tmp);
+#else  // UNICODE
+  rtc::Pathname path(exe_path_buffer);
+#endif  // UNICODE
+#elif defined(OSX) || defined(LINUX)
+  char exe_path_buffer[kMaxExePathSize];
+#ifdef OSX
+  uint32_t copied_length = kMaxExePathSize - 1;
+  if (_NSGetExecutablePath(exe_path_buffer, &copied_length) == -1) {
+    LOG(LS_ERROR) << "Buffer too small";
+    return rtc::Pathname();
+  }
+#elif defined LINUX
+  int32 copied_length = kMaxExePathSize - 1;
+  const char* kProcExeFmt = "/proc/%d/exe";
+  char proc_exe_link[40];
+  snprintf(proc_exe_link, sizeof(proc_exe_link), kProcExeFmt, getpid());
+  copied_length = readlink(proc_exe_link, exe_path_buffer, copied_length);
+  if (copied_length == -1) {
+    LOG_ERR(LS_ERROR) << "Error reading link " << proc_exe_link;
+    return rtc::Pathname();
+  }
+  if (copied_length == kMaxExePathSize - 1) {
+    LOG(LS_ERROR) << "Probably truncated result when reading link "
+                  << proc_exe_link;
+    return rtc::Pathname();
+  }
+  exe_path_buffer[copied_length] = '\0';
+#endif  // LINUX
+  rtc::Pathname path(exe_path_buffer);
+#else  // Android || IOS
+  rtc::Pathname path;
+#endif  // OSX || LINUX
+  return path;
+}
+
+}  // namespace rtc
+
+#endif  // TALK_MEDIA_BASE_EXECUTABLEHELPERS_H_
+
diff --git a/media/base/fakemediaengine.h b/media/base/fakemediaengine.h
index 7bc3958..a6eabef 100644
--- a/media/base/fakemediaengine.h
+++ b/media/base/fakemediaengine.h
@@ -868,7 +868,7 @@
 
 class FakeVideoEngine : public FakeBaseEngine {
  public:
-  FakeVideoEngine() : renderer_(NULL), capture_(false), processor_(NULL) {
+  FakeVideoEngine() : capture_(false), processor_(NULL) {
     // Add a fake video codec. Note that the name must not be "" as there are
     // sanity checks against that.
     codecs_.push_back(VideoCodec(0, "fake_video_codec", 0, 0, 0, 0));
@@ -926,10 +926,6 @@
     options_changed_ = true;
     return true;
   }
-  bool SetLocalRenderer(VideoRenderer* r) {
-    renderer_ = r;
-    return true;
-  }
   bool SetCapture(bool capture) {
     capture_ = capture;
     return true;
@@ -946,7 +942,6 @@
   std::vector<VideoCodec> codecs_;
   VideoEncoderConfig default_encoder_config_;
   std::string in_device_;
-  VideoRenderer* renderer_;
   bool capture_;
   VideoProcessor* processor_;
   VideoOptions options_;
@@ -994,7 +989,6 @@
   }
   const std::string& audio_in_device() const { return voice_.in_device_; }
   const std::string& audio_out_device() const { return voice_.out_device_; }
-  VideoRenderer* local_renderer() { return video_.renderer_; }
   int voice_loglevel() const { return voice_.loglevel_; }
   const std::string& voice_logfilter() const { return voice_.logfilter_; }
   int video_loglevel() const { return video_.loglevel_; }
diff --git a/media/base/filemediaengine.h b/media/base/filemediaengine.h
index e546328..d3e99a8 100644
--- a/media/base/filemediaengine.h
+++ b/media/base/filemediaengine.h
@@ -88,7 +88,6 @@
   virtual SoundclipMedia* CreateSoundclip() { return NULL; }
   virtual AudioOptions GetAudioOptions() const { return AudioOptions(); }
   virtual bool SetAudioOptions(const AudioOptions& options) { return true; }
-  virtual bool SetVideoOptions(const VideoOptions& options) { return true; }
   virtual bool SetAudioDelayOffset(int offset) { return true; }
   virtual bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config) {
     return true;
@@ -113,7 +112,6 @@
   virtual bool SetOutputVolume(int level) { return true; }
   virtual int GetInputLevel() { return 0; }
   virtual bool SetLocalMonitor(bool enable) { return true; }
-  virtual bool SetLocalRenderer(VideoRenderer* renderer) { return true; }
   // TODO(whyuan): control channel send?
   virtual bool SetVideoCapture(bool capture) { return true; }
   virtual const std::vector<AudioCodec>& audio_codecs() {
diff --git a/media/base/filemediaengine_unittest.cc b/media/base/filemediaengine_unittest.cc
index 1f7405d..c542baf 100644
--- a/media/base/filemediaengine_unittest.cc
+++ b/media/base/filemediaengine_unittest.cc
@@ -223,8 +223,6 @@
   EXPECT_TRUE(NULL == engine_->CreateSoundclip());
   cricket::AudioOptions audio_options;
   EXPECT_TRUE(engine_->SetAudioOptions(audio_options));
-  cricket::VideoOptions video_options;
-  EXPECT_TRUE(engine_->SetVideoOptions(video_options));
   VideoEncoderConfig video_encoder_config;
   EXPECT_TRUE(engine_->SetDefaultVideoEncoderConfig(video_encoder_config));
   EXPECT_TRUE(engine_->SetSoundDevices(NULL, NULL));
@@ -232,7 +230,6 @@
   EXPECT_TRUE(engine_->SetOutputVolume(0));
   EXPECT_EQ(0, engine_->GetInputLevel());
   EXPECT_TRUE(engine_->SetLocalMonitor(true));
-  EXPECT_TRUE(engine_->SetLocalRenderer(NULL));
   EXPECT_TRUE(engine_->SetVideoCapture(true));
   EXPECT_EQ(0U, engine_->audio_codecs().size());
   EXPECT_EQ(0U, engine_->video_codecs().size());
diff --git a/media/base/hybridvideoengine.cc b/media/base/hybridvideoengine.cc
deleted file mode 100644
index 289c4fe..0000000
--- a/media/base/hybridvideoengine.cc
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * libjingle
- * Copyright 2004 Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/media/base/hybridvideoengine.h"
-
-#include "webrtc/base/logging.h"
-
-namespace cricket {
-
-HybridVideoMediaChannel::HybridVideoMediaChannel(
-    HybridVideoEngineInterface* engine,
-    VideoMediaChannel* channel1,
-    VideoMediaChannel* channel2)
-    : engine_(engine),
-      channel1_(channel1),
-      channel2_(channel2),
-      active_channel_(NULL),
-      sending_(false) {
-}
-
-HybridVideoMediaChannel::~HybridVideoMediaChannel() {
-}
-
-void HybridVideoMediaChannel::SetInterface(NetworkInterface* iface) {
-  if (channel1_) {
-    channel1_->SetInterface(iface);
-  }
-  if (channel2_) {
-    channel2_->SetInterface(iface);
-  }
-}
-
-bool HybridVideoMediaChannel::SetOptions(const VideoOptions &options) {
-  bool ret = true;
-  if (channel1_) {
-    ret = channel1_->SetOptions(options);
-  }
-  if (channel2_ && ret) {
-    ret = channel2_->SetOptions(options);
-  }
-  return ret;
-}
-
-bool HybridVideoMediaChannel::GetOptions(VideoOptions *options) const {
-  if (active_channel_) {
-    return active_channel_->GetOptions(options);
-  }
-  if (channel1_) {
-    return channel1_->GetOptions(options);
-  }
-  if (channel2_) {
-    return channel2_->GetOptions(options);
-  }
-  return false;
-}
-
-bool HybridVideoMediaChannel::SetRecvCodecs(
-    const std::vector<VideoCodec>& codecs) {
-  // Only give each channel the codecs it knows about.
-  bool ret = true;
-  std::vector<VideoCodec> codecs1, codecs2;
-  SplitCodecs(codecs, &codecs1, &codecs2);
-  if (channel1_) {
-    ret = channel1_->SetRecvCodecs(codecs1);
-  }
-  if (channel2_ && ret) {
-    ret = channel2_->SetRecvCodecs(codecs2);
-  }
-  return ret;
-}
-
-bool HybridVideoMediaChannel::SetRecvRtpHeaderExtensions(
-    const std::vector<RtpHeaderExtension>& extensions) {
-  bool ret = true;
-  if (channel1_) {
-    ret = channel1_->SetRecvRtpHeaderExtensions(extensions);
-  }
-  if (channel2_ && ret) {
-    ret = channel2_->SetRecvRtpHeaderExtensions(extensions);
-  }
-  return ret;
-}
-
-bool HybridVideoMediaChannel::SetRenderer(uint32 ssrc,
-                                          VideoRenderer* renderer) {
-  bool ret = true;
-  if (channel1_) {
-    ret = channel1_->SetRenderer(ssrc, renderer);
-  }
-  if (channel2_ && ret) {
-    ret = channel2_->SetRenderer(ssrc, renderer);
-  }
-  return ret;
-}
-
-bool HybridVideoMediaChannel::SetRender(bool render) {
-  bool ret = true;
-  if (channel1_) {
-    ret = channel1_->SetRender(render);
-  }
-  if (channel2_ && ret) {
-    ret = channel2_->SetRender(render);
-  }
-  return ret;
-}
-
-bool HybridVideoMediaChannel::MuteStream(uint32 ssrc, bool muted) {
-  bool ret = true;
-  if (channel1_) {
-    ret = channel1_->MuteStream(ssrc, muted);
-  }
-  if (channel2_ && ret) {
-    ret = channel2_->MuteStream(ssrc, muted);
-  }
-  return ret;
-}
-
-bool HybridVideoMediaChannel::SetSendCodecs(
-    const std::vector<VideoCodec>& codecs) {
-  // Use the input to this function to decide what impl we're going to use.
-  if (!active_channel_ && !SelectActiveChannel(codecs)) {
-    LOG(LS_WARNING) << "Failed to select active channel";
-    return false;
-  }
-  // Only give the active channel the codecs it knows about.
-  std::vector<VideoCodec> codecs1, codecs2;
-  SplitCodecs(codecs, &codecs1, &codecs2);
-  const std::vector<VideoCodec>& codecs_to_set =
-      (active_channel_ == channel1_.get()) ? codecs1 : codecs2;
-  bool return_value = active_channel_->SetSendCodecs(codecs_to_set);
-  if (!return_value) {
-    return false;
-  }
-  VideoCodec send_codec;
-  return_value = active_channel_->GetSendCodec(&send_codec);
-  if (!return_value) {
-    return false;
-  }
-  engine_->OnNewSendResolution(send_codec.width, send_codec.height);
-  active_channel_->UpdateAspectRatio(send_codec.width, send_codec.height);
-  return true;
-}
-
-bool HybridVideoMediaChannel::GetSendCodec(VideoCodec* send_codec) {
-  if (!active_channel_) {
-    return false;
-  }
-  return active_channel_->GetSendCodec(send_codec);
-}
-
-bool HybridVideoMediaChannel::SetSendStreamFormat(uint32 ssrc,
-                                                  const VideoFormat& format) {
-  return active_channel_ && active_channel_->SetSendStreamFormat(ssrc, format);
-}
-
-bool HybridVideoMediaChannel::SetSendRtpHeaderExtensions(
-    const std::vector<RtpHeaderExtension>& extensions) {
-  return active_channel_ &&
-      active_channel_->SetSendRtpHeaderExtensions(extensions);
-}
-
-bool HybridVideoMediaChannel::SetStartSendBandwidth(int bps) {
-  return active_channel_ && active_channel_->SetStartSendBandwidth(bps);
-}
-
-bool HybridVideoMediaChannel::SetMaxSendBandwidth(int bps) {
-  return active_channel_ && active_channel_->SetMaxSendBandwidth(bps);
-}
-
-bool HybridVideoMediaChannel::SetSend(bool send) {
-  if (send == sending()) {
-    return true;  // no action required if already set.
-  }
-
-  bool ret = active_channel_ &&
-      active_channel_->SetSend(send);
-
-  // Returns error and don't connect the signal if starting up.
-  // Disconnects the signal anyway if shutting down.
-  if (ret || !send) {
-    // TODO(juberti): Remove this hack that connects the WebRTC channel
-    // to the capturer.
-    if (active_channel_ == channel1_.get()) {
-      engine_->OnSendChange1(channel1_.get(), send);
-    } else {
-      engine_->OnSendChange2(channel2_.get(), send);
-    }
-    // If succeeded, remember the state as is.
-    // If failed to open, sending_ should be false.
-    // If failed to stop, sending_ should also be false, as we disconnect the
-    // capture anyway.
-    // The failure on SetSend(false) is a known issue in webrtc.
-    sending_ = send;
-  }
-  return ret;
-}
-
-bool HybridVideoMediaChannel::SetCapturer(uint32 ssrc,
-                                          VideoCapturer* capturer) {
-  bool ret = true;
-  if (channel1_.get()) {
-    ret = channel1_->SetCapturer(ssrc, capturer);
-  }
-  if (channel2_.get() && ret) {
-    ret = channel2_->SetCapturer(ssrc, capturer);
-  }
-  return ret;
-}
-
-bool HybridVideoMediaChannel::AddSendStream(const StreamParams& sp) {
-  bool ret = true;
-  if (channel1_) {
-    ret = channel1_->AddSendStream(sp);
-  }
-  if (channel2_ && ret) {
-    ret = channel2_->AddSendStream(sp);
-  }
-  return ret;
-}
-
-bool HybridVideoMediaChannel::RemoveSendStream(uint32 ssrc) {
-  bool ret = true;
-  if (channel1_) {
-    ret = channel1_->RemoveSendStream(ssrc);
-  }
-  if (channel2_ && ret) {
-    ret = channel2_->RemoveSendStream(ssrc);
-  }
-  return ret;
-}
-
-bool HybridVideoMediaChannel::AddRecvStream(const StreamParams& sp) {
-  return active_channel_ &&
-      active_channel_->AddRecvStream(sp);
-}
-
-bool HybridVideoMediaChannel::RemoveRecvStream(uint32 ssrc) {
-  return active_channel_ &&
-      active_channel_->RemoveRecvStream(ssrc);
-}
-
-bool HybridVideoMediaChannel::SendIntraFrame() {
-  return active_channel_ &&
-      active_channel_->SendIntraFrame();
-}
-
-bool HybridVideoMediaChannel::RequestIntraFrame() {
-  return active_channel_ &&
-      active_channel_->RequestIntraFrame();
-}
-
-bool HybridVideoMediaChannel::GetStats(
-    const StatsOptions& options, VideoMediaInfo* info) {
-  // TODO(juberti): Ensure that returning no stats until SetSendCodecs is OK.
-  return active_channel_ &&
-      active_channel_->GetStats(options, info);
-}
-
-void HybridVideoMediaChannel::OnPacketReceived(
-    rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
-  // Eat packets until we have an active channel;
-  if (active_channel_) {
-    active_channel_->OnPacketReceived(packet, packet_time);
-  } else {
-    LOG(LS_INFO) << "HybridVideoChannel: Eating early RTP packet";
-  }
-}
-
-void HybridVideoMediaChannel::OnRtcpReceived(
-    rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
-  // Eat packets until we have an active channel;
-  if (active_channel_) {
-    active_channel_->OnRtcpReceived(packet, packet_time);
-  } else {
-    LOG(LS_INFO) << "HybridVideoChannel: Eating early RTCP packet";
-  }
-}
-
-void HybridVideoMediaChannel::OnReadyToSend(bool ready) {
-  if (channel1_) {
-    channel1_->OnReadyToSend(ready);
-  }
-  if (channel2_) {
-    channel2_->OnReadyToSend(ready);
-  }
-}
-
-void HybridVideoMediaChannel::UpdateAspectRatio(int ratio_w, int ratio_h) {
-  if (active_channel_) active_channel_->UpdateAspectRatio(ratio_w, ratio_h);
-}
-
-bool HybridVideoMediaChannel::SelectActiveChannel(
-    const std::vector<VideoCodec>& codecs) {
-  if (!active_channel_ && !codecs.empty()) {
-    if (engine_->HasCodec1(codecs[0])) {
-      channel2_.reset();
-      active_channel_ = channel1_.get();
-    } else if (engine_->HasCodec2(codecs[0])) {
-      channel1_.reset();
-      active_channel_ = channel2_.get();
-    }
-  }
-  if (NULL == active_channel_) {
-    return false;
-  }
-  // Connect signals from the active channel.
-  active_channel_->SignalMediaError.connect(
-      this,
-      &HybridVideoMediaChannel::OnMediaError);
-  return true;
-}
-
-void HybridVideoMediaChannel::SplitCodecs(
-    const std::vector<VideoCodec>& codecs,
-    std::vector<VideoCodec>* codecs1, std::vector<VideoCodec>* codecs2) {
-  codecs1->clear();
-  codecs2->clear();
-  for (size_t i = 0; i < codecs.size(); ++i) {
-    if (engine_->HasCodec1(codecs[i])) {
-      codecs1->push_back(codecs[i]);
-    }
-    if (engine_->HasCodec2(codecs[i])) {
-      codecs2->push_back(codecs[i]);
-    }
-  }
-}
-
-void HybridVideoMediaChannel::OnMediaError(uint32 ssrc, Error error) {
-  SignalMediaError(ssrc, error);
-}
-
-}  // namespace cricket
diff --git a/media/base/hybridvideoengine.h b/media/base/hybridvideoengine.h
deleted file mode 100644
index 004d3cf..0000000
--- a/media/base/hybridvideoengine.h
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * libjingle
- * Copyright 2004 Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_MEDIA_BASE_HYBRIDVIDEOENGINE_H_
-#define TALK_MEDIA_BASE_HYBRIDVIDEOENGINE_H_
-
-#include <string>
-#include <vector>
-
-#include "talk/media/base/codec.h"
-#include "talk/media/base/mediachannel.h"
-#include "talk/media/base/videocapturer.h"
-#include "talk/media/base/videocommon.h"
-#include "webrtc/base/logging.h"
-#include "webrtc/base/sigslotrepeater.h"
-
-namespace cricket {
-
-struct Device;
-struct VideoFormat;
-class HybridVideoEngineInterface;
-class VideoCapturer;
-class VideoFrame;
-class VideoRenderer;
-
-// HybridVideoMediaChannels work with a HybridVideoEngine to combine
-// two unrelated VideoMediaChannel implementations into a single class.
-class HybridVideoMediaChannel : public VideoMediaChannel {
- public:
-  HybridVideoMediaChannel(HybridVideoEngineInterface* engine,
-                          VideoMediaChannel* channel1,
-                          VideoMediaChannel* channel2);
-  virtual ~HybridVideoMediaChannel();
-
-  // VideoMediaChannel methods
-  virtual void SetInterface(NetworkInterface* iface);
-  virtual bool SetOptions(const VideoOptions& options);
-  virtual bool GetOptions(VideoOptions* options) const;
-  virtual bool AddSendStream(const StreamParams& sp);
-  virtual bool RemoveSendStream(uint32 ssrc);
-  virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer);
-  virtual bool SetRender(bool render);
-  virtual bool MuteStream(uint32 ssrc, bool muted);
-
-  virtual bool SetRecvCodecs(const std::vector<VideoCodec>& codecs);
-  virtual bool SetRecvRtpHeaderExtensions(
-      const std::vector<RtpHeaderExtension>& extensions);
-
-  virtual bool SetSendCodecs(const std::vector<VideoCodec>& codecs);
-  virtual bool GetSendCodec(VideoCodec* codec);
-  virtual bool SetSendStreamFormat(uint32 ssrc, const VideoFormat& format);
-  virtual bool SetSendRtpHeaderExtensions(
-      const std::vector<RtpHeaderExtension>& extensions);
-  virtual bool SetStartSendBandwidth(int bps);
-  virtual bool SetMaxSendBandwidth(int bps);
-  virtual bool SetSend(bool send);
-
-  virtual bool AddRecvStream(const StreamParams& sp);
-  virtual bool RemoveRecvStream(uint32 ssrc);
-  virtual bool SetCapturer(uint32 ssrc, VideoCapturer* capturer);
-
-  virtual bool SendIntraFrame();
-  virtual bool RequestIntraFrame();
-
-  virtual bool GetStats(const StatsOptions& options, VideoMediaInfo* info);
-
-  virtual void OnPacketReceived(rtc::Buffer* packet,
-                                const rtc::PacketTime& packet_time);
-  virtual void OnRtcpReceived(rtc::Buffer* packet,
-                              const rtc::PacketTime& packet_time);
-  virtual void OnReadyToSend(bool ready);
-
-  virtual void UpdateAspectRatio(int ratio_w, int ratio_h);
-
-  void OnLocalFrame(VideoCapturer*, const VideoFrame*);
-  void OnLocalFrameFormat(VideoCapturer*, const VideoFormat*);
-
-  bool sending() const { return sending_; }
-
- private:
-  bool SelectActiveChannel(const std::vector<VideoCodec>& codecs);
-  void SplitCodecs(const std::vector<VideoCodec>& codecs,
-                   std::vector<VideoCodec>* codecs1,
-                   std::vector<VideoCodec>* codecs2);
-
-  void OnMediaError(uint32 ssrc, Error error);
-
-  HybridVideoEngineInterface* engine_;
-  rtc::scoped_ptr<VideoMediaChannel> channel1_;
-  rtc::scoped_ptr<VideoMediaChannel> channel2_;
-  VideoMediaChannel* active_channel_;
-  bool sending_;
-};
-
-// Interface class for HybridVideoChannels to talk to the engine.
-class HybridVideoEngineInterface {
- public:
-  virtual ~HybridVideoEngineInterface() {}
-  virtual bool HasCodec1(const VideoCodec& codec) = 0;
-  virtual bool HasCodec2(const VideoCodec& codec) = 0;
-  virtual void OnSendChange1(VideoMediaChannel* channel1, bool send) = 0;
-  virtual void OnSendChange2(VideoMediaChannel* channel1, bool send) = 0;
-  virtual void OnNewSendResolution(int width, int height) = 0;
-};
-
-// The HybridVideoEngine class combines two unrelated VideoEngine impls
-// into a single class. It creates HybridVideoMediaChannels that also contain
-// a VideoMediaChannel implementation from each engine. Policy is then used
-// during call setup to determine which VideoMediaChannel should be used.
-// Currently, this policy is based on what codec the remote side wants to use.
-template<class VIDEO1, class VIDEO2>
-class HybridVideoEngine : public HybridVideoEngineInterface {
- public:
-  HybridVideoEngine() {
-    // Unify the codec lists.
-    codecs_ = video1_.codecs();
-    codecs_.insert(codecs_.end(), video2_.codecs().begin(),
-                   video2_.codecs().end());
-
-    rtp_header_extensions_ = video1_.rtp_header_extensions();
-    rtp_header_extensions_.insert(rtp_header_extensions_.end(),
-                                  video2_.rtp_header_extensions().begin(),
-                                  video2_.rtp_header_extensions().end());
-
-    SignalCaptureStateChange.repeat(video2_.SignalCaptureStateChange);
-  }
-
-  bool Init(rtc::Thread* worker_thread) {
-    if (!video1_.Init(worker_thread)) {
-      LOG(LS_ERROR) << "Failed to init VideoEngine1";
-      return false;
-    }
-    if (!video2_.Init(worker_thread)) {
-      LOG(LS_ERROR) << "Failed to init VideoEngine2";
-      video1_.Terminate();
-      return false;
-    }
-    return true;
-  }
-  void Terminate() {
-    video1_.Terminate();
-    video2_.Terminate();
-  }
-
-  int GetCapabilities() {
-    return (video1_.GetCapabilities() | video2_.GetCapabilities());
-  }
-  HybridVideoMediaChannel* CreateChannel(VoiceMediaChannel* channel) {
-    rtc::scoped_ptr<VideoMediaChannel> channel1(
-        video1_.CreateChannel(channel));
-    if (!channel1) {
-      LOG(LS_ERROR) << "Failed to create VideoMediaChannel1";
-      return NULL;
-    }
-    rtc::scoped_ptr<VideoMediaChannel> channel2(
-        video2_.CreateChannel(channel));
-    if (!channel2) {
-      LOG(LS_ERROR) << "Failed to create VideoMediaChannel2";
-      return NULL;
-    }
-    return new HybridVideoMediaChannel(this,
-        channel1.release(), channel2.release());
-  }
-
-  bool SetOptions(const VideoOptions& options) {
-    return video1_.SetOptions(options) && video2_.SetOptions(options);
-  }
-  bool SetDefaultEncoderConfig(const VideoEncoderConfig& config) {
-    VideoEncoderConfig conf = config;
-    if (video1_.codecs().size() > 0) {
-      conf.max_codec.name = video1_.codecs()[0].name;
-      if (!video1_.SetDefaultEncoderConfig(conf)) {
-        LOG(LS_ERROR) << "Failed to SetDefaultEncoderConfig for video1";
-        return false;
-      }
-    }
-    if (video2_.codecs().size() > 0) {
-      conf.max_codec.name = video2_.codecs()[0].name;
-      if (!video2_.SetDefaultEncoderConfig(conf)) {
-        LOG(LS_ERROR) << "Failed to SetDefaultEncoderConfig for video2";
-        return false;
-      }
-    }
-    return true;
-  }
-  VideoEncoderConfig GetDefaultEncoderConfig() const {
-    // This looks pretty strange, but, in practice, it'll do sane things if
-    // GetDefaultEncoderConfig is only called after SetDefaultEncoderConfig,
-    // since both engines should be essentially equivalent at that point. If it
-    // hasn't been called, though, we'll use the first meaningful encoder
-    // config, or the config from the second video engine if neither are
-    // meaningful.
-    VideoEncoderConfig config = video1_.GetDefaultEncoderConfig();
-    if (config.max_codec.width != 0) {
-      return config;
-    } else {
-      return video2_.GetDefaultEncoderConfig();
-    }
-  }
-  const std::vector<VideoCodec>& codecs() const {
-    return codecs_;
-  }
-  const std::vector<RtpHeaderExtension>& rtp_header_extensions() const {
-    return rtp_header_extensions_;
-  }
-  void SetLogging(int min_sev, const char* filter) {
-    video1_.SetLogging(min_sev, filter);
-    video2_.SetLogging(min_sev, filter);
-  }
-
-  VideoFormat GetStartCaptureFormat() const {
-    return video2_.GetStartCaptureFormat();
-  }
-
-  // TODO(juberti): Remove these functions after we do the capturer refactoring.
-  // For now they are set to always use the second engine for capturing, which
-  // is convenient given our intended use case.
-  bool SetCaptureDevice(const Device* device) {
-    return video2_.SetCaptureDevice(device);
-  }
-  VideoCapturer* GetVideoCapturer() const {
-    return video2_.GetVideoCapturer();
-  }
-  bool SetLocalRenderer(VideoRenderer* renderer) {
-    return video2_.SetLocalRenderer(renderer);
-  }
-  sigslot::repeater2<VideoCapturer*, CaptureState> SignalCaptureStateChange;
-
-  virtual bool HasCodec1(const VideoCodec& codec) {
-    return HasCodec(video1_, codec);
-  }
-  virtual bool HasCodec2(const VideoCodec& codec) {
-    return HasCodec(video2_, codec);
-  }
-  template<typename VIDEO>
-  bool HasCodec(const VIDEO& engine, const VideoCodec& codec) const {
-    for (std::vector<VideoCodec>::const_iterator i = engine.codecs().begin();
-         i != engine.codecs().end();
-         ++i) {
-      if (i->Matches(codec)) {
-        return true;
-      }
-    }
-    return false;
-  }
-  virtual void OnSendChange1(VideoMediaChannel* channel1, bool send) {
-  }
-  virtual void OnSendChange2(VideoMediaChannel* channel2, bool send) {
-  }
-  virtual void OnNewSendResolution(int width, int height) {
-  }
-
- protected:
-  VIDEO1 video1_;
-  VIDEO2 video2_;
-  std::vector<VideoCodec> codecs_;
-  std::vector<RtpHeaderExtension> rtp_header_extensions_;
-};
-
-}  // namespace cricket
-
-#endif  // TALK_MEDIA_BASE_HYBRIDVIDEOENGINE_H_
diff --git a/media/base/hybridvideoengine_unittest.cc b/media/base/hybridvideoengine_unittest.cc
deleted file mode 100644
index 7b409ea..0000000
--- a/media/base/hybridvideoengine_unittest.cc
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * libjingle
- * Copyright 2004 Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/media/base/fakemediaengine.h"
-#include "talk/media/base/fakenetworkinterface.h"
-#include "talk/media/base/fakevideocapturer.h"
-#include "talk/media/base/hybridvideoengine.h"
-#include "talk/media/base/mediachannel.h"
-#include "talk/media/base/testutils.h"
-#include "webrtc/base/gunit.h"
-
-static const cricket::VideoCodec kGenericCodec(97, "Generic", 640, 360, 30, 0);
-static const cricket::VideoCodec kVp8Codec(100, "VP8", 640, 360, 30, 0);
-static const cricket::VideoCodec kCodecsVp8Only[] = { kVp8Codec };
-static const cricket::VideoCodec kCodecsGenericOnly[] = { kGenericCodec };
-static const cricket::VideoCodec kCodecsVp8First[] = { kVp8Codec,
-                                                       kGenericCodec };
-static const cricket::VideoCodec kCodecsGenericFirst[] = { kGenericCodec,
-                                                           kVp8Codec };
-
-using cricket::StreamParams;
-
-class FakeVp8VideoEngine : public cricket::FakeVideoEngine {
- public:
-  FakeVp8VideoEngine() {
-    SetCodecs(MAKE_VECTOR(kCodecsVp8Only));
-  }
-};
-class FakeGenericVideoEngine : public cricket::FakeVideoEngine {
- public:
-  FakeGenericVideoEngine() {
-    SetCodecs(MAKE_VECTOR(kCodecsGenericOnly));
-  }
-
-  // For testing purposes, mimic the behavior of a media engine that throws out
-  // resolutions that don't match the codec list. A width or height of 0
-  // trivially will never match the codec list, so this is sufficient for
-  // testing the case we want (0x0).
-  virtual bool FindCodec(const cricket::VideoCodec& codec) {
-    if (codec.width == 0 || codec.height == 0) {
-      return false;
-    } else {
-      return cricket::FakeVideoEngine::FindCodec(codec);
-    }
-  }
-};
-class HybridVideoEngineForTest : public cricket::HybridVideoEngine<
-    FakeVp8VideoEngine, FakeGenericVideoEngine> {
- public:
-  HybridVideoEngineForTest()
-      :
-      num_ch1_send_on_(0),
-      num_ch1_send_off_(0),
-      send_width_(0),
-      send_height_(0) { }
-  cricket::FakeVideoEngine* sub_engine1() { return &video1_; }
-  cricket::FakeVideoEngine* sub_engine2() { return &video2_; }
-
-  // From base class HybridVideoEngine.
-  void OnSendChange1(cricket::VideoMediaChannel* channel1, bool send) {
-    if (send) {
-      ++num_ch1_send_on_;
-    } else {
-      ++num_ch1_send_off_;
-    }
-  }
-  // From base class HybridVideoEngine
-  void OnNewSendResolution(int width, int height) {
-    send_width_ = width;
-    send_height_ = height;
-  }
-
-  int num_ch1_send_on() const { return num_ch1_send_on_; }
-  int num_ch1_send_off() const { return num_ch1_send_off_; }
-
-  int send_width() const { return send_width_; }
-  int send_height() const { return send_height_; }
-
- private:
-  int num_ch1_send_on_;
-  int num_ch1_send_off_;
-
-  int send_width_;
-  int send_height_;
-};
-
-class HybridVideoEngineTest : public testing::Test {
- public:
-  HybridVideoEngineTest() : sub_channel1_(NULL), sub_channel2_(NULL) {
-  }
-  ~HybridVideoEngineTest() {
-    engine_.Terminate();
-  }
-  bool SetupEngine() {
-    bool result = engine_.Init(rtc::Thread::Current());
-    if (result) {
-      channel_.reset(engine_.CreateChannel(NULL));
-      result = (channel_.get() != NULL);
-      sub_channel1_ = engine_.sub_engine1()->GetChannel(0);
-      sub_channel2_ = engine_.sub_engine2()->GetChannel(0);
-    }
-    return result;
-  }
-  bool SetupRenderAndAddStream(const StreamParams& sp) {
-    if (!SetupEngine())
-      return false;
-    channel_->SetInterface(transport_.get());
-    return channel_->SetRecvCodecs(engine_.codecs()) &&
-        channel_->AddSendStream(sp) &&
-        channel_->SetRender(true);
-  }
-  void DeliverPacket(const void* data, int len) {
-    rtc::Buffer packet(data, len);
-    channel_->OnPacketReceived(&packet, rtc::CreatePacketTime(0));
-  }
-  void DeliverRtcp(const void* data, int len) {
-    rtc::Buffer packet(data, len);
-    channel_->OnRtcpReceived(&packet, rtc::CreatePacketTime(0));
-  }
-
- protected:
-  void TestSetSendCodecs(cricket::FakeVideoEngine* sub_engine,
-                         const std::vector<cricket::VideoCodec>& codecs) {
-    EXPECT_TRUE(SetupRenderAndAddStream(StreamParams::CreateLegacy(1234)));
-    EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-    cricket::FakeVideoMediaChannel* sub_channel = sub_engine->GetChannel(0);
-    ASSERT_EQ(1U, sub_channel->send_codecs().size());
-    EXPECT_EQ(codecs[0], sub_channel->send_codecs()[0]);
-    EXPECT_TRUE(channel_->SetSend(true));
-    EXPECT_TRUE(sub_channel->sending());
-  }
-  void TestSetSendBandwidth(cricket::FakeVideoEngine* sub_engine,
-                            const std::vector<cricket::VideoCodec>& codecs,
-                            int start_bitrate,
-                            int max_bitrate) {
-    EXPECT_TRUE(SetupRenderAndAddStream(StreamParams::CreateLegacy(1234)));
-    EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-    EXPECT_TRUE(channel_->SetStartSendBandwidth(start_bitrate));
-    EXPECT_TRUE(channel_->SetMaxSendBandwidth(max_bitrate));
-    cricket::FakeVideoMediaChannel* sub_channel = sub_engine->GetChannel(0);
-    EXPECT_EQ(start_bitrate, sub_channel->start_bps());
-    EXPECT_EQ(max_bitrate, sub_channel->max_bps());
-  }
-  HybridVideoEngineForTest engine_;
-  rtc::scoped_ptr<cricket::HybridVideoMediaChannel> channel_;
-  rtc::scoped_ptr<cricket::FakeNetworkInterface> transport_;
-  cricket::FakeVideoMediaChannel* sub_channel1_;
-  cricket::FakeVideoMediaChannel* sub_channel2_;
-};
-
-TEST_F(HybridVideoEngineTest, StartupShutdown) {
-  EXPECT_TRUE(engine_.Init(rtc::Thread::Current()));
-  engine_.Terminate();
-}
-
-// Tests that SetDefaultVideoEncoderConfig passes down to both engines.
-TEST_F(HybridVideoEngineTest, SetDefaultVideoEncoderConfig) {
-  cricket::VideoEncoderConfig config(
-      cricket::VideoCodec(105, "", 640, 400, 30, 0), 1, 2);
-  EXPECT_TRUE(engine_.SetDefaultEncoderConfig(config));
-
-  cricket::VideoEncoderConfig config_1 = config;
-  config_1.max_codec.name = kCodecsVp8Only[0].name;
-  EXPECT_EQ(config_1, engine_.sub_engine1()->default_encoder_config());
-
-  cricket::VideoEncoderConfig config_2 = config;
-  config_2.max_codec.name = kCodecsGenericOnly[0].name;
-  EXPECT_EQ(config_2, engine_.sub_engine2()->default_encoder_config());
-}
-
-// Tests that GetDefaultVideoEncoderConfig picks a meaningful encoder config
-// based on the underlying engine config and then after a call to
-// SetDefaultEncoderConfig on the hybrid engine.
-TEST_F(HybridVideoEngineTest, SetDefaultVideoEncoderConfigDefaultValue) {
-  cricket::VideoEncoderConfig blank_config;
-  cricket::VideoEncoderConfig meaningful_config1(
-      cricket::VideoCodec(111, "abcd", 320, 240, 30, 0), 1, 2);
-  cricket::VideoEncoderConfig meaningful_config2(
-      cricket::VideoCodec(111, "abcd", 1280, 720, 30, 0), 1, 2);
-  cricket::VideoEncoderConfig meaningful_config3(
-      cricket::VideoCodec(111, "abcd", 640, 360, 30, 0), 1, 2);
-  engine_.sub_engine1()->SetDefaultEncoderConfig(blank_config);
-  engine_.sub_engine2()->SetDefaultEncoderConfig(blank_config);
-  EXPECT_EQ(blank_config, engine_.GetDefaultEncoderConfig());
-
-  engine_.sub_engine2()->SetDefaultEncoderConfig(meaningful_config2);
-  EXPECT_EQ(meaningful_config2, engine_.GetDefaultEncoderConfig());
-
-  engine_.sub_engine1()->SetDefaultEncoderConfig(meaningful_config1);
-  EXPECT_EQ(meaningful_config1, engine_.GetDefaultEncoderConfig());
-
-  EXPECT_TRUE(engine_.SetDefaultEncoderConfig(meaningful_config3));
-  // The overall config should now match, though the codec name will have been
-  // rewritten for the first media engine.
-  meaningful_config3.max_codec.name = kCodecsVp8Only[0].name;
-  EXPECT_EQ(meaningful_config3, engine_.GetDefaultEncoderConfig());
-}
-
-// Tests that our engine has the right codecs in the right order.
-TEST_F(HybridVideoEngineTest, CheckCodecs) {
-  const std::vector<cricket::VideoCodec>& c = engine_.codecs();
-  ASSERT_EQ(2U, c.size());
-  EXPECT_EQ(kVp8Codec, c[0]);
-  EXPECT_EQ(kGenericCodec, c[1]);
-}
-
-// Tests that our engine has the right caps.
-TEST_F(HybridVideoEngineTest, CheckCaps) {
-  EXPECT_EQ(cricket::VIDEO_SEND | cricket::VIDEO_RECV,
-      engine_.GetCapabilities());
-}
-
-// Tests that we can create and destroy a channel.
-TEST_F(HybridVideoEngineTest, CreateChannel) {
-  EXPECT_TRUE(SetupEngine());
-  EXPECT_TRUE(sub_channel1_ != NULL);
-  EXPECT_TRUE(sub_channel2_ != NULL);
-}
-
-// Tests that we properly handle failures in CreateChannel.
-TEST_F(HybridVideoEngineTest, CreateChannelFail) {
-  engine_.sub_engine1()->set_fail_create_channel(true);
-  EXPECT_FALSE(SetupEngine());
-  EXPECT_TRUE(channel_.get() == NULL);
-  EXPECT_TRUE(sub_channel1_ == NULL);
-  EXPECT_TRUE(sub_channel2_ == NULL);
-  engine_.sub_engine1()->set_fail_create_channel(false);
-  engine_.sub_engine2()->set_fail_create_channel(true);
-  EXPECT_FALSE(SetupEngine());
-  EXPECT_TRUE(channel_.get() == NULL);
-  EXPECT_TRUE(sub_channel1_ == NULL);
-  EXPECT_TRUE(sub_channel2_ == NULL);
-}
-
-// Test that we set our inbound codecs and settings properly.
-TEST_F(HybridVideoEngineTest, SetLocalDescription) {
-  EXPECT_TRUE(SetupEngine());
-  channel_->SetInterface(transport_.get());
-  EXPECT_TRUE(channel_->SetRecvCodecs(engine_.codecs()));
-  ASSERT_EQ(1U, sub_channel1_->recv_codecs().size());
-  ASSERT_EQ(1U, sub_channel2_->recv_codecs().size());
-  EXPECT_EQ(kVp8Codec, sub_channel1_->recv_codecs()[0]);
-  EXPECT_EQ(kGenericCodec, sub_channel2_->recv_codecs()[0]);
-  StreamParams stream;
-  stream.id = "TestStream";
-  stream.ssrcs.push_back(1234);
-  stream.cname = "5678";
-  EXPECT_TRUE(channel_->AddSendStream(stream));
-  EXPECT_EQ(1234U, sub_channel1_->send_ssrc());
-  EXPECT_EQ(1234U, sub_channel2_->send_ssrc());
-  EXPECT_EQ("5678", sub_channel1_->rtcp_cname());
-  EXPECT_EQ("5678", sub_channel2_->rtcp_cname());
-  EXPECT_TRUE(channel_->SetRender(true));
-  // We've called SetRender, so we should be playing out, but not yet sending.
-  EXPECT_TRUE(sub_channel1_->playout());
-  EXPECT_TRUE(sub_channel2_->playout());
-  EXPECT_FALSE(sub_channel1_->sending());
-  EXPECT_FALSE(sub_channel2_->sending());
-  // We may get SetSend(false) calls during call setup.
-  // Since this causes no change in state, they should no-op and return true.
-  EXPECT_TRUE(channel_->SetSend(false));
-  EXPECT_FALSE(sub_channel1_->sending());
-  EXPECT_FALSE(sub_channel2_->sending());
-}
-
-TEST_F(HybridVideoEngineTest, OnNewSendResolution) {
-  EXPECT_TRUE(SetupEngine());
-  EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsVp8First)));
-  EXPECT_EQ(640, engine_.send_width());
-  EXPECT_EQ(360, engine_.send_height());
-}
-
-// Test that we converge to the active channel for engine 1.
-TEST_F(HybridVideoEngineTest, SetSendCodecs1) {
-  // This will nuke the object that sub_channel2_ points to.
-  TestSetSendCodecs(engine_.sub_engine1(), MAKE_VECTOR(kCodecsVp8First));
-  EXPECT_TRUE(engine_.sub_engine2()->GetChannel(0) == NULL);
-}
-
-// Test that we converge to the active channel for engine 2.
-TEST_F(HybridVideoEngineTest, SetSendCodecs2) {
-  // This will nuke the object that sub_channel1_ points to.
-  TestSetSendCodecs(engine_.sub_engine2(), MAKE_VECTOR(kCodecsGenericFirst));
-  EXPECT_TRUE(engine_.sub_engine1()->GetChannel(0) == NULL);
-}
-
-// Test that we don't accidentally eat 0x0 in SetSendCodecs
-TEST_F(HybridVideoEngineTest, SetSendCodecs0x0) {
-  EXPECT_TRUE(SetupRenderAndAddStream(StreamParams::CreateLegacy(1234)));
-  // Send using generic codec, but with 0x0 resolution.
-  std::vector<cricket::VideoCodec> codecs(MAKE_VECTOR(kCodecsGenericFirst));
-  codecs.resize(1);
-  codecs[0].width = 0;
-  codecs[0].height = 0;
-  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
-}
-
-// Test setting the send bandwidth for VP8.
-TEST_F(HybridVideoEngineTest, SetSendBandwidth1) {
-  TestSetSendBandwidth(engine_.sub_engine1(),
-                       MAKE_VECTOR(kCodecsVp8First),
-                       100000,
-                       384000);
-}
-
-// Test setting the send bandwidth for a generic codec.
-TEST_F(HybridVideoEngineTest, SetSendBandwidth2) {
-  TestSetSendBandwidth(engine_.sub_engine2(),
-                       MAKE_VECTOR(kCodecsGenericFirst),
-                       100001,
-                       384002);
-}
-
-// Test that we dump RTP packets that arrive early.
-TEST_F(HybridVideoEngineTest, HandleEarlyRtp) {
-  static const uint8 kPacket[1024] = { 0 };
-  static const uint8 kRtcp[1024] = { 1 };
-  EXPECT_TRUE(SetupRenderAndAddStream(StreamParams::CreateLegacy(1234)));
-  DeliverPacket(kPacket, sizeof(kPacket));
-  DeliverRtcp(kRtcp, sizeof(kRtcp));
-  EXPECT_TRUE(sub_channel1_->CheckNoRtp());
-  EXPECT_TRUE(sub_channel2_->CheckNoRtp());
-  EXPECT_TRUE(sub_channel1_->CheckNoRtcp());
-  EXPECT_TRUE(sub_channel2_->CheckNoRtcp());
-}
-
-// Test that we properly pass on normal RTP packets.
-TEST_F(HybridVideoEngineTest, HandleRtp) {
-  static const uint8 kPacket[1024] = { 0 };
-  static const uint8 kRtcp[1024] = { 1 };
-  EXPECT_TRUE(SetupRenderAndAddStream(StreamParams::CreateLegacy(1234)));
-  EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsVp8First)));
-  EXPECT_TRUE(channel_->SetSend(true));
-  DeliverPacket(kPacket, sizeof(kPacket));
-  DeliverRtcp(kRtcp, sizeof(kRtcp));
-  EXPECT_TRUE(sub_channel1_->CheckRtp(kPacket, sizeof(kPacket)));
-  EXPECT_TRUE(sub_channel1_->CheckRtcp(kRtcp, sizeof(kRtcp)));
-}
-
-// Test that we properly connect media error signal.
-TEST_F(HybridVideoEngineTest, MediaErrorSignal) {
-  cricket::VideoMediaErrorCatcher catcher;
-
-  // Verify no signal from either channel before the active channel is set.
-  EXPECT_TRUE(SetupEngine());
-  channel_->SignalMediaError.connect(&catcher,
-      &cricket::VideoMediaErrorCatcher::OnError);
-  sub_channel1_->SignalMediaError(1, cricket::VideoMediaChannel::ERROR_OTHER);
-  EXPECT_EQ(0U, catcher.ssrc());
-  sub_channel2_->SignalMediaError(2,
-      cricket::VideoMediaChannel::ERROR_REC_DEVICE_OPEN_FAILED);
-  EXPECT_EQ(0U, catcher.ssrc());
-
-  // Set vp8 as active channel and verify that a signal comes from it.
-  EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsVp8First)));
-  sub_channel1_->SignalMediaError(1, cricket::VideoMediaChannel::ERROR_OTHER);
-  EXPECT_EQ(cricket::VideoMediaChannel::ERROR_OTHER, catcher.error());
-  EXPECT_EQ(1U, catcher.ssrc());
-
-  // Set generic codec as active channel and verify that a signal comes from it.
-  EXPECT_TRUE(SetupEngine());
-  channel_->SignalMediaError.connect(&catcher,
-      &cricket::VideoMediaErrorCatcher::OnError);
-  EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsGenericFirst)));
-  sub_channel2_->SignalMediaError(2,
-      cricket::VideoMediaChannel::ERROR_REC_DEVICE_OPEN_FAILED);
-  EXPECT_EQ(cricket::VideoMediaChannel::ERROR_REC_DEVICE_OPEN_FAILED,
-      catcher.error());
-  EXPECT_EQ(2U, catcher.ssrc());
-}
-
-// Test that SetSend doesn't re-enter.
-TEST_F(HybridVideoEngineTest, RepeatSetSend) {
-  EXPECT_TRUE(SetupEngine());
-  EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsVp8First)));
-
-  // Verify initial status.
-  EXPECT_FALSE(channel_->sending());
-  EXPECT_FALSE(sub_channel1_->sending());
-  EXPECT_EQ(0, engine_.num_ch1_send_on());
-  EXPECT_EQ(0, engine_.num_ch1_send_off());
-
-  // Verfiy SetSend(true) works correctly.
-  EXPECT_TRUE(channel_->SetSend(true));
-  EXPECT_TRUE(channel_->sending());
-  EXPECT_TRUE(sub_channel1_->sending());
-  EXPECT_EQ(1, engine_.num_ch1_send_on());
-  EXPECT_EQ(0, engine_.num_ch1_send_off());
-
-  // SetSend(true) again and verify nothing changes.
-  EXPECT_TRUE(channel_->SetSend(true));
-  EXPECT_TRUE(channel_->sending());
-  EXPECT_TRUE(sub_channel1_->sending());
-  EXPECT_EQ(1, engine_.num_ch1_send_on());
-  EXPECT_EQ(0, engine_.num_ch1_send_off());
-
-  // Verify SetSend(false) works correctly.
-  EXPECT_TRUE(channel_->SetSend(false));
-  EXPECT_FALSE(channel_->sending());
-  EXPECT_FALSE(sub_channel1_->sending());
-  EXPECT_EQ(1, engine_.num_ch1_send_on());
-  EXPECT_EQ(1, engine_.num_ch1_send_off());
-
-  // SetSend(false) again and verfiy nothing changes.
-  EXPECT_TRUE(channel_->SetSend(false));
-  EXPECT_FALSE(channel_->sending());
-  EXPECT_FALSE(sub_channel1_->sending());
-  EXPECT_EQ(1, engine_.num_ch1_send_on());
-  EXPECT_EQ(1, engine_.num_ch1_send_off());
-}
-
-// Test that SetOptions.
-TEST_F(HybridVideoEngineTest, SetOptions) {
-  cricket::VideoOptions vmo;
-  vmo.video_high_bitrate.Set(true);
-  vmo.system_low_adaptation_threshhold.Set(0.10f);
-  EXPECT_TRUE(SetupEngine());
-  EXPECT_TRUE(channel_->SetOptions(vmo));
-
-  bool high_bitrate;
-  float low;
-  EXPECT_TRUE(sub_channel1_->GetOptions(&vmo));
-  EXPECT_TRUE(vmo.video_high_bitrate.Get(&high_bitrate));
-  EXPECT_TRUE(high_bitrate);
-  EXPECT_TRUE(vmo.system_low_adaptation_threshhold.Get(&low));
-  EXPECT_EQ(0.10f, low);
-  EXPECT_TRUE(sub_channel2_->GetOptions(&vmo));
-  EXPECT_TRUE(vmo.video_high_bitrate.Get(&high_bitrate));
-  EXPECT_TRUE(high_bitrate);
-  EXPECT_TRUE(vmo.system_low_adaptation_threshhold.Get(&low));
-  EXPECT_EQ(0.10f, low);
-
-  vmo.video_high_bitrate.Set(false);
-  vmo.system_low_adaptation_threshhold.Set(0.50f);
-
-  EXPECT_TRUE(channel_->SetOptions(vmo));
-  EXPECT_TRUE(sub_channel1_->GetOptions(&vmo));
-  EXPECT_TRUE(vmo.video_high_bitrate.Get(&high_bitrate));
-  EXPECT_FALSE(high_bitrate);
-  EXPECT_TRUE(vmo.system_low_adaptation_threshhold.Get(&low));
-  EXPECT_EQ(0.50f, low);
-  EXPECT_TRUE(sub_channel2_->GetOptions(&vmo));
-  EXPECT_TRUE(vmo.video_high_bitrate.Get(&high_bitrate));
-  EXPECT_FALSE(high_bitrate);
-  EXPECT_TRUE(vmo.system_low_adaptation_threshhold.Get(&low));
-  EXPECT_EQ(0.50f, low);
-}
-
-TEST_F(HybridVideoEngineTest, SetCapturer) {
-  EXPECT_TRUE(SetupEngine());
-  // Set vp8 as active channel and verify that capturer can be set.
-  EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsVp8First)));
-  cricket::FakeVideoCapturer fake_video_capturer;
-  EXPECT_TRUE(channel_->SetCapturer(0, &fake_video_capturer));
-  EXPECT_TRUE(channel_->SetCapturer(0, NULL));
-
-  // Set generic codec active channel and verify that capturer can be set.
-  EXPECT_TRUE(SetupEngine());
-  EXPECT_TRUE(channel_->SetSendCodecs(MAKE_VECTOR(kCodecsGenericFirst)));
-  EXPECT_TRUE(channel_->SetCapturer(0, &fake_video_capturer));
-  EXPECT_TRUE(channel_->SetCapturer(0, NULL));
-}
diff --git a/media/base/mediachannel.h b/media/base/mediachannel.h
index 62d6b61..5232e5d 100644
--- a/media/base/mediachannel.h
+++ b/media/base/mediachannel.h
@@ -182,6 +182,7 @@
     recording_sample_rate.SetFrom(change.recording_sample_rate);
     playout_sample_rate.SetFrom(change.playout_sample_rate);
     dscp.SetFrom(change.dscp);
+    combined_audio_video_bwe.SetFrom(change.combined_audio_video_bwe);
   }
 
   bool operator==(const AudioOptions& o) const {
@@ -207,7 +208,8 @@
         rx_agc_limiter == o.rx_agc_limiter &&
         recording_sample_rate == o.recording_sample_rate &&
         playout_sample_rate == o.playout_sample_rate &&
-        dscp == o.dscp;
+        dscp == o.dscp &&
+        combined_audio_video_bwe == o.combined_audio_video_bwe;
   }
 
   std::string ToString() const {
@@ -238,6 +240,7 @@
     ost << ToStringIfSet("recording_sample_rate", recording_sample_rate);
     ost << ToStringIfSet("playout_sample_rate", playout_sample_rate);
     ost << ToStringIfSet("dscp", dscp);
+    ost << ToStringIfSet("combined_audio_video_bwe", combined_audio_video_bwe);
     ost << "}";
     return ost.str();
   }
@@ -275,6 +278,8 @@
   Settable<uint32> playout_sample_rate;
   // Set DSCP value for packet sent from audio channel.
   Settable<bool> dscp;
+  // Enable combined audio+bandwidth BWE.
+  Settable<bool> combined_audio_video_bwe;
 };
 
 // Options that can be applied to a VideoMediaChannel or a VideoMediaEngine.
diff --git a/media/base/mediaengine.h b/media/base/mediaengine.h
index 6c4b740..f30e3b1 100644
--- a/media/base/mediaengine.h
+++ b/media/base/mediaengine.h
@@ -91,8 +91,6 @@
   virtual AudioOptions GetAudioOptions() const = 0;
   // Sets global audio options. "options" are from AudioOptions, above.
   virtual bool SetAudioOptions(const AudioOptions& options) = 0;
-  // Sets global video options. "options" are from VideoOptions, above.
-  virtual bool SetVideoOptions(const VideoOptions& options) = 0;
   // Sets the value used by the echo canceller to offset delay values obtained
   // from the OS.
   virtual bool SetAudioDelayOffset(int offset) = 0;
@@ -124,7 +122,6 @@
   // when a VoiceMediaChannel starts sending.
   virtual bool SetLocalMonitor(bool enable) = 0;
   // Installs a callback for raw frames from the local camera.
-  virtual bool SetLocalRenderer(VideoRenderer* renderer) = 0;
 
   virtual const std::vector<AudioCodec>& audio_codecs() = 0;
   virtual const std::vector<RtpHeaderExtension>&
@@ -214,9 +211,6 @@
   virtual bool SetAudioOptions(const AudioOptions& options) {
     return voice_.SetOptions(options);
   }
-  virtual bool SetVideoOptions(const VideoOptions& options) {
-    return video_.SetOptions(options);
-  }
   virtual bool SetAudioDelayOffset(int offset) {
     return voice_.SetDelayOffset(offset);
   }
@@ -245,10 +239,6 @@
   virtual bool SetLocalMonitor(bool enable) {
     return voice_.SetLocalMonitor(enable);
   }
-  virtual bool SetLocalRenderer(VideoRenderer* renderer) {
-    return video_.SetLocalRenderer(renderer);
-  }
-
   virtual const std::vector<AudioCodec>& audio_codecs() {
     return voice_.codecs();
   }
@@ -361,7 +351,6 @@
   bool SetDefaultEncoderConfig(const VideoEncoderConfig& config) {
     return true;
   }
-  bool SetLocalRenderer(VideoRenderer* renderer) { return true; }
   const std::vector<VideoCodec>& codecs() { return codecs_; }
   const std::vector<RtpHeaderExtension>& rtp_header_extensions() {
     return rtp_header_extensions_;
diff --git a/media/base/testutils.cc b/media/base/testutils.cc
index 8b79df4..84fd05c 100644
--- a/media/base/testutils.cc
+++ b/media/base/testutils.cc
@@ -29,6 +29,7 @@
 
 #include <math.h>
 
+#include "talk/media/base/executablehelpers.h"
 #include "talk/media/base/rtpdump.h"
 #include "talk/media/base/videocapturer.h"
 #include "talk/media/base/videoframe.h"
@@ -255,10 +256,15 @@
 // Returns the absolute path to a file in the testdata/ directory.
 std::string GetTestFilePath(const std::string& filename) {
   // Locate test data directory.
+#ifdef ENABLE_WEBRTC
+  rtc::Pathname path = rtc::GetExecutablePath();
+  EXPECT_FALSE(path.empty());
+  path.AppendPathname("../../talk/");
+#else
   rtc::Pathname path = testing::GetTalkDirectory();
   EXPECT_FALSE(path.empty());  // must be run from inside "talk"
-  path.AppendFolder("media");
-  path.AppendFolder("testdata");
+#endif
+  path.AppendFolder("media/testdata/");
   path.SetFilename(filename);
   return path.pathname();
 }
diff --git a/media/base/videoadapter_unittest.cc b/media/base/videoadapter_unittest.cc
index af374e0..04bf3d1 100755
--- a/media/base/videoadapter_unittest.cc
+++ b/media/base/videoadapter_unittest.cc
@@ -69,17 +69,24 @@
         listener_.get(), &VideoCapturerListener::OnFrameCaptured);
   }
 
-  void VerifyAdaptedResolution(int width, int height) {
-    EXPECT_TRUE(NULL != listener_->adapted_frame());
-    EXPECT_EQ(static_cast<size_t>(width),
-              listener_->adapted_frame()->GetWidth());
-    EXPECT_EQ(static_cast<size_t>(height),
-              listener_->adapted_frame()->GetHeight());
+  virtual void TearDown() {
+    // Explicitly disconnect the VideoCapturer before to avoid data races
+    // (frames delivered to VideoCapturerListener while it's being destructed).
+    capturer_->SignalFrameCaptured.disconnect_all();
   }
 
  protected:
   class VideoCapturerListener: public sigslot::has_slots<> {
    public:
+    struct Stats {
+      int captured_frames;
+      int dropped_frames;
+      bool last_adapt_was_no_op;
+
+      int adapted_width;
+      int adapted_height;
+    };
+
     explicit VideoCapturerListener(VideoAdapter* adapter)
         : video_adapter_(adapter),
           adapted_frame_(NULL),
@@ -95,6 +102,7 @@
       EXPECT_TRUE(temp_i420.Init(captured_frame,
           captured_frame->width, abs(captured_frame->height)));
       VideoFrame* out_frame = NULL;
+      rtc::CritScope lock(&crit_);
       EXPECT_TRUE(video_adapter_->AdaptFrame(&temp_i420, &out_frame));
       if (out_frame) {
         if (out_frame == &temp_i420) {
@@ -112,12 +120,32 @@
       ++captured_frames_;
     }
 
-    const VideoFrame* adapted_frame() const { return adapted_frame_; }
-    int captured_frames() const { return captured_frames_; }
-    int dropped_frames() const { return dropped_frames_; }
-    bool last_adapt_was_no_op() const { return last_adapt_was_no_op_; }
+    Stats GetStats() {
+      rtc::CritScope lock(&crit_);
+      Stats stats;
+      stats.captured_frames = captured_frames_;
+      stats.dropped_frames = dropped_frames_;
+      stats.last_adapt_was_no_op = last_adapt_was_no_op_;
+      if (adapted_frame_ != NULL) {
+        stats.adapted_width = static_cast<int>(adapted_frame_->GetWidth());
+        stats.adapted_height = static_cast<int>(adapted_frame_->GetHeight());
+      } else {
+        stats.adapted_width = stats.adapted_height = -1;
+      }
+
+      return stats;
+    }
+
+    VideoFrame* CopyAdaptedFrame() {
+      rtc::CritScope lock(&crit_);
+      if (adapted_frame_ == NULL) {
+        return NULL;
+      }
+      return adapted_frame_->Copy();
+    }
 
    private:
+    rtc::CriticalSection crit_;
     VideoAdapter* video_adapter_;
     const VideoFrame* adapted_frame_;
     rtc::scoped_ptr<VideoFrame> copied_output_frame_;
@@ -135,6 +163,13 @@
     bool received_cpu_signal_;
   };
 
+  void VerifyAdaptedResolution(const VideoCapturerListener::Stats& stats,
+                               int width,
+                               int height) {
+    EXPECT_EQ(width, stats.adapted_width);
+    EXPECT_EQ(height, stats.adapted_height);
+  }
+
   rtc::scoped_ptr<FileVideoCapturer> capturer_;
   rtc::scoped_ptr<VideoAdapter> adapter_;
   rtc::scoped_ptr<VideoCapturerListener> listener_;
@@ -157,12 +192,13 @@
   // Call Adapter with some frames.
   EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 10, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 10, kWaitTimeout);
 
   // Verify no frame drop and no resolution change.
-  EXPECT_GE(listener_->captured_frames(), 10);
-  EXPECT_EQ(0, listener_->dropped_frames());
-  VerifyAdaptedResolution(capture_format_.width, capture_format_.height);
+  VideoCapturerListener::Stats stats = listener_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(0, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, capture_format_.width, capture_format_.height);
 }
 
 // Do not adapt the frame rate or the resolution. Expect no frame drop and no
@@ -171,13 +207,14 @@
   adapter_->SetOutputFormat(capture_format_);
   EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 10, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 10, kWaitTimeout);
 
   // Verify no frame drop and no resolution change.
-  EXPECT_GE(listener_->captured_frames(), 10);
-  EXPECT_EQ(0, listener_->dropped_frames());
-  VerifyAdaptedResolution(capture_format_.width, capture_format_.height);
-  EXPECT_TRUE(listener_->last_adapt_was_no_op());
+  VideoCapturerListener::Stats stats = listener_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(0, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, capture_format_.width, capture_format_.height);
+  EXPECT_TRUE(stats.last_adapt_was_no_op);
 }
 
 TEST_F(VideoAdapterTest, AdaptZeroInterval) {
@@ -187,12 +224,13 @@
   adapter_->SetOutputFormat(format);
   EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 10, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 10, kWaitTimeout);
 
   // Verify no crash and that frames aren't dropped.
-  EXPECT_GE(listener_->captured_frames(), 10);
-  EXPECT_EQ(0, listener_->dropped_frames());
-  VerifyAdaptedResolution(capture_format_.width, capture_format_.height);
+  VideoCapturerListener::Stats stats = listener_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(0, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, capture_format_.width, capture_format_.height);
 }
 
 // Adapt the frame rate to be half of the capture rate at the beginning. Expect
@@ -203,12 +241,13 @@
   adapter_->SetOutputFormat(request_format);
   EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 10, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 10, kWaitTimeout);
 
   // Verify frame drop and no resolution change.
-  EXPECT_GE(listener_->captured_frames(), 10);
-  EXPECT_EQ(listener_->captured_frames() / 2, listener_->dropped_frames());
-  VerifyAdaptedResolution(capture_format_.width, capture_format_.height);
+  VideoCapturerListener::Stats stats = listener_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(stats.captured_frames / 2, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, capture_format_.width, capture_format_.height);
 }
 
 // Adapt the frame rate to be half of the capture rate at the beginning. Expect
@@ -219,13 +258,14 @@
   adapter_->SetOutputFormat(request_format);
   EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 30, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 30, kWaitTimeout);
 
   // Verify frame drop and no resolution change.
-  EXPECT_GE(listener_->captured_frames(), 30);
+  VideoCapturerListener::Stats stats = listener_->GetStats();
+  EXPECT_GE(stats.captured_frames, 30);
   // Verify 2 / 3 kept (20) and 1 / 3 dropped (10).
-  EXPECT_EQ(listener_->captured_frames() * 1 / 3, listener_->dropped_frames());
-  VerifyAdaptedResolution(capture_format_.width, capture_format_.height);
+  EXPECT_EQ(stats.captured_frames * 1 / 3, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, capture_format_.width, capture_format_.height);
 }
 
 // Adapt the frame rate to be half of the capture rate after capturing no less
@@ -236,20 +276,20 @@
   adapter_->SetOutputFormat(request_format);
   EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 10, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 10, kWaitTimeout);
 
   // Verify no frame drop before adaptation.
-  EXPECT_EQ(0, listener_->dropped_frames());
+  EXPECT_EQ(0, listener_->GetStats().dropped_frames);
 
   // Adapt the frame rate.
   request_format.interval *= 2;
   adapter_->SetOutputFormat(request_format);
 
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 20, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 20, kWaitTimeout);
 
   // Verify frame drop after adaptation.
-  EXPECT_GT(listener_->dropped_frames(), 0);
+  EXPECT_GT(listener_->GetStats().dropped_frames, 0);
 }
 
 // Adapt the frame resolution to be a quarter of the capture resolution at the
@@ -261,11 +301,12 @@
   adapter_->SetOutputFormat(request_format);
   EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 10, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 10, kWaitTimeout);
 
   // Verify no frame drop and resolution change.
-  EXPECT_EQ(0, listener_->dropped_frames());
-  VerifyAdaptedResolution(request_format.width, request_format.height);
+  VideoCapturerListener::Stats stats = listener_->GetStats();
+  EXPECT_EQ(0, stats.dropped_frames);
+  VerifyAdaptedResolution(stats, request_format.width, request_format.height);
 }
 
 // Adapt the frame resolution to half width. Expect resolution change.
@@ -276,10 +317,10 @@
   adapter_->SetOutputFormat(request_format);
   EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 10, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 10, kWaitTimeout);
 
   // Verify resolution change.
-  VerifyAdaptedResolution(213, 160);
+  VerifyAdaptedResolution(listener_->GetStats(), 213, 160);
 }
 
 // Adapt the frame resolution to half height. Expect resolution change.
@@ -290,10 +331,10 @@
   adapter_->SetOutputFormat(request_format);
   EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 10, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 10, kWaitTimeout);
 
   // Verify resolution change.
-  VerifyAdaptedResolution(213, 160);
+  VerifyAdaptedResolution(listener_->GetStats(), 213, 160);
 }
 
 // Adapt the frame resolution to be a quarter of the capture resolution after
@@ -304,21 +345,25 @@
   adapter_->SetOutputFormat(request_format);
   EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 10, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 10, kWaitTimeout);
 
   // Verify no resolution change before adaptation.
-  VerifyAdaptedResolution(request_format.width, request_format.height);
+  VerifyAdaptedResolution(
+      listener_->GetStats(), request_format.width, request_format.height);
 
   // Adapt the frame resolution.
   request_format.width /= 2;
   request_format.height /= 2;
   adapter_->SetOutputFormat(request_format);
-  EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 20, kWaitTimeout);
-
+  int captured_frames = listener_->GetStats().captured_frames;
+  EXPECT_TRUE_WAIT(
+      !capturer_->IsRunning() ||
+          listener_->GetStats().captured_frames >= captured_frames + 10,
+      kWaitTimeout);
 
   // Verify resolution change after adaptation.
-  VerifyAdaptedResolution(request_format.width, request_format.height);
+  VerifyAdaptedResolution(
+      listener_->GetStats(), request_format.width, request_format.height);
 }
 
 // Black the output frame.
@@ -326,42 +371,57 @@
   adapter_->SetOutputFormat(capture_format_);
   EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 10, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 10, kWaitTimeout);
   // Verify that the output frame is not black.
-  EXPECT_NE(16, *listener_->adapted_frame()->GetYPlane());
-  EXPECT_NE(128, *listener_->adapted_frame()->GetUPlane());
-  EXPECT_NE(128, *listener_->adapted_frame()->GetVPlane());
+  rtc::scoped_ptr<VideoFrame> adapted_frame(listener_->CopyAdaptedFrame());
+  EXPECT_NE(16, *adapted_frame->GetYPlane());
+  EXPECT_NE(128, *adapted_frame->GetUPlane());
+  EXPECT_NE(128, *adapted_frame->GetVPlane());
 
   adapter_->SetBlackOutput(true);
-  EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 20, kWaitTimeout);
+  int captured_frames = listener_->GetStats().captured_frames;
+  EXPECT_TRUE_WAIT(
+      !capturer_->IsRunning() ||
+          listener_->GetStats().captured_frames >= captured_frames + 10,
+      kWaitTimeout);
   // Verify that the output frame is black.
-  EXPECT_EQ(16, *listener_->adapted_frame()->GetYPlane());
-  EXPECT_EQ(128, *listener_->adapted_frame()->GetUPlane());
-  EXPECT_EQ(128, *listener_->adapted_frame()->GetVPlane());
+  adapted_frame.reset(listener_->CopyAdaptedFrame());
+  EXPECT_EQ(16, *adapted_frame->GetYPlane());
+  EXPECT_EQ(128, *adapted_frame->GetUPlane());
+  EXPECT_EQ(128, *adapted_frame->GetVPlane());
 
   // Verify that the elapsed time and timestamp of the black frame increase.
-  int64 elapsed_time = listener_->adapted_frame()->GetElapsedTime();
-  int64 timestamp = listener_->adapted_frame()->GetTimeStamp();
-  EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 22, kWaitTimeout);
-  EXPECT_GT(listener_->adapted_frame()->GetElapsedTime(), elapsed_time);
-  EXPECT_GT(listener_->adapted_frame()->GetTimeStamp(), timestamp);
+  int64 elapsed_time = adapted_frame->GetElapsedTime();
+  int64 timestamp = adapted_frame->GetTimeStamp();
+  captured_frames = listener_->GetStats().captured_frames;
+  EXPECT_TRUE_WAIT(
+      !capturer_->IsRunning() ||
+          listener_->GetStats().captured_frames >= captured_frames + 10,
+      kWaitTimeout);
+
+  adapted_frame.reset(listener_->CopyAdaptedFrame());
+  EXPECT_GT(adapted_frame->GetElapsedTime(), elapsed_time);
+  EXPECT_GT(adapted_frame->GetTimeStamp(), timestamp);
 
   // Change the output size
   VideoFormat request_format = capture_format_;
   request_format.width /= 2;
   request_format.height /= 2;
   adapter_->SetOutputFormat(request_format);
+  captured_frames = listener_->GetStats().captured_frames;
+  EXPECT_TRUE_WAIT(
+      !capturer_->IsRunning() ||
+          listener_->GetStats().captured_frames >= captured_frames + 10,
+      kWaitTimeout);
 
-  EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 40, kWaitTimeout);
   // Verify resolution change after adaptation.
-  VerifyAdaptedResolution(request_format.width, request_format.height);
+  VerifyAdaptedResolution(
+      listener_->GetStats(), request_format.width, request_format.height);
   // Verify that the output frame is black.
-  EXPECT_EQ(16, *listener_->adapted_frame()->GetYPlane());
-  EXPECT_EQ(128, *listener_->adapted_frame()->GetUPlane());
-  EXPECT_EQ(128, *listener_->adapted_frame()->GetVPlane());
+  adapted_frame.reset(listener_->CopyAdaptedFrame());
+  EXPECT_EQ(16, *adapted_frame->GetYPlane());
+  EXPECT_EQ(128, *adapted_frame->GetUPlane());
+  EXPECT_EQ(128, *adapted_frame->GetVPlane());
 }
 
 // Drop all frames.
@@ -370,11 +430,12 @@
   adapter_->SetOutputFormat(format);
   EXPECT_EQ(CS_RUNNING, capturer_->Start(capture_format_));
   EXPECT_TRUE_WAIT(!capturer_->IsRunning() ||
-                   listener_->captured_frames() >= 10, kWaitTimeout);
+                   listener_->GetStats().captured_frames >= 10, kWaitTimeout);
 
   // Verify all frames are dropped.
-  EXPECT_GE(listener_->captured_frames(), 10);
-  EXPECT_EQ(listener_->captured_frames(), listener_->dropped_frames());
+  VideoCapturerListener::Stats stats = listener_->GetStats();
+  EXPECT_GE(stats.captured_frames, 10);
+  EXPECT_EQ(stats.captured_frames, stats.dropped_frames);
 }
 
 TEST(CoordinatedVideoAdapterTest, TestCoordinatedWithoutCpuAdaptation) {
diff --git a/media/base/videoengine_unittest.h b/media/base/videoengine_unittest.h
index 0f03c7b..8eab347 100644
--- a/media/base/videoengine_unittest.h
+++ b/media/base/videoengine_unittest.h
@@ -1593,6 +1593,25 @@
     frame_count += 2;
     EXPECT_EQ_WAIT(frame_count, renderer_.num_rendered_frames(), kTimeout);
   }
+  // Tests that adapted frames won't be upscaled to a higher resolution.
+  void SendsLowerResolutionOnSmallerFrames() {
+    cricket::VideoCodec codec = DefaultCodec();
+    codec.width = 320;
+    codec.height = 240;
+    EXPECT_TRUE(SetOneCodec(codec));
+    EXPECT_TRUE(SetSend(true));
+    EXPECT_TRUE(channel_->SetRender(true));
+    EXPECT_TRUE(channel_->SetRenderer(kDefaultReceiveSsrc, &renderer_));
+    EXPECT_EQ(0, renderer_.num_rendered_frames());
+    EXPECT_TRUE(SendFrame());
+    EXPECT_FRAME_WAIT(1, codec.width, codec.height, kTimeout);
+
+    // Check that we send smaller frames at the new resolution.
+    EXPECT_TRUE(rtc::Thread::Current()->ProcessMessages(33));
+    EXPECT_TRUE(video_capturer_->CaptureCustomFrame(
+        codec.width / 2, codec.height / 2, cricket::FOURCC_I420));
+    EXPECT_FRAME_WAIT(2, codec.width / 2, codec.height / 2, kTimeout);
+  }
   // Tests that we can set the send stream format properly.
   void SetSendStreamFormat() {
     cricket::VideoCodec codec(DefaultCodec());
diff --git a/media/base/videoframe.cc b/media/base/videoframe.cc
index 1c5cfd8..018d065 100644
--- a/media/base/videoframe.cc
+++ b/media/base/videoframe.cc
@@ -235,7 +235,7 @@
 }
 
 static const size_t kMaxSampleSize = 1000000000u;
-// Returns whether a sample is valid
+// Returns whether a sample is valid.
 bool VideoFrame::Validate(uint32 fourcc, int w, int h,
                           const uint8 *sample, size_t sample_size) {
   if (h < 0) {
@@ -311,6 +311,11 @@
                   << " " << sample_size;
     return false;
   }
+  // TODO(fbarchard): Make function to dump information about frames.
+  uint8 four_samples[4] = { 0, 0, 0, 0 };
+  for (size_t i = 0; i < ARRAY_SIZE(four_samples) && i < sample_size; ++i) {
+    four_samples[i] = sample[i];
+  }
   if (sample_size < expected_size) {
     LOG(LS_ERROR) << "Size field is too small."
                   << " format: " << GetFourccName(format)
@@ -318,10 +323,10 @@
                   << " size: " << w << "x" << h
                   << " " << sample_size
                   << " expected: " << expected_size
-                  << " sample[0..3]: " << static_cast<int>(sample[0])
-                  << ", " << static_cast<int>(sample[1])
-                  << ", " << static_cast<int>(sample[2])
-                  << ", " << static_cast<int>(sample[3]);
+                  << " sample[0..3]: " << static_cast<int>(four_samples[0])
+                  << ", " << static_cast<int>(four_samples[1])
+                  << ", " << static_cast<int>(four_samples[2])
+                  << ", " << static_cast<int>(four_samples[3]);
     return false;
   }
   if (sample_size > kMaxSampleSize) {
@@ -331,13 +336,14 @@
                     << " size: " << w << "x" << h
                     << " " << sample_size
                     << " expected: " << 2 * expected_size
-                    << " sample[0..3]: " << static_cast<int>(sample[0])
-                    << ", " << static_cast<int>(sample[1])
-                    << ", " << static_cast<int>(sample[2])
-                    << ", " << static_cast<int>(sample[3]);
+                    << " sample[0..3]: " << static_cast<int>(four_samples[0])
+                    << ", " << static_cast<int>(four_samples[1])
+                    << ", " << static_cast<int>(four_samples[2])
+                    << ", " << static_cast<int>(four_samples[3]);
     return false;
   }
   // Show large size warning once every 100 frames.
+  // TODO(fbarchard): Make frame counter atomic for thread safety.
   static int large_warn100 = 0;
   size_t large_expected_size = expected_size * 2;
   if (expected_bpp >= 8 &&
@@ -350,27 +356,14 @@
                     << " size: " << w << "x" << h
                     << " bytes: " << sample_size
                     << " expected: " << large_expected_size
-                    << " sample[0..3]: " << static_cast<int>(sample[0])
-                    << ", " << static_cast<int>(sample[1])
-                    << ", " << static_cast<int>(sample[2])
-                    << ", " << static_cast<int>(sample[3]);
-  }
-  // Scan pages to ensure they are there and don't contain a single value and
-  // to generate an error.
-  if (!memcmp(sample + sample_size - 8, sample + sample_size - 4, 4) &&
-      !memcmp(sample, sample + 4, sample_size - 4)) {
-    LOG(LS_WARNING) << "Duplicate value for all pixels."
-                    << " format: " << GetFourccName(format)
-                    << " bpp: " << expected_bpp
-                    << " size: " << w << "x" << h
-                    << " bytes: " << sample_size
-                    << " expected: " << expected_size
-                    << " sample[0..3]: " << static_cast<int>(sample[0])
-                    << ", " << static_cast<int>(sample[1])
-                    << ", " << static_cast<int>(sample[2])
-                    << ", " << static_cast<int>(sample[3]);
+                    << " sample[0..3]: " << static_cast<int>(four_samples[0])
+                    << ", " << static_cast<int>(four_samples[1])
+                    << ", " << static_cast<int>(four_samples[2])
+                    << ", " << static_cast<int>(four_samples[3]);
   }
 
+  // TODO(fbarchard): Add duplicate pixel check.
+  // TODO(fbarchard): Use frame counter atomic for thread safety.
   static bool valid_once = true;
   if (valid_once) {
     valid_once = false;
@@ -380,10 +373,10 @@
                  << " size: " << w << "x" << h
                  << " bytes: " << sample_size
                  << " expected: " << expected_size
-                 << " sample[0..3]: " << static_cast<int>(sample[0])
-                 << ", " << static_cast<int>(sample[1])
-                 << ", " << static_cast<int>(sample[2])
-                 << ", " << static_cast<int>(sample[3]);
+                 << " sample[0..3]: " << static_cast<int>(four_samples[0])
+                 << ", " << static_cast<int>(four_samples[1])
+                 << ", " << static_cast<int>(four_samples[2])
+                 << ", " << static_cast<int>(four_samples[3]);
   }
   return true;
 }
diff --git a/media/base/videoframe_unittest.h b/media/base/videoframe_unittest.h
index c4a7a8c..483fc34 100644
--- a/media/base/videoframe_unittest.h
+++ b/media/base/videoframe_unittest.h
@@ -135,6 +135,9 @@
     rtc::scoped_ptr<rtc::FileStream> fs(
         rtc::Filesystem::OpenFile(path, "rb"));
     if (!fs.get()) {
+      LOG(LS_ERROR) << "Could not open test file path: " << path.pathname()
+                    << " from current dir "
+                    << rtc::Filesystem::GetCurrentDirectory().pathname();
       return NULL;
     }
 
@@ -143,6 +146,7 @@
         new rtc::MemoryStream());
     rtc::StreamResult res = Flow(fs.get(), buf, sizeof(buf), ms.get());
     if (res != rtc::SR_SUCCESS) {
+      LOG(LS_ERROR) << "Could not load test file path: " << path.pathname();
       return NULL;
     }
 
@@ -419,17 +423,22 @@
                       const uint8* u, uint32 upitch,
                       const uint8* v, uint32 vpitch,
                       int max_error) {
-    return IsSize(frame, width, height) &&
+    return IsSize(frame,
+                  static_cast<uint32>(width),
+                  static_cast<uint32>(height)) &&
         frame.GetPixelWidth() == pixel_width &&
         frame.GetPixelHeight() == pixel_height &&
         frame.GetElapsedTime() == elapsed_time &&
         frame.GetTimeStamp() == time_stamp &&
         IsPlaneEqual("y", frame.GetYPlane(), frame.GetYPitch(), y, ypitch,
-                     width, height, max_error) &&
+                     static_cast<uint32>(width),
+                     static_cast<uint32>(height), max_error) &&
         IsPlaneEqual("u", frame.GetUPlane(), frame.GetUPitch(), u, upitch,
-                     (width + 1) / 2, (height + 1) / 2, max_error) &&
+                     static_cast<uint32>((width + 1) / 2),
+                     static_cast<uint32>((height + 1) / 2), max_error) &&
         IsPlaneEqual("v", frame.GetVPlane(), frame.GetVPitch(), v, vpitch,
-                     (width + 1) / 2, (height + 1) / 2, max_error);
+                     static_cast<uint32>((width + 1) / 2),
+                     static_cast<uint32>((height + 1) / 2), max_error);
   }
 
   static bool IsEqual(const cricket::VideoFrame& frame1,
@@ -719,7 +728,7 @@
     T frame1, frame2;
     size_t out_size = kWidth * kHeight * 2;
     rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]);
-    uint8 *out = ALIGNP(outbuf.get(), kAlignment);
+    uint8* out = ALIGNP(outbuf.get(), kAlignment);
     T frame;
     ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
     EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(cricket::FOURCC_RGBP,
@@ -735,7 +744,7 @@
     T frame1, frame2;
     size_t out_size = kWidth * kHeight * 2;
     rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]);
-    uint8 *out = ALIGNP(outbuf.get(), kAlignment);
+    uint8* out = ALIGNP(outbuf.get(), kAlignment);
     T frame;
     ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
     EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(cricket::FOURCC_RGBO,
@@ -751,7 +760,7 @@
     T frame1, frame2;
     size_t out_size = kWidth * kHeight * 2;
     rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]);
-    uint8 *out = ALIGNP(outbuf.get(), kAlignment);
+    uint8* out = ALIGNP(outbuf.get(), kAlignment);
     T frame;
     ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
     EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(cricket::FOURCC_R444,
@@ -771,12 +780,12 @@
     size_t bayer_size = kWidth * kHeight;                                      \
     rtc::scoped_ptr<uint8[]> bayerbuf(new uint8[                         \
         bayer_size + kAlignment]);                                             \
-    uint8 *bayer = ALIGNP(bayerbuf.get(), kAlignment);                         \
+    uint8* bayer = ALIGNP(bayerbuf.get(), kAlignment);                         \
     T frame1, frame2;                                                          \
     rtc::scoped_ptr<rtc::MemoryStream> ms(                         \
         CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight));               \
     ASSERT_TRUE(ms.get() != NULL);                                             \
-    libyuv::ARGBToBayer##BAYER(reinterpret_cast<uint8 *>(ms->GetBuffer()),     \
+    libyuv::ARGBToBayer##BAYER(reinterpret_cast<uint8* >(ms->GetBuffer()),     \
                                kWidth * 4,                                     \
                                bayer, kWidth,                                  \
                                kWidth, kHeight);                               \
@@ -812,8 +821,8 @@
                             reinterpret_cast<uint8*>(ms->GetBuffer()),         \
                             data_size,                                         \
                             1, 1, 0, 0, 0));                                   \
-    int width_rotate = frame1.GetWidth();                                      \
-    int height_rotate = frame1.GetHeight();                                    \
+    int width_rotate = static_cast<int>(frame1.GetWidth());                    \
+    int height_rotate = static_cast<int>(frame1.GetHeight());                  \
     EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 1, 1, 0, 0));  \
     libyuv::I420Mirror(frame2.GetYPlane(), frame2.GetYPitch(),                 \
                        frame2.GetUPlane(), frame2.GetUPitch(),                 \
@@ -845,8 +854,8 @@
                             reinterpret_cast<uint8*>(ms->GetBuffer()),         \
                             data_size,                                         \
                             1, 1, 0, 0, 0));                                   \
-    int width_rotate = frame1.GetWidth();                                      \
-    int height_rotate = frame1.GetHeight();                                    \
+    int width_rotate = static_cast<int>(frame1.GetWidth());                    \
+    int height_rotate = static_cast<int>(frame1.GetHeight());                  \
     EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 1, 1, 0, 0));  \
     libyuv::I420Rotate(frame2.GetYPlane(), frame2.GetYPitch(),                 \
                        frame2.GetUPlane(), frame2.GetUPitch(),                 \
@@ -995,7 +1004,7 @@
     // Convert back to ARGB.
     size_t out_size = 4;
     rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]);
-    uint8 *out = ALIGNP(outbuf.get(), kAlignment);
+    uint8* out = ALIGNP(outbuf.get(), kAlignment);
 
     EXPECT_EQ(out_size, frame.ConvertToRgbBuffer(cricket::FOURCC_ARGB,
                                                  out,
@@ -1032,7 +1041,7 @@
     // Convert back to ARGB
     size_t out_size = 10 * 4;
     rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]);
-    uint8 *out = ALIGNP(outbuf.get(), kAlignment);
+    uint8* out = ALIGNP(outbuf.get(), kAlignment);
 
     EXPECT_EQ(out_size, frame.ConvertToRgbBuffer(cricket::FOURCC_ARGB,
                                                  out,
@@ -1431,8 +1440,8 @@
     size_t out_size = astride * kHeight;
     rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment + 1]);
     memset(outbuf.get(), 0, out_size + kAlignment + 1);
-    uint8 *outtop = ALIGNP(outbuf.get(), kAlignment);
-    uint8 *out = outtop;
+    uint8* outtop = ALIGNP(outbuf.get(), kAlignment);
+    uint8* out = outtop;
     int stride = astride;
     if (invert) {
       out += (kHeight - 1) * stride;  // Point to last row.
@@ -1869,7 +1878,7 @@
     size_t bayer_size = kWidth * kHeight;                                      \
     rtc::scoped_ptr<uint8[]> bayerbuf(new uint8[                         \
         bayer_size + kAlignment]);                                             \
-    uint8 *bayer = ALIGNP(bayerbuf.get(), kAlignment);                         \
+    uint8* bayer = ALIGNP(bayerbuf.get(), kAlignment);                         \
     T frame;                                                                   \
     rtc::scoped_ptr<rtc::MemoryStream> ms(                         \
         CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight));               \
@@ -1898,7 +1907,7 @@
     size_t bayer_size = kWidth * kHeight;                                      \
     rtc::scoped_ptr<uint8[]> bayerbuf(new uint8[                         \
         bayer_size + 1 + kAlignment]);                                         \
-    uint8 *bayer = ALIGNP(bayerbuf.get(), kAlignment) + 1;                     \
+    uint8* bayer = ALIGNP(bayerbuf.get(), kAlignment) + 1;                     \
     T frame;                                                                   \
     rtc::scoped_ptr<rtc::MemoryStream> ms(                         \
         CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight));               \
@@ -1935,7 +1944,7 @@
     size_t bayer_size = kWidth * kHeight;                                      \
     rtc::scoped_ptr<uint8[]> bayerbuf(new uint8[                         \
         bayer_size + kAlignment]);                                             \
-    uint8 *bayer1 = ALIGNP(bayerbuf.get(), kAlignment);                        \
+    uint8* bayer1 = ALIGNP(bayerbuf.get(), kAlignment);                        \
     for (int i = 0; i < kWidth * kHeight; ++i) {                               \
       bayer1[i] = static_cast<uint8>(i * 33u + 183u);                          \
     }                                                                          \
@@ -1951,7 +1960,7 @@
     }                                                                          \
     rtc::scoped_ptr<uint8[]> bayer2buf(new uint8[                        \
         bayer_size + kAlignment]);                                             \
-    uint8 *bayer2 = ALIGNP(bayer2buf.get(), kAlignment);                       \
+    uint8* bayer2 = ALIGNP(bayer2buf.get(), kAlignment);                       \
     libyuv::ARGBToBayer##BAYER(reinterpret_cast<uint8*>(ms->GetBuffer()),      \
                            kWidth * 4,                                         \
                            bayer2, kWidth,                                     \
diff --git a/media/devices/linuxdevicemanager.cc b/media/devices/linuxdevicemanager.cc
index a79e226..d122169 100644
--- a/media/devices/linuxdevicemanager.cc
+++ b/media/devices/linuxdevicemanager.cc
@@ -31,10 +31,10 @@
 #include "talk/media/base/mediacommon.h"
 #include "talk/media/devices/libudevsymboltable.h"
 #include "talk/media/devices/v4llookup.h"
-#include "talk/sound/platformsoundsystem.h"
-#include "talk/sound/platformsoundsystemfactory.h"
-#include "talk/sound/sounddevicelocator.h"
-#include "talk/sound/soundsysteminterface.h"
+#include "webrtc/sound/platformsoundsystem.h"
+#include "webrtc/sound/platformsoundsystemfactory.h"
+#include "webrtc/sound/sounddevicelocator.h"
+#include "webrtc/sound/soundsysteminterface.h"
 #include "webrtc/base/fileutils.h"
 #include "webrtc/base/linux.h"
 #include "webrtc/base/logging.h"
@@ -89,7 +89,7 @@
 };
 
 LinuxDeviceManager::LinuxDeviceManager()
-    : sound_system_(new PlatformSoundSystemFactory()) {
+    : sound_system_(new rtc::PlatformSoundSystemFactory()) {
   set_watcher(new LinuxDeviceWatcher(this));
 }
 
@@ -102,7 +102,7 @@
   if (!sound_system_.get()) {
     return false;
   }
-  SoundSystemInterface::SoundDeviceLocatorList list;
+  rtc::SoundSystemInterface::SoundDeviceLocatorList list;
   bool success;
   if (input) {
     success = sound_system_->EnumerateCaptureDevices(&list);
@@ -118,12 +118,12 @@
   // device at index 0, but Enumerate(Capture|Playback)Devices does not include
   // a locator for the default device.
   int index = 1;
-  for (SoundSystemInterface::SoundDeviceLocatorList::iterator i = list.begin();
+  for (rtc::SoundSystemInterface::SoundDeviceLocatorList::iterator i = list.begin();
        i != list.end();
        ++i, ++index) {
     devs->push_back(Device((*i)->name(), index));
   }
-  SoundSystemInterface::ClearSoundDeviceLocatorList(&list);
+  rtc::SoundSystemInterface::ClearSoundDeviceLocatorList(&list);
   sound_system_.release();
   return FilterDevices(devs, kFilteredAudioDevicesName);
 }
diff --git a/media/devices/linuxdevicemanager.h b/media/devices/linuxdevicemanager.h
index 88aee4e..1eb648f 100644
--- a/media/devices/linuxdevicemanager.h
+++ b/media/devices/linuxdevicemanager.h
@@ -32,7 +32,7 @@
 #include <vector>
 
 #include "talk/media/devices/devicemanager.h"
-#include "talk/sound/soundsystemfactory.h"
+#include "webrtc/sound/soundsystemfactory.h"
 #include "webrtc/base/sigslot.h"
 #include "webrtc/base/stringencode.h"
 
@@ -47,7 +47,7 @@
 
  private:
   virtual bool GetAudioDevices(bool input, std::vector<Device>* devs);
-  SoundSystemHandle sound_system_;
+  rtc::SoundSystemHandle sound_system_;
 };
 
 }  // namespace cricket
diff --git a/media/devices/macdevicemanager.cc b/media/devices/macdevicemanager.cc
index 568ee53..8f777b4 100644
--- a/media/devices/macdevicemanager.cc
+++ b/media/devices/macdevicemanager.cc
@@ -71,7 +71,7 @@
 extern DeviceWatcherImpl* CreateDeviceWatcherCallback(
     DeviceManagerInterface* dm);
 extern void ReleaseDeviceWatcherCallback(DeviceWatcherImpl* impl);
-extern bool GetQTKitVideoDevices(std::vector<Device>* out);
+extern bool GetAVFoundationVideoDevices(std::vector<Device>* out);
 static bool GetAudioDeviceIDs(bool inputs, std::vector<AudioDeviceID>* out);
 static bool GetAudioDeviceName(AudioDeviceID id, bool input, std::string* out);
 
@@ -84,7 +84,7 @@
 
 bool MacDeviceManager::GetVideoCaptureDevices(std::vector<Device>* devices) {
   devices->clear();
-  if (!GetQTKitVideoDevices(devices)) {
+  if (!GetAVFoundationVideoDevices(devices)) {
     return false;
   }
   return FilterDevices(devices, kFilteredVideoDevicesName);
diff --git a/media/devices/macdevicemanagermm.mm b/media/devices/macdevicemanagermm.mm
index 3091ec4..cfcf5a4 100644
--- a/media/devices/macdevicemanagermm.mm
+++ b/media/devices/macdevicemanagermm.mm
@@ -33,6 +33,11 @@
 #include "talk/media/devices/devicemanager.h"
 
 #import <assert.h>
+#ifdef __MAC_OS_X_VERSION_MAX_ALLOWED
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
+  #import <AVFoundation/AVFoundation.h>
+#endif
+#endif
 #import <QTKit/QTKit.h>
 
 #include "webrtc/base/logging.h"
@@ -136,4 +141,52 @@
   return true;
 }
 
+bool GetAVFoundationVideoDevices(std::vector<Device>* devices) {
+#ifdef __MAC_OS_X_VERSION_MAX_ALLOWED
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >=1070
+  if (![AVCaptureDevice class]) {
+    // Fall back to using QTKit if AVFoundation is not available.
+    return GetQTKitVideoDevices(devices);
+  }
+#if !__has_feature(objc_arc)
+  NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
+#else
+  @autoreleasepool
+#endif
+  {
+    NSArray* capture_devices = [AVCaptureDevice devices];
+    LOG(LS_INFO) << [capture_devices count] << " capture device(s) found:";
+    for (AVCaptureDevice* capture_device in capture_devices) {
+      if ([capture_device hasMediaType:AVMediaTypeVideo] ||
+          [capture_device hasMediaType:AVMediaTypeMuxed]) {
+        static NSString* const kFormat = @"localizedName: \"%@\", "
+            @"modelID: \"%@\", uniqueID \"%@\", isConnected: %d, "
+            @"isInUseByAnotherApplication: %d";
+        NSString* info = [NSString
+            stringWithFormat:kFormat,
+                             [capture_device localizedName],
+                             [capture_device modelID],
+                             [capture_device uniqueID],
+                             [capture_device isConnected],
+                             [capture_device isInUseByAnotherApplication]];
+        LOG(LS_INFO) << [info UTF8String];
+
+        std::string name([[capture_device localizedName] UTF8String]);
+        devices->push_back(
+            Device(name, [[capture_device uniqueID] UTF8String]));
+      }
+    }
+  }
+#if !__has_feature(objc_arc)
+  [pool drain];
+#endif
+  return true;
+#else  // __MAC_OS_X_VERSION_MAX_ALLOWED >=1070
+  return GetQTKitVideoDevices(devices);
+#endif  // __MAC_OS_X_VERSION_MAX_ALLOWED >=1070
+#else  // __MAC_OS_X_VERSION_MAX_ALLOWED
+  return GetQTKitVideoDevices(devices);
+#endif  // __MAC_OS_X_VERSION_MAX_ALLOWED
+}
+
 }  // namespace cricket
diff --git a/media/other/linphonemediaengine.h b/media/other/linphonemediaengine.h
index e4ba345..b4950de 100644
--- a/media/other/linphonemediaengine.h
+++ b/media/other/linphonemediaengine.h
@@ -70,7 +70,6 @@
   virtual VideoMediaChannel* CreateVideoChannel(VoiceMediaChannel* voice_ch);
   virtual SoundclipMedia* CreateSoundclip() { return NULL; }
   virtual bool SetAudioOptions(int options) { return true; }
-  virtual bool SetVideoOptions(int options) { return true; }
   virtual bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config) {
     return true;
   }
@@ -81,7 +80,6 @@
   virtual bool SetOutputVolume(int level) { return true; }
   virtual int GetInputLevel() { return 0; }
   virtual bool SetLocalMonitor(bool enable) { return true; }
-  virtual bool SetLocalRenderer(VideoRenderer* renderer) { return true; }
   // TODO: control channel send?
   virtual bool SetVideoCapture(bool capture) { return true; }
   virtual const std::vector<AudioCodec>& audio_codecs() {
diff --git a/media/sctp/sctpdataengine_unittest.cc b/media/sctp/sctpdataengine_unittest.cc
index 0fb9b9d..fb00650 100644
--- a/media/sctp/sctpdataengine_unittest.cc
+++ b/media/sctp/sctpdataengine_unittest.cc
@@ -45,6 +45,11 @@
 #include "webrtc/base/ssladapter.h"
 #include "webrtc/base/thread.h"
 
+#ifdef HAVE_NSS_SSL_H
+// TODO(thorcarpenter): Remove after webrtc switches over to BoringSSL.
+#include "webrtc/base/nssstreamadapter.h"
+#endif  // HAVE_NSS_SSL_H
+
 enum {
   MSG_PACKET = 1,
 };
@@ -69,8 +74,7 @@
 
     // TODO(ldixon): Can/should we use Buffer.TransferTo here?
     // Note: this assignment does a deep copy of data from packet.
-    rtc::Buffer* buffer = new rtc::Buffer(packet->data(),
-                                                      packet->length());
+    rtc::Buffer* buffer = new rtc::Buffer(packet->data(), packet->length());
     thread_->Post(this, MSG_PACKET, rtc::WrapMessageData(buffer));
     LOG(LS_VERBOSE) << "SctpFakeNetworkInterface::SendPacket, Posted message.";
     return true;
@@ -219,6 +223,12 @@
   // usrsctp uses the NSS random number generator on non-Android platforms,
   // so we need to initialize SSL.
   static void SetUpTestCase() {
+#ifdef HAVE_NSS_SSL_H
+  // TODO(thorcarpenter): Remove after webrtc switches over to BoringSSL.
+  if (!rtc::NSSContext::InitializeSSL(NULL)) {
+    LOG(LS_WARNING) << "Unable to initialize NSS.";
+  }
+#endif  // HAVE_NSS_SSL_H
     rtc::InitializeSSL();
   }
 
@@ -266,6 +276,10 @@
   virtual void TearDown() {
     channel1()->SetSend(false);
     channel2()->SetSend(false);
+
+    // Process messages until idle to prevent a sent packet from being dropped
+    // and causing memory leaks (not being deleted by the receiver).
+    ProcessMessagesUntilIdle();
   }
 
   void AddStream(int ssrc) {
diff --git a/media/testdata/faces.1280x720_P420.yuv b/media/testdata/faces.1280x720_P420.yuv
new file mode 100644
index 0000000..f4844ee
--- /dev/null
+++ b/media/testdata/faces.1280x720_P420.yuv
Binary files differ
diff --git a/media/testdata/faces_I400.jpg b/media/testdata/faces_I400.jpg
new file mode 100644
index 0000000..c928f00
--- /dev/null
+++ b/media/testdata/faces_I400.jpg
Binary files differ
diff --git a/media/testdata/faces_I411.jpg b/media/testdata/faces_I411.jpg
new file mode 100644
index 0000000..600c122
--- /dev/null
+++ b/media/testdata/faces_I411.jpg
Binary files differ
diff --git a/media/testdata/faces_I420.jpg b/media/testdata/faces_I420.jpg
new file mode 100644
index 0000000..10c5332
--- /dev/null
+++ b/media/testdata/faces_I420.jpg
Binary files differ
diff --git a/media/testdata/faces_I422.jpg b/media/testdata/faces_I422.jpg
new file mode 100644
index 0000000..9907aa1
--- /dev/null
+++ b/media/testdata/faces_I422.jpg
Binary files differ
diff --git a/media/testdata/faces_I444.jpg b/media/testdata/faces_I444.jpg
new file mode 100644
index 0000000..3422755
--- /dev/null
+++ b/media/testdata/faces_I444.jpg
Binary files differ
diff --git a/media/webrtc/OWNERS b/media/webrtc/OWNERS
index 9a3546e..aef5939 100644
--- a/media/webrtc/OWNERS
+++ b/media/webrtc/OWNERS
@@ -1,3 +1,3 @@
 mflodman@webrtc.org
+pbos@webrtc.org
 pthatcher@webrtc.org
-wu@webrtc.org
diff --git a/media/webrtc/fakewebrtcvideocapturemodule.h b/media/webrtc/fakewebrtcvideocapturemodule.h
index 347e4b7..82b5cdd 100644
--- a/media/webrtc/fakewebrtcvideocapturemodule.h
+++ b/media/webrtc/fakewebrtcvideocapturemodule.h
@@ -44,76 +44,72 @@
         running_(false),
         delay_(0) {
   }
-  virtual int32_t Version(char* version,
-                          uint32_t& remaining_buffer_in_bytes,
-                          uint32_t& position) const {
+  virtual int32_t TimeUntilNextProcess() OVERRIDE {
     return 0;
   }
-  virtual int32_t TimeUntilNextProcess() {
+  virtual int32_t Process() OVERRIDE {
     return 0;
   }
-  virtual int32_t Process() {
-    return 0;
-  }
-  virtual int32_t ChangeUniqueId(const int32_t id) {
+  virtual int32_t ChangeUniqueId(const int32_t id) OVERRIDE {
     id_ = id;
     return 0;
   }
   virtual void RegisterCaptureDataCallback(
-      webrtc::VideoCaptureDataCallback& callback) {
+      webrtc::VideoCaptureDataCallback& callback) OVERRIDE {
     callback_ = &callback;
   }
-  virtual void DeRegisterCaptureDataCallback() { callback_ = NULL; }
-  virtual void RegisterCaptureCallback(webrtc::VideoCaptureFeedBack& callback) {
+  virtual void DeRegisterCaptureDataCallback() OVERRIDE { callback_ = NULL; }
+  virtual void RegisterCaptureCallback(
+      webrtc::VideoCaptureFeedBack& callback) OVERRIDE {
     // Not implemented.
   }
-  virtual void DeRegisterCaptureCallback() {
+  virtual void DeRegisterCaptureCallback() OVERRIDE {
     // Not implemented.
   }
-  virtual void SetCaptureDelay(int32_t delay) { delay_ = delay; }
-  virtual int32_t CaptureDelay() { return delay_; }
-  virtual void EnableFrameRateCallback(const bool enable) {
+  virtual void SetCaptureDelay(int32_t delay) OVERRIDE { delay_ = delay; }
+  virtual int32_t CaptureDelay() OVERRIDE { return delay_; }
+  virtual void EnableFrameRateCallback(const bool enable) OVERRIDE {
     // not implemented
   }
-  virtual void EnableNoPictureAlarm(const bool enable) {
+  virtual void EnableNoPictureAlarm(const bool enable) OVERRIDE {
     // not implemented
   }
   virtual int32_t StartCapture(
-      const webrtc::VideoCaptureCapability& cap) {
+      const webrtc::VideoCaptureCapability& cap) OVERRIDE {
     if (running_) return -1;
     cap_ = cap;
     running_ = true;
     return 0;
   }
-  virtual int32_t StopCapture() {
+  virtual int32_t StopCapture() OVERRIDE {
     running_ = false;
     return 0;
   }
-  virtual const char* CurrentDeviceName() const {
+  virtual const char* CurrentDeviceName() const OVERRIDE {
     return NULL;  // not implemented
   }
-  virtual bool CaptureStarted() {
+  virtual bool CaptureStarted() OVERRIDE {
     return running_;
   }
   virtual int32_t CaptureSettings(
-      webrtc::VideoCaptureCapability& settings) {
+      webrtc::VideoCaptureCapability& settings) OVERRIDE {
     if (!running_) return -1;
     settings = cap_;
     return 0;
   }
 
   virtual int32_t SetCaptureRotation(
-      webrtc::VideoCaptureRotation rotation) {
+      webrtc::VideoCaptureRotation rotation) OVERRIDE {
     return -1;  // not implemented
   }
   virtual VideoCaptureEncodeInterface* GetEncodeInterface(
-      const webrtc::VideoCodec& codec) {
+      const webrtc::VideoCodec& codec) OVERRIDE {
     return NULL;  // not implemented
   }
-  virtual int32_t AddRef() {
+  virtual int32_t AddRef() OVERRIDE {
     return 0;
   }
-  virtual int32_t Release() {
+  virtual int32_t Release() OVERRIDE {
     delete this;
     return 0;
   }
diff --git a/media/webrtc/fakewebrtcvideoengine.h b/media/webrtc/fakewebrtcvideoengine.h
index 5cba380..ec4b9c6 100644
--- a/media/webrtc/fakewebrtcvideoengine.h
+++ b/media/webrtc/fakewebrtcvideoengine.h
@@ -335,6 +335,7 @@
     unsigned int send_nack_bitrate_;
     unsigned int send_bandwidth_;
     unsigned int receive_bandwidth_;
+    // Bandwidth to deduct from estimated uplink capacity.
     unsigned int reserved_transmit_bitrate_bps_;
     bool suspend_below_min_bitrate_;
     webrtc::CpuOveruseObserver* overuse_observer_;
diff --git a/media/webrtc/fakewebrtcvoiceengine.h b/media/webrtc/fakewebrtcvoiceengine.h
index f731b8d..52a50ff 100644
--- a/media/webrtc/fakewebrtcvoiceengine.h
+++ b/media/webrtc/fakewebrtcvoiceengine.h
@@ -43,10 +43,7 @@
 #ifdef USE_WEBRTC_DEV_BRANCH
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #endif
-
-namespace webrtc {
-class ViENetwork;
-}
+#include "webrtc/video_engine/include/vie_network.h"
 
 namespace cricket {
 
@@ -64,6 +61,12 @@
 static const int kFakeDeviceId = 1;
 #endif
 
+static const int kOpusBandwidthNb = 4000;
+static const int kOpusBandwidthMb = 6000;
+static const int kOpusBandwidthWb = 8000;
+static const int kOpusBandwidthSwb = 12000;
+static const int kOpusBandwidthFb = 20000;
+
 // Verify the header extension ID, if enabled, is within the bounds specified in
 // [RFC5285]: 1-14 inclusive.
 #define WEBRTC_CHECK_HEADER_EXTENSION_ID(enable, id) \
@@ -183,6 +186,7 @@
           file(false),
           vad(false),
           codec_fec(false),
+          max_encoding_bandwidth(0),
           red(false),
           nack(false),
           media_processor_registered(false),
@@ -212,6 +216,7 @@
     bool file;
     bool vad;
     bool codec_fec;
+    int max_encoding_bandwidth;
     bool red;
     bool nack;
     bool media_processor_registered;
@@ -308,6 +313,9 @@
   bool GetCodecFEC(int channel) {
     return channels_[channel]->codec_fec;
   }
+  int GetMaxEncodingBandwidth(int channel) {
+    return channels_[channel]->max_encoding_bandwidth;
+  }
   bool GetNACK(int channel) {
     return channels_[channel]->nack;
   }
@@ -316,6 +324,8 @@
   }
   webrtc::ViENetwork* GetViENetwork(int channel) {
     WEBRTC_ASSERT_CHANNEL(channel);
+    // WARNING: This pointer is for verification purposes only. Calling
+    // functions on it may result in undefined behavior!
     return channels_[channel]->vie_network;
   }
   int GetVideoChannel(int channel) {
@@ -488,8 +498,6 @@
   WEBRTC_STUB(LastError, ());
   WEBRTC_STUB(SetOnHoldStatus, (int, bool, webrtc::OnHoldModes));
   WEBRTC_STUB(GetOnHoldStatus, (int, bool&, webrtc::OnHoldModes&));
-  WEBRTC_STUB(SetNetEQPlayoutMode, (int, webrtc::NetEqModes));
-  WEBRTC_STUB(GetNetEQPlayoutMode, (int, webrtc::NetEqModes&));
 
   // webrtc::VoECodec
   WEBRTC_FUNC(NumOfCodecs, ()) {
@@ -625,10 +633,11 @@
   }
   WEBRTC_STUB(GetVADStatus, (int channel, bool& enabled,
                              webrtc::VadModes& mode, bool& disabledDTX));
+
 #ifdef USE_WEBRTC_DEV_BRANCH
   WEBRTC_FUNC(SetFECStatus, (int channel, bool enable)) {
     WEBRTC_CHECK_CHANNEL(channel);
-    if (strcmp(channels_[channel]->send_codec.plname, "opus")) {
+    if (_stricmp(channels_[channel]->send_codec.plname, "opus") != 0) {
       // Return -1 if current send codec is not Opus.
       // TODO(minyue): Excludes other codecs if they support inband FEC.
       return -1;
@@ -641,6 +650,25 @@
     enable = channels_[channel]->codec_fec;
     return 0;
   }
+
+  WEBRTC_FUNC(SetOpusMaxPlaybackRate, (int channel, int frequency_hz)) {
+    WEBRTC_CHECK_CHANNEL(channel);
+    if (_stricmp(channels_[channel]->send_codec.plname, "opus") != 0) {
+      // Return -1 if current send codec is not Opus.
+      return -1;
+    }
+    if (frequency_hz <= 8000)
+      channels_[channel]->max_encoding_bandwidth = kOpusBandwidthNb;
+    else if (frequency_hz <= 12000)
+      channels_[channel]->max_encoding_bandwidth = kOpusBandwidthMb;
+    else if (frequency_hz <= 16000)
+      channels_[channel]->max_encoding_bandwidth = kOpusBandwidthWb;
+    else if (frequency_hz <= 24000)
+      channels_[channel]->max_encoding_bandwidth = kOpusBandwidthSwb;
+    else
+      channels_[channel]->max_encoding_bandwidth = kOpusBandwidthFb;
+    return 0;
+  }
 #endif  // USE_WEBRTC_DEV_BRANCH
 
   // webrtc::VoEDtmf
@@ -999,6 +1027,11 @@
     WEBRTC_CHECK_CHANNEL(channel);
     channels_[channel]->vie_network = vie_network;
     channels_[channel]->video_channel = video_channel;
+    if (vie_network) {
+      // The interface is released here to avoid leaks. A test should not
+      // attempt to call functions on the interface stored in the channel.
+      vie_network->Release();
+    }
     return 0;
   }
 
diff --git a/media/webrtc/webrtcmediaengine.cc b/media/webrtc/webrtcmediaengine.cc
index 252b4e6..cf0fcdf 100644
--- a/media/webrtc/webrtcmediaengine.cc
+++ b/media/webrtc/webrtcmediaengine.cc
@@ -50,7 +50,6 @@
       WebRtcVideoDecoderFactory* decoder_factory) {
     voice_.SetAudioDeviceModule(adm, adm_sc);
     video_.SetVoiceEngine(&voice_);
-    video_.EnableTimedRender();
     video_.SetExternalEncoderFactory(encoder_factory);
     video_.SetExternalDecoderFactory(decoder_factory);
   }
@@ -65,8 +64,9 @@
                      WebRtcVideoEncoderFactory* encoder_factory,
                      WebRtcVideoDecoderFactory* decoder_factory) {
     voice_.SetAudioDeviceModule(adm, adm_sc);
+    video_.SetExternalDecoderFactory(decoder_factory);
+    video_.SetExternalEncoderFactory(encoder_factory);
     video_.SetVoiceEngine(&voice_);
-    video_.EnableTimedRender();
   }
 };
 #endif  // WEBRTC_CHROMIUM_BUILD
@@ -85,7 +85,6 @@
         adm, adm_sc, encoder_factory, decoder_factory);
   }
 #endif // WEBRTC_CHROMIUM_BUILD
-  // This is just to get a diff to run pulse.
   return new cricket::WebRtcMediaEngine(
       adm, adm_sc, encoder_factory, decoder_factory);
 }
diff --git a/media/webrtc/webrtcmediaengine.h b/media/webrtc/webrtcmediaengine.h
index b906f5d..df517ee 100644
--- a/media/webrtc/webrtcmediaengine.h
+++ b/media/webrtc/webrtcmediaengine.h
@@ -122,9 +122,6 @@
   virtual bool SetAudioOptions(const AudioOptions& options) OVERRIDE {
     return delegate_->SetAudioOptions(options);
   }
-  virtual bool SetVideoOptions(const VideoOptions& options) OVERRIDE {
-    return delegate_->SetVideoOptions(options);
-  }
   virtual bool SetAudioDelayOffset(int offset) OVERRIDE {
     return delegate_->SetAudioDelayOffset(offset);
   }
@@ -151,9 +148,6 @@
   virtual bool SetLocalMonitor(bool enable) OVERRIDE {
     return delegate_->SetLocalMonitor(enable);
   }
-  virtual bool SetLocalRenderer(VideoRenderer* renderer) OVERRIDE {
-    return delegate_->SetLocalRenderer(renderer);
-  }
   virtual const std::vector<AudioCodec>& audio_codecs() OVERRIDE {
     return delegate_->audio_codecs();
   }
diff --git a/media/webrtc/webrtcpassthroughrender.h b/media/webrtc/webrtcpassthroughrender.h
index a432776..8d8c488 100644
--- a/media/webrtc/webrtcpassthroughrender.h
+++ b/media/webrtc/webrtcpassthroughrender.h
@@ -41,26 +41,20 @@
   WebRtcPassthroughRender();
   virtual ~WebRtcPassthroughRender();
 
-  virtual int32_t Version(int8_t* version,
-                          uint32_t& remainingBufferInBytes,
-                          uint32_t& position) const {
+  virtual int32_t ChangeUniqueId(const int32_t id) OVERRIDE {
     return 0;
   }
 
-  virtual int32_t ChangeUniqueId(const int32_t id) {
-    return 0;
-  }
+  virtual int32_t TimeUntilNextProcess() OVERRIDE { return 0; }
 
-  virtual int32_t TimeUntilNextProcess() { return 0; }
+  virtual int32_t Process() OVERRIDE { return 0; }
 
-  virtual int32_t Process() { return 0; }
-
-  virtual void* Window() {
+  virtual void* Window() OVERRIDE {
     rtc::CritScope cs(&render_critical_);
     return window_;
   }
 
-  virtual int32_t ChangeWindow(void* window) {
+  virtual int32_t ChangeWindow(void* window) OVERRIDE {
     rtc::CritScope cs(&render_critical_);
     window_ = window;
     return 0;
@@ -70,64 +64,60 @@
       const uint32_t stream_id,
       const uint32_t zOrder,
       const float left, const float top,
-      const float right, const float bottom);
+      const float right, const float bottom) OVERRIDE;
 
-  virtual int32_t DeleteIncomingRenderStream(const uint32_t stream_id);
+  virtual int32_t DeleteIncomingRenderStream(const uint32_t stream_id) OVERRIDE;
 
   virtual int32_t AddExternalRenderCallback(
       const uint32_t stream_id,
-      webrtc::VideoRenderCallback* render_object);
+      webrtc::VideoRenderCallback* render_object) OVERRIDE;
 
   virtual int32_t GetIncomingRenderStreamProperties(
       const uint32_t stream_id,
       uint32_t& zOrder,
       float& left, float& top,
-      float& right, float& bottom) const {
+      float& right, float& bottom) const OVERRIDE {
     return -1;
   }
 
-  virtual uint32_t GetIncomingFrameRate(
-      const uint32_t stream_id) {
+  virtual uint32_t GetIncomingFrameRate(const uint32_t stream_id) OVERRIDE {
     return 0;
   }
 
-  virtual uint32_t GetNumIncomingRenderStreams() const {
+  virtual uint32_t GetNumIncomingRenderStreams() const OVERRIDE {
     return static_cast<uint32_t>(stream_render_map_.size());
   }
 
-  virtual bool HasIncomingRenderStream(const uint32_t stream_id) const;
+  virtual bool HasIncomingRenderStream(const uint32_t stream_id) const OVERRIDE;
 
   virtual int32_t RegisterRawFrameCallback(
       const uint32_t stream_id,
-      webrtc::VideoRenderCallback* callback_obj) {
+      webrtc::VideoRenderCallback* callback_obj) OVERRIDE {
     return -1;
   }
 
   virtual int32_t GetLastRenderedFrame(
       const uint32_t stream_id,
-      webrtc::I420VideoFrame &frame) const {
+      webrtc::I420VideoFrame &frame) const OVERRIDE {
     return -1;
   }
 
-  virtual int32_t StartRender(
-      const uint32_t stream_id);
+  virtual int32_t StartRender(const uint32_t stream_id) OVERRIDE;
 
-  virtual int32_t StopRender(
-      const uint32_t stream_id);
+  virtual int32_t StopRender(const uint32_t stream_id) OVERRIDE;
 
-  virtual int32_t ResetRender() { return 0; }
+  virtual int32_t ResetRender() OVERRIDE { return 0; }
 
-  virtual webrtc::RawVideoType PreferredVideoType() const;
+  virtual webrtc::RawVideoType PreferredVideoType() const OVERRIDE;
 
-  virtual bool IsFullScreen() { return false; }
+  virtual bool IsFullScreen() OVERRIDE { return false; }
 
   virtual int32_t GetScreenResolution(uint32_t& screenWidth,
-                                      uint32_t& screenHeight) const {
+                                      uint32_t& screenHeight) const OVERRIDE {
     return -1;
   }
 
-  virtual uint32_t RenderFrameRate(
-      const uint32_t stream_id) {
+  virtual uint32_t RenderFrameRate(const uint32_t stream_id) OVERRIDE {
     return 0;
   }
 
@@ -135,11 +125,12 @@
       const uint32_t stream_id,
       const float left, const float top,
       const float right,
-      const float bottom) {
+      const float bottom) OVERRIDE {
     return -1;
   }
 
-  virtual int32_t SetExpectedRenderDelay(uint32_t stream_id, int32_t delay_ms) {
+  virtual int32_t SetExpectedRenderDelay(uint32_t stream_id,
+                                         int32_t delay_ms) OVERRIDE {
     return -1;
   }
 
@@ -148,22 +139,22 @@
       const unsigned int zOrder,
       const float left, const float top,
       const float right,
-      const float bottom) {
+      const float bottom) OVERRIDE {
     return -1;
   }
 
-  virtual int32_t SetTransparentBackground(const bool enable) {
+  virtual int32_t SetTransparentBackground(const bool enable) OVERRIDE {
     return -1;
   }
 
-  virtual int32_t FullScreenRender(void* window, const bool enable) {
+  virtual int32_t FullScreenRender(void* window, const bool enable) OVERRIDE {
     return -1;
   }
 
   virtual int32_t SetBitmap(const void* bitMap,
       const uint8_t pictureId, const void* colorKey,
       const float left, const float top,
-      const float right, const float bottom) {
+      const float right, const float bottom) OVERRIDE {
     return -1;
   }
 
@@ -173,27 +164,27 @@
       const uint32_t textColorRef,
       const uint32_t backgroundColorRef,
       const float left, const float top,
-      const float right, const float bottom) {
+      const float right, const float bottom) OVERRIDE {
     return -1;
   }
 
   virtual int32_t SetStartImage(
       const uint32_t stream_id,
-      const webrtc::I420VideoFrame& videoFrame) {
+      const webrtc::I420VideoFrame& videoFrame) OVERRIDE {
     return -1;
   }
 
   virtual int32_t SetTimeoutImage(
       const uint32_t stream_id,
       const webrtc::I420VideoFrame& videoFrame,
-      const uint32_t timeout) {
+      const uint32_t timeout) OVERRIDE {
     return -1;
   }
 
   virtual int32_t MirrorRenderStream(const int renderId,
                                      const bool enable,
                                      const bool mirrorXAxis,
-                                     const bool mirrorYAxis) {
+                                     const bool mirrorYAxis) OVERRIDE {
     return -1;
   }
 
diff --git a/media/webrtc/webrtcvideoengine.cc b/media/webrtc/webrtcvideoengine.cc
index 83b1177..b2533b3 100644
--- a/media/webrtc/webrtcvideoengine.cc
+++ b/media/webrtc/webrtcvideoengine.cc
@@ -64,6 +64,22 @@
 #include "webrtc/experiments.h"
 #include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
 
+namespace {
+
+template <class T>
+bool Changed(cricket::Settable<T> proposed,
+             cricket::Settable<T> original) {
+  return proposed.IsSet() && proposed != original;
+}
+
+template <class T>
+bool Changed(cricket::Settable<T> proposed,
+             cricket::Settable<T> original,
+             T* value) {
+  return proposed.Get(value) && proposed != original;
+}
+
+}  // namespace
 
 namespace cricket {
 
@@ -81,6 +97,9 @@
 
 const int kCpuMonitorPeriodMs = 2000;  // 2 seconds.
 
+// TODO(pthatcher): Figure out what the proper value here is, or if we
+// can just remove this altogether.
+static const int kDefaultRenderDelayMs = 100;
 
 static const int kDefaultLogSeverity = rtc::LS_WARNING;
 
@@ -926,8 +945,6 @@
   initialized_ = false;
   SetTraceFilter(SeverityToFilter(kDefaultLogSeverity));
   render_module_.reset(new WebRtcPassthroughRender());
-  local_renderer_w_ = local_renderer_h_ = 0;
-  local_renderer_ = NULL;
   capture_started_ = false;
   decoder_factory_ = NULL;
   encoder_factory_ = NULL;
@@ -953,6 +970,9 @@
     LOG(LS_ERROR) << "Failed to initialize list of supported codec types";
   }
 
+  // Consider jitter, packet loss, etc when rendering.  This will
+  // theoretically make rendering more smooth.
+  EnableTimedRender();
 
   // Load our RTP Header extensions.
   rtp_header_extensions_.push_back(
@@ -1059,10 +1079,6 @@
   return VIDEO_RECV | VIDEO_SEND;
 }
 
-bool WebRtcVideoEngine::SetOptions(const VideoOptions &options) {
-  return true;
-}
-
 bool WebRtcVideoEngine::SetDefaultEncoderConfig(
     const VideoEncoderConfig& config) {
   return SetDefaultCodec(config.max_codec);
@@ -1109,12 +1125,6 @@
   return channel;
 }
 
-bool WebRtcVideoEngine::SetLocalRenderer(VideoRenderer* renderer) {
-  local_renderer_w_ = local_renderer_h_ = 0;
-  local_renderer_ = renderer;
-  return true;
-}
-
 const std::vector<VideoCodec>& WebRtcVideoEngine::codecs() const {
   return video_codecs_;
 }
@@ -1568,6 +1578,7 @@
       remb_enabled_(false),
       render_started_(false),
       first_receive_ssrc_(kSsrcUnset),
+      receiver_report_ssrc_(kSsrcUnset),
       num_unsignalled_recv_channels_(0),
       send_rtx_type_(-1),
       send_red_type_(-1),
@@ -1580,7 +1591,19 @@
 
 bool WebRtcVideoMediaChannel::Init() {
   const uint32 ssrc_key = 0;
-  return CreateChannel(ssrc_key, MD_SENDRECV, &default_channel_id_);
+  bool result = CreateChannel(ssrc_key, MD_SENDRECV, &default_channel_id_);
+  if (!result) {
+    return false;
+  }
+  if (voice_channel_) {
+    WebRtcVoiceMediaChannel* voice_channel =
+        static_cast<WebRtcVoiceMediaChannel*>(voice_channel_);
+    if (!voice_channel->SetupSharedBandwidthEstimation(
+        engine()->vie()->engine(), default_channel_id_)) {
+      return false;
+    }
+  }
+  return true;
 }
 
 WebRtcVideoMediaChannel::~WebRtcVideoMediaChannel() {
@@ -1752,6 +1775,35 @@
   return true;
 }
 
+bool WebRtcVideoMediaChannel::MaybeRegisterExternalEncoder(
+    WebRtcVideoChannelSendInfo* send_channel,
+    const webrtc::VideoCodec& codec) {
+  // Codec type not supported or encoder already registered, so
+  // nothing to do.
+  if (!engine()->IsExternalEncoderCodecType(codec.codecType)
+      || send_channel->IsEncoderRegistered(codec.plType)) {
+    return true;
+  }
+
+  webrtc::VideoEncoder* encoder =
+      engine()->CreateExternalEncoder(codec.codecType);
+  if (!encoder) {
+    // No encoder factory, so nothing to do.
+    return true;
+  }
+
+  const int channel_id = send_channel->channel_id();
+  if (engine()->vie()->ext_codec()->RegisterExternalSendCodec(
+          channel_id, codec.plType, encoder, false) != 0) {
+    LOG_RTCERR2(RegisterExternalSendCodec, channel_id, codec.plName);
+    engine()->DestroyExternalEncoder(encoder);
+    return false;
+  }
+
+  send_channel->RegisterEncoder(codec.plType, encoder);
+  return true;
+}
+
 bool WebRtcVideoMediaChannel::GetSendCodec(VideoCodec* send_codec) {
   if (!send_codec_) {
     return false;
@@ -1861,18 +1913,9 @@
   }
 
   WebRtcVideoChannelSendInfo* send_channel = GetSendChannelBySsrcKey(ssrc_key);
-  // Set the send (local) SSRC.
   // If there are multiple send SSRCs, we can only set the first one here, and
   // the rest of the SSRC(s) need to be set after SetSendCodec has been called
-  // (with a codec requires multiple SSRC(s)).
-  if (engine()->vie()->rtp()->SetLocalSSRC(channel_id,
-                                           sp.first_ssrc()) != 0) {
-    LOG_RTCERR2(SetLocalSSRC, channel_id, sp.first_ssrc());
-    return false;
-  }
-
-  // Set the corresponding RTX SSRC.
-  if (!SetLocalRtxSsrc(channel_id, sp, sp.first_ssrc(), 0)) {
+  if (!SetLimitedNumberOfSendSsrcs(channel_id, sp, 1)) {
     return false;
   }
 
@@ -1883,21 +1926,9 @@
     return false;
   }
 
-  // At this point the channel's local SSRC has been updated. If the channel is
-  // the default channel make sure that all the receive channels are updated as
-  // well. Receive channels have to have the same SSRC as the default channel in
-  // order to send receiver reports with this SSRC.
+  // Use the SSRC of the default channel in the RTCP receiver reports.
   if (IsDefaultChannelId(channel_id)) {
-    for (RecvChannelMap::const_iterator it = recv_channels_.begin();
-         it != recv_channels_.end(); ++it) {
-      WebRtcVideoChannelRecvInfo* info = it->second;
-      int channel_id = info->channel_id();
-      if (engine()->vie()->rtp()->SetLocalSSRC(channel_id,
-                                               sp.first_ssrc()) != 0) {
-        LOG_RTCERR1(SetLocalSSRC, it->first);
-        return false;
-      }
-    }
+    SetReceiverReportSsrc(sp.first_ssrc());
   }
 
   send_channel->set_stream_params(sp);
@@ -2018,25 +2049,6 @@
     return false;
   }
 
-  // Get the default renderer.
-  VideoRenderer* default_renderer = NULL;
-  if (InConferenceMode()) {
-    // The recv_channels_ size start out being 1, so if it is two here
-    // this is the first receive channel created (default_channel_id_
-    // is not used for receiving in a conference call). This means
-    // that the renderer stored inside default_channel_id_ should be
-    // used for the just created channel.
-    if (recv_channels_.size() == 2 && GetDefaultRecvChannel()) {
-      GetDefaultRenderer(&default_renderer);
-    }
-  }
-
-  // The first recv stream reuses the default renderer (if a default renderer
-  // has been set).
-  if (default_renderer) {
-    SetRenderer(sp.first_ssrc(), default_renderer);
-  }
-
   LOG(LS_INFO) << "New video stream " << sp.first_ssrc()
                << " registered to VideoEngine channel #"
                << channel_id << " and connected to channel #"
@@ -2228,6 +2240,9 @@
     return true;
   }
   if (!GetSendChannelBySsrcKey(local_ssrc)) {
+    // If a stream has multiple ssrcs, the local_ssrc could be any of
+    // them, but we use the first one (StreamParams::first_ssrc()) as
+    // the key.
     for (SendChannelMap::iterator iter = send_channels_.begin();
          iter != send_channels_.end(); ++iter) {
       WebRtcVideoChannelSendInfo* send_channel = iter->second;
@@ -2898,7 +2913,7 @@
   }
 
   // On success, SetSendCodec() will reset |send_start_bitrate_| to |bps/1000|,
-  // by calling MaybeChangeBitrates.  That method will also clamp the
+  // by calling SanitizeBitrates.  That method will also clamp the
   // start bitrate between min and max, consistent with the override behavior
   // in SetMaxSendBandwidth.
   webrtc::VideoCodec new_codec = *send_codec_;
@@ -2934,39 +2949,10 @@
     return true;
   }
 
-  // Trigger SetSendCodec to set correct noise reduction state if the option has
-  // changed.
-  bool denoiser_changed = options.video_noise_reduction.IsSet() &&
-      (options_.video_noise_reduction != options.video_noise_reduction);
-
-  bool leaky_bucket_changed = options.video_leaky_bucket.IsSet() &&
-      (options_.video_leaky_bucket != options.video_leaky_bucket);
-
-  bool buffer_latency_changed = options.buffered_mode_latency.IsSet() &&
-      (options_.buffered_mode_latency != options.buffered_mode_latency);
-
-  bool dscp_option_changed = (options_.dscp != options.dscp);
-
-  bool suspend_below_min_bitrate_changed =
-      options.suspend_below_min_bitrate.IsSet() &&
-      (options_.suspend_below_min_bitrate != options.suspend_below_min_bitrate);
-
-  bool conference_mode_turned_off = false;
-  if (options_.conference_mode.IsSet() && options.conference_mode.IsSet() &&
-      options_.conference_mode.GetWithDefaultIfUnset(false) &&
-      !options.conference_mode.GetWithDefaultIfUnset(false)) {
-    conference_mode_turned_off = true;
-  }
-
-#ifdef USE_WEBRTC_DEV_BRANCH
-  bool payload_padding_changed = options.use_payload_padding.IsSet() &&
-      options_.use_payload_padding != options.use_payload_padding;
-#endif
-
-
   // Save the options, to be interpreted where appropriate.
   // Use options_.SetAll() instead of assignment so that unset value in options
   // will not overwrite the previous option value.
+  VideoOptions original = options_;
   options_.SetAll(options);
 
   // Set CPU options for all send channels.
@@ -2977,38 +2963,36 @@
   }
 
   if (send_codec_) {
-    bool reset_send_codec_needed = denoiser_changed;
     webrtc::VideoCodec new_codec = *send_codec_;
 
+    bool conference_mode_turned_off = (
+        original.conference_mode.IsSet() &&
+        options.conference_mode.IsSet() &&
+        original.conference_mode.GetWithDefaultIfUnset(false) &&
+        !options.conference_mode.GetWithDefaultIfUnset(false));
     if (conference_mode_turned_off) {
       // This is a special case for turning conference mode off.
       // Max bitrate should go back to the default maximum value instead
       // of the current maximum.
       new_codec.maxBitrate = kAutoBandwidth;
-      reset_send_codec_needed = true;
     }
 
     // TODO(pthatcher): Remove this.  We don't need 4 ways to set bitrates.
     int new_start_bitrate;
     if (options.video_start_bitrate.Get(&new_start_bitrate)) {
       new_codec.startBitrate = new_start_bitrate;
-      reset_send_codec_needed = true;
     }
 
-
-    LOG(LS_INFO) << "Reset send codec needed is enabled? "
-                 << reset_send_codec_needed;
-    if (reset_send_codec_needed) {
-      if (!SetSendCodec(new_codec)) {
-        return false;
-      }
-      LogSendCodecChange("SetOptions()");
+    if (!SetSendCodec(new_codec)) {
+      return false;
     }
+    LogSendCodecChange("SetOptions()");
   }
 
-  if (leaky_bucket_changed) {
-    bool enable_leaky_bucket =
-        options_.video_leaky_bucket.GetWithDefaultIfUnset(true);
+  bool enable_leaky_bucket;
+  if (Changed(options.video_leaky_bucket,
+              original.video_leaky_bucket,
+              &enable_leaky_bucket)) {
     LOG(LS_INFO) << "Leaky bucket is enabled? " << enable_leaky_bucket;
     for (SendChannelMap::iterator it = send_channels_.begin();
         it != send_channels_.end(); ++it) {
@@ -3022,10 +3006,11 @@
       }
     }
   }
-  if (buffer_latency_changed) {
-    int buffer_latency =
-        options_.buffered_mode_latency.GetWithDefaultIfUnset(
-            cricket::kBufferedModeDisabled);
+
+  int buffer_latency;
+  if (Changed(options.buffered_mode_latency,
+              original.buffered_mode_latency,
+              &buffer_latency)) {
     LOG(LS_INFO) << "Buffer latency is " << buffer_latency;
     for (SendChannelMap::iterator it = send_channels_.begin();
         it != send_channels_.end(); ++it) {
@@ -3044,17 +3029,24 @@
       }
     }
   }
-  if (dscp_option_changed) {
+
+  bool dscp_enabled;
+  if (Changed(options.dscp, original.dscp, &dscp_enabled)) {
     rtc::DiffServCodePoint dscp = rtc::DSCP_DEFAULT;
-    if (options_.dscp.GetWithDefaultIfUnset(false))
+    if (dscp_enabled) {
       dscp = kVideoDscpValue;
+    }
     LOG(LS_INFO) << "DSCP is " << dscp;
     if (MediaChannel::SetDscp(dscp) != 0) {
       LOG(LS_WARNING) << "Failed to set DSCP settings for video channel";
     }
   }
-  if (suspend_below_min_bitrate_changed) {
-    if (options_.suspend_below_min_bitrate.GetWithDefaultIfUnset(false)) {
+
+  bool suspend_below_min_bitrate;
+  if (Changed(options.suspend_below_min_bitrate,
+              original.suspend_below_min_bitrate,
+              &suspend_below_min_bitrate)) {
+    if (suspend_below_min_bitrate) {
       LOG(LS_INFO) << "Suspend below min bitrate enabled.";
       for (SendChannelMap::iterator it = send_channels_.begin();
            it != send_channels_.end(); ++it) {
@@ -3065,14 +3057,17 @@
       LOG(LS_WARNING) << "Cannot disable video suspension once it is enabled";
     }
   }
+
 #ifdef USE_WEBRTC_DEV_BRANCH
-  if (payload_padding_changed) {
+  bool use_payload_padding;
+  if (Changed(options.use_payload_padding,
+              original.use_payload_padding,
+              &use_payload_padding)) {
     LOG(LS_INFO) << "Payload-based padding called.";
     for (SendChannelMap::iterator it = send_channels_.begin();
             it != send_channels_.end(); ++it) {
       engine()->vie()->rtp()->SetPadWithRedundantPayloads(
-          it->second->channel_id(),
-          options_.use_payload_padding.GetWithDefaultIfUnset(false));
+          it->second->channel_id(), use_payload_padding);
     }
   }
 #endif
@@ -3139,10 +3134,6 @@
   return true;
 }
 
-bool WebRtcVideoMediaChannel::GetDefaultRenderer(VideoRenderer** renderer) {
-  return GetRenderer(kDefaultChannelSsrcKey, renderer);
-}
-
 bool WebRtcVideoMediaChannel::GetVideoAdapter(
     uint32 ssrc, CoordinatedVideoAdapter** video_adapter) {
   WebRtcVideoChannelSendInfo* send_channel = GetSendChannelBySsrc(ssrc);
@@ -3413,6 +3404,11 @@
     return false;
   }
 
+  if (engine()->vie()->render()->SetExpectedRenderDelay(
+          channel_id, kDefaultRenderDelayMs)) {
+    LOG_RTCERR2(SetExpectedRenderDelay,
+                channel_id, kDefaultRenderDelayMs);
+  }
 
   if (engine_->vie()->rtp()->SetRembStatus(channel_id,
                                            kNotSending,
@@ -3431,20 +3427,13 @@
     return false;
   }
 
-  if (remote_ssrc != kDefaultChannelSsrcKey) {
-    // Use the same SSRC as our default channel
-    // (so the RTCP reports are correct).
-    unsigned int send_ssrc = 0;
-    webrtc::ViERTP_RTCP* rtp = engine()->vie()->rtp();
-    if (rtp->GetLocalSSRC(default_channel_id_, send_ssrc) == -1) {
-      LOG_RTCERR2(GetLocalSSRC, default_channel_id_, send_ssrc);
+  if (receiver_report_ssrc_ != kSsrcUnset) {
+    if (engine()->vie()->rtp()->SetLocalSSRC(
+            channel_id, receiver_report_ssrc_) == -1) {
+      LOG_RTCERR2(SetLocalSSRC, channel_id, receiver_report_ssrc_);
       return false;
     }
-    if (rtp->SetLocalSSRC(channel_id, send_ssrc) == -1) {
-      LOG_RTCERR2(SetLocalSSRC, channel_id, send_ssrc);
-      return false;
-    }
-  }  // Else this is the the default channel and we don't change the SSRC.
+  }
 
   // Disable color enhancement since it is a bit too aggressive.
   if (engine()->vie()->image()->EnableColorEnhancement(channel_id,
@@ -3673,21 +3662,7 @@
     target_codec.codecSpecific.VP8.denoisingOn = enable_denoising;
   }
 
-  // Register external encoder if codec type is supported by encoder factory.
-  if (engine()->IsExternalEncoderCodecType(codec.codecType) &&
-      !send_channel->IsEncoderRegistered(target_codec.plType)) {
-    webrtc::VideoEncoder* encoder =
-        engine()->CreateExternalEncoder(codec.codecType);
-    if (encoder) {
-      if (engine()->vie()->ext_codec()->RegisterExternalSendCodec(
-          channel_id, target_codec.plType, encoder, false) == 0) {
-        send_channel->RegisterEncoder(target_codec.plType, encoder);
-      } else {
-        LOG_RTCERR2(RegisterExternalSendCodec, channel_id, target_codec.plName);
-        engine()->DestroyExternalEncoder(encoder);
-      }
-    }
-  }
+  MaybeRegisterExternalEncoder(send_channel, target_codec);
 
   // Resolution and framerate may vary for different send channels.
   const VideoFormat& video_format = send_channel->video_format();
@@ -3698,7 +3673,8 @@
     LOG(LS_INFO) << "0x0 resolution selected. Captured frames will be dropped "
                  << "for ssrc: " << ssrc << ".";
   } else {
-    MaybeChangeBitrates(channel_id, &target_codec);
+    StreamParams* send_params = send_channel->stream_params();
+    SanitizeBitrates(channel_id, &target_codec);
     webrtc::VideoCodec current_codec;
     if (!engine()->vie()->codec()->GetSendCodec(channel_id, current_codec)) {
       // Compare against existing configured send codec.
@@ -3713,6 +3689,11 @@
       return false;
     }
 
+    if (send_params) {
+      if (!SetSendSsrcs(channel_id, *send_params, target_codec)) {
+        return false;
+      }
+    }
     // NOTE: SetRtxSendPayloadType must be called after all simulcast SSRCs
     // are configured. Otherwise ssrc's configured after this point will use
     // the primary PT for RTX.
@@ -3956,6 +3937,7 @@
   int screencast_min_bitrate =
       options_.screencast_min_bitrate.GetWithDefaultIfUnset(0);
   bool leaky_bucket = options_.video_leaky_bucket.GetWithDefaultIfUnset(true);
+  StreamParams* send_params = send_channel->stream_params();
   bool reset_send_codec =
     target_width != cur_width || target_height != cur_height;
   if (vie_codec.codecType == webrtc::kVideoCodecVP8) {
@@ -3979,7 +3961,7 @@
       vie_codec.codecSpecific.VP8.denoisingOn = enable_denoising;
       vie_codec.codecSpecific.VP8.frameDroppingOn = vp8_frame_dropping;
     }
-    MaybeChangeBitrates(channel_id, &vie_codec);
+    SanitizeBitrates(channel_id, &vie_codec);
 
     if (engine()->vie()->codec()->SetSendCodec(channel_id, vie_codec) != 0) {
       LOG_RTCERR1(SetSendCodec, channel_id);
@@ -4001,6 +3983,13 @@
       engine()->vie()->rtp()->SetTransmissionSmoothingStatus(channel_id,
                                                              leaky_bucket);
     }
+    // TODO(sriniv): SetSendCodec already sets ssrc's like below.
+    // Consider removing.
+    if (send_params) {
+      if (!SetSendSsrcs(channel_id, *send_params, target_codec)) {
+        return false;
+      }
+    }
     if (reset) {
       *reset = true;
     }
@@ -4010,7 +3999,7 @@
   return true;
 }
 
-void WebRtcVideoMediaChannel::MaybeChangeBitrates(
+void WebRtcVideoMediaChannel::SanitizeBitrates(
   int channel_id, webrtc::VideoCodec* codec) {
   codec->minBitrate = GetBitrate(codec->minBitrate, kMinVideoBitrate);
   codec->startBitrate = GetBitrate(codec->startBitrate, kStartVideoBitrate);
@@ -4047,7 +4036,6 @@
       codec->startBitrate = current_target_bitrate;
     }
   }
-
 }
 
 void WebRtcVideoMediaChannel::OnMessage(rtc::Message* msg) {
@@ -4156,21 +4144,55 @@
   return SetHeaderExtension(setter, channel_id, extension);
 }
 
-bool WebRtcVideoMediaChannel::SetLocalRtxSsrc(int channel_id,
-                                              const StreamParams& send_params,
-                                              uint32 primary_ssrc,
-                                              int stream_idx) {
-  uint32 rtx_ssrc = 0;
-  bool has_rtx = send_params.GetFidSsrc(primary_ssrc, &rtx_ssrc);
-  if (has_rtx && engine()->vie()->rtp()->SetLocalSSRC(
-      channel_id, rtx_ssrc, webrtc::kViEStreamTypeRtx, stream_idx) != 0) {
-    LOG_RTCERR4(SetLocalSSRC, channel_id, rtx_ssrc,
-                webrtc::kViEStreamTypeRtx, stream_idx);
+bool WebRtcVideoMediaChannel::SetPrimaryAndRtxSsrcs(
+    int channel_id, int idx, uint32 primary_ssrc,
+    const StreamParams& send_params) {
+  LOG(LS_INFO) << "Set primary ssrc " << primary_ssrc
+               << " on channel " << channel_id << " idx " << idx;
+  if (engine()->vie()->rtp()->SetLocalSSRC(
+          channel_id, primary_ssrc, webrtc::kViEStreamTypeNormal, idx) != 0) {
+    LOG_RTCERR4(SetLocalSSRC,
+                channel_id, primary_ssrc, webrtc::kViEStreamTypeNormal, idx);
     return false;
   }
+
+  uint32 rtx_ssrc = 0;
+  if (send_params.GetFidSsrc(primary_ssrc, &rtx_ssrc)) {
+    LOG(LS_INFO) << "Set rtx ssrc " << rtx_ssrc
+                 << " on channel " << channel_id << " idx " << idx;
+    if (engine()->vie()->rtp()->SetLocalSSRC(
+            channel_id, rtx_ssrc, webrtc::kViEStreamTypeRtx, idx) != 0) {
+      LOG_RTCERR4(SetLocalSSRC,
+                  channel_id, rtx_ssrc, webrtc::kViEStreamTypeRtx, idx);
+      return false;
+    }
+  }
   return true;
 }
 
+bool WebRtcVideoMediaChannel::SetLimitedNumberOfSendSsrcs(
+    int channel_id, const StreamParams& sp, size_t limit) {
+  const SsrcGroup* sim_group = sp.get_ssrc_group(kSimSsrcGroupSemantics);
+  if (!sim_group || limit == 1) {
+    return SetPrimaryAndRtxSsrcs(channel_id, 0, sp.first_ssrc(), sp);
+  }
+
+  std::vector<uint32> ssrcs = sim_group->ssrcs;
+  for (size_t i = 0; i < ssrcs.size() && i < limit; ++i) {
+    if (!SetPrimaryAndRtxSsrcs(channel_id, static_cast<int>(i), ssrcs[i], sp)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::SetSendSsrcs(
+    int channel_id, const StreamParams& sp,
+    const webrtc::VideoCodec& codec) {
+  // TODO(pthatcher): Support more than one primary SSRC per stream.
+  return SetLimitedNumberOfSendSsrcs(channel_id, sp, 1);
+}
+
 void WebRtcVideoMediaChannel::MaybeConnectCapturer(VideoCapturer* capturer) {
   if (capturer && GetSendChannelNum(capturer) == 1) {
     capturer->SignalVideoFrame.connect(this,
@@ -4184,6 +4206,18 @@
   }
 }
 
+void WebRtcVideoMediaChannel::SetReceiverReportSsrc(uint32 ssrc) {
+  for (RecvChannelMap::const_iterator it = recv_channels_.begin();
+       it != recv_channels_.end(); ++it) {
+    int channel_id = it->second->channel_id();
+    if (engine()->vie()->rtp()->SetLocalSSRC(channel_id, ssrc) != 0) {
+      LOG_RTCERR2(SetLocalSSRC, channel_id, ssrc);
+      ASSERT(false);
+    }
+  }
+  receiver_report_ssrc_ = ssrc;
+}
+
 }  // namespace cricket
 
 #endif  // HAVE_WEBRTC_VIDEO
diff --git a/media/webrtc/webrtcvideoengine.h b/media/webrtc/webrtcvideoengine.h
index c31f547..6f939d2 100644
--- a/media/webrtc/webrtcvideoengine.h
+++ b/media/webrtc/webrtcvideoengine.h
@@ -42,6 +42,9 @@
 
 #if !defined(LIBPEERCONNECTION_LIB) && \
     !defined(LIBPEERCONNECTION_IMPLEMENTATION)
+// If you hit this, then you've tried to include this header from outside
+// a shared library.  An instance of this class must only be created from
+// within the library that actually implements it.
 #error "Bogus include."
 #endif
 
@@ -98,24 +101,23 @@
                     ViEWrapper* vie_wrapper,
                     ViETraceWrapper* tracing,
                     rtc::CpuMonitor* cpu_monitor);
-  ~WebRtcVideoEngine();
+  virtual ~WebRtcVideoEngine();
 
   // Basic video engine implementation.
   bool Init(rtc::Thread* worker_thread);
   void Terminate();
 
   int GetCapabilities();
-  bool SetOptions(const VideoOptions &options);
   bool SetDefaultEncoderConfig(const VideoEncoderConfig& config);
   VideoEncoderConfig GetDefaultEncoderConfig() const;
 
-  WebRtcVideoMediaChannel* CreateChannel(VoiceMediaChannel* voice_channel);
+  virtual WebRtcVideoMediaChannel* CreateChannel(
+      VoiceMediaChannel* voice_channel);
 
   const std::vector<VideoCodec>& codecs() const;
   const std::vector<RtpHeaderExtension>& rtp_header_extensions() const;
   void SetLogging(int min_sev, const char* filter);
 
-  bool SetLocalRenderer(VideoRenderer* renderer);
   sigslot::repeater2<VideoCapturer*, CaptureState> SignalCaptureStateChange;
 
   // Set the VoiceEngine for A/V sync. This can only be called before Init.
@@ -127,7 +129,8 @@
   // Set a WebRtcVideoEncoderFactory for external encoding. Video engine does
   // not take the ownership of |encoder_factory|. The caller needs to make sure
   // that |encoder_factory| outlives the video engine.
-  void SetExternalEncoderFactory(WebRtcVideoEncoderFactory* encoder_factory);
+  virtual void SetExternalEncoderFactory(
+      WebRtcVideoEncoderFactory* encoder_factory);
   // Enable the render module with timing control.
   bool EnableTimedRender();
 
@@ -225,9 +228,6 @@
   VideoChannels channels_;
 
   bool capture_started_;
-  int local_renderer_w_;
-  int local_renderer_h_;
-  VideoRenderer* local_renderer_;
 
   rtc::scoped_ptr<rtc::CpuMonitor> cpu_monitor_;
 };
@@ -238,7 +238,7 @@
  public:
   WebRtcVideoMediaChannel(WebRtcVideoEngine* engine,
                           VoiceMediaChannel* voice_channel);
-  ~WebRtcVideoMediaChannel();
+  virtual ~WebRtcVideoMediaChannel();
   bool Init();
 
   WebRtcVideoEngine* engine() { return engine_; }
@@ -310,6 +310,20 @@
   virtual int SendPacket(int channel, const void* data, int len);
   virtual int SendRTCPPacket(int channel, const void* data, int len);
 
+  // Checks the current bitrate estimate and modifies the bitrates
+  // accordingly, including converting kAutoBandwidth to the correct defaults.
+  virtual void SanitizeBitrates(
+      int channel_id, webrtc::VideoCodec* video_codec);
+  virtual void LogSendCodecChange(const std::string& reason);
+  bool SetPrimaryAndRtxSsrcs(
+      int channel_id, int idx, uint32 primary_ssrc,
+      const StreamParams& send_params);
+  bool SetLimitedNumberOfSendSsrcs(
+      int channel_id, const StreamParams& send_params, size_t limit);
+  virtual bool SetSendSsrcs(
+      int channel_id, const StreamParams& send_params,
+      const webrtc::VideoCodec& codec);
+
  private:
   typedef std::map<uint32, WebRtcVideoChannelRecvInfo*> RecvChannelMap;
   typedef std::map<uint32, WebRtcVideoChannelSendInfo*> SendChannelMap;
@@ -339,22 +353,23 @@
   bool SetSendCodec(const webrtc::VideoCodec& codec);
   bool SetSendCodec(WebRtcVideoChannelSendInfo* send_channel,
                     const webrtc::VideoCodec& codec);
-  void LogSendCodecChange(const std::string& reason);
   // Prepares the channel with channel id |info->channel_id()| to receive all
   // codecs in |receive_codecs_| and start receive packets.
   bool SetReceiveCodecs(WebRtcVideoChannelRecvInfo* info);
   // Returns the channel ID that receives the stream with SSRC |ssrc|.
   int GetRecvChannelId(uint32 ssrc);
   bool MaybeSetRtxSsrc(const StreamParams& sp, int channel_id);
+  // Create and register an external encoder if it's possible to do
+  // so and one isn't already registered.
+  bool MaybeRegisterExternalEncoder(
+      WebRtcVideoChannelSendInfo* send_channel,
+      const webrtc::VideoCodec& codec);
   // Given captured video frame size, checks if we need to reset vie send codec.
   // |reset| is set to whether resetting has happened on vie or not.
   // Returns false on error.
   bool MaybeResetVieSendCodec(WebRtcVideoChannelSendInfo* send_channel,
                               int new_width, int new_height, bool is_screencast,
                               bool* reset);
-  // Checks the current bitrate estimate and modifies the bitrates
-  // accordingly, including converting kAutoBandwidth to the correct defaults.
-  void MaybeChangeBitrates(int channel_id, webrtc::VideoCodec* video_codec);
   // Helper function for starting the sending of media on all channels or
   // |channel_id|. Note that these two function do not change |sending_|.
   bool StartSend();
@@ -371,9 +386,10 @@
   // Returns the ssrc key corresponding to the provided local SSRC in
   // |ssrc_key|. The return value is true upon success.  If the local
   // ssrc correspond to that of the default channel the key is
-  // kDefaultChannelSsrcKey.
-  // For all other channels the returned ssrc key will be the same as
-  // the local ssrc.
+  // kDefaultChannelSsrcKey.  For all other channels the returned ssrc
+  // key will be the same as the local ssrc.  If a stream has more
+  // than one ssrc, the first (corresponding to
+  // StreamParams::first_ssrc()) is used as the key.
   bool GetSendChannelSsrcKey(uint32 local_ssrc, uint32* ssrc_key);
   WebRtcVideoChannelSendInfo* GetDefaultSendChannel();
   WebRtcVideoChannelSendInfo* GetSendChannelBySsrcKey(uint32 ssrc_key);
@@ -387,8 +403,6 @@
   bool IsDefaultChannelId(int channel_id) const {
     return channel_id == default_channel_id_;
   }
-  bool GetDefaultRenderer(VideoRenderer** renderer);
-
   bool DeleteSendChannel(uint32 ssrc_key);
 
   WebRtcVideoChannelRecvInfo* GetDefaultRecvChannel();
@@ -414,10 +428,6 @@
   // Signal when cpu adaptation has no further scope to adapt.
   void OnCpuAdaptationUnable();
 
-  // Set the local (send-side) RTX SSRC corresponding to primary_ssrc.
-  bool SetLocalRtxSsrc(int channel_id, const StreamParams& send_params,
-                       uint32 primary_ssrc, int stream_idx);
-
   // Connect |capturer| to WebRtcVideoMediaChannel if it is only registered
   // to one send channel, i.e. the first send channel.
   void MaybeConnectCapturer(VideoCapturer* capturer);
@@ -427,6 +437,9 @@
 
   bool RemoveRecvStreamInternal(uint32 ssrc);
 
+  // Set the ssrc to use for RTCP receiver reports.
+  void SetReceiverReportSsrc(uint32 ssrc);
+
   // Global state.
   WebRtcVideoEngine* engine_;
   VoiceMediaChannel* voice_channel_;
@@ -454,6 +467,7 @@
   std::map<int, int> associated_payload_types_;
   bool render_started_;
   uint32 first_receive_ssrc_;
+  uint32 receiver_report_ssrc_;
   std::vector<RtpHeaderExtension> receive_extensions_;
   int num_unsignalled_recv_channels_;
 
diff --git a/media/webrtc/webrtcvideoengine2.cc b/media/webrtc/webrtcvideoengine2.cc
index ea53596..26e3079 100644
--- a/media/webrtc/webrtcvideoengine2.cc
+++ b/media/webrtc/webrtcvideoengine2.cc
@@ -42,8 +42,7 @@
 #include "webrtc/base/logging.h"
 #include "webrtc/base/stringutils.h"
 #include "webrtc/call.h"
-// TODO(pbos): Move codecs out of modules (webrtc:3070).
-#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
+#include "webrtc/video_encoder.h"
 
 #define UNIMPLEMENTED                                                 \
   LOG(LS_ERROR) << "Call to unimplemented function " << __FUNCTION__; \
@@ -55,6 +54,8 @@
 // duration hasn't been implemented.
 static const int kNackHistoryMs = 1000;
 
+static const int kDefaultQpMax = 56;
+
 static const int kDefaultRtcpReceiverReportSsrc = 1;
 
 struct VideoCodecPref {
@@ -187,7 +188,7 @@
   stream.min_bitrate_bps = min_bitrate * 1000;
   stream.target_bitrate_bps = stream.max_bitrate_bps = max_bitrate * 1000;
 
-  int max_qp = 56;
+  int max_qp = kDefaultQpMax;
   codec.GetParam(kCodecParamMaxQuantization, &max_qp);
   stream.max_qp = max_qp;
   std::vector<webrtc::VideoStream> streams;
@@ -200,7 +201,7 @@
     const VideoOptions& options) {
   assert(SupportsCodec(codec));
   if (_stricmp(codec.name.c_str(), kVp8CodecName) == 0) {
-    return webrtc::VP8Encoder::Create();
+    return webrtc::VideoEncoder::Create(webrtc::VideoEncoder::kVp8);
   }
   // This shouldn't happen, we should be able to create encoders for all codecs
   // we support.
@@ -213,14 +214,9 @@
     const VideoOptions& options) {
   assert(SupportsCodec(codec));
   if (_stricmp(codec.name.c_str(), kVp8CodecName) == 0) {
-    webrtc::VideoCodecVP8* settings = new webrtc::VideoCodecVP8();
-    settings->resilience = webrtc::kResilientStream;
-    settings->numberOfTemporalLayers = 1;
+    webrtc::VideoCodecVP8* settings = new webrtc::VideoCodecVP8(
+        webrtc::VideoEncoder::GetDefaultVp8Settings());
     options.video_noise_reduction.Get(&settings->denoisingOn);
-    settings->errorConcealmentOn = false;
-    settings->automaticResizeOn = false;
-    settings->frameDroppingOn = true;
-    settings->keyFrameInterval = 3000;
     return settings;
   }
   return NULL;
@@ -233,13 +229,9 @@
   if (encoder_settings == NULL) {
     return;
   }
-
   if (_stricmp(codec.name.c_str(), kVp8CodecName) == 0) {
     delete reinterpret_cast<webrtc::VideoCodecVP8*>(encoder_settings);
-    return;
   }
-  // We should be able to destroy all encoder settings we've allocated.
-  assert(false);
 }
 
 bool WebRtcVideoEncoderFactory2::SupportsCodec(const VideoCodec& codec) {
@@ -283,37 +275,19 @@
 }
 
 WebRtcVideoEngine2::WebRtcVideoEngine2()
-    : default_codec_format_(kDefaultVideoCodecPref.width,
+    : worker_thread_(NULL),
+      voice_engine_(NULL),
+      video_codecs_(DefaultVideoCodecs()),
+      default_codec_format_(kDefaultVideoCodecPref.width,
                             kDefaultVideoCodecPref.height,
                             FPS_TO_INTERVAL(kDefaultFramerate),
-                            FOURCC_ANY) {
-  // Construct without a factory or voice engine.
-  Construct(NULL, NULL, new rtc::CpuMonitor(NULL));
-}
-
-WebRtcVideoEngine2::WebRtcVideoEngine2(
-    WebRtcVideoChannelFactory* channel_factory)
-    : default_codec_format_(kDefaultVideoCodecPref.width,
-                            kDefaultVideoCodecPref.height,
-                            FPS_TO_INTERVAL(kDefaultFramerate),
-                            FOURCC_ANY) {
-  // Construct without a voice engine.
-  Construct(channel_factory, NULL, new rtc::CpuMonitor(NULL));
-}
-
-void WebRtcVideoEngine2::Construct(WebRtcVideoChannelFactory* channel_factory,
-                                   WebRtcVoiceEngine* voice_engine,
-                                   rtc::CpuMonitor* cpu_monitor) {
-  LOG(LS_INFO) << "WebRtcVideoEngine2::WebRtcVideoEngine2";
-  worker_thread_ = NULL;
-  voice_engine_ = voice_engine;
-  initialized_ = false;
-  capture_started_ = false;
-  cpu_monitor_.reset(cpu_monitor);
-  channel_factory_ = channel_factory;
-
-  video_codecs_ = DefaultVideoCodecs();
-
+                            FOURCC_ANY),
+      initialized_(false),
+      cpu_monitor_(new rtc::CpuMonitor(NULL)),
+      channel_factory_(NULL),
+      external_decoder_factory_(NULL),
+      external_encoder_factory_(NULL) {
+  LOG(LS_INFO) << "WebRtcVideoEngine2::WebRtcVideoEngine2()";
   rtp_header_extensions_.push_back(
       RtpHeaderExtension(kRtpTimestampOffsetHeaderExtension,
                          kRtpTimestampOffsetHeaderExtensionDefaultId));
@@ -322,6 +296,11 @@
                          kRtpAbsoluteSenderTimeHeaderExtensionDefaultId));
 }
 
+void WebRtcVideoEngine2::SetChannelFactory(
+    WebRtcVideoChannelFactory* channel_factory) {
+  channel_factory_ = channel_factory;
+}
+
 WebRtcVideoEngine2::~WebRtcVideoEngine2() {
   LOG(LS_INFO) << "WebRtcVideoEngine2::~WebRtcVideoEngine2";
 
@@ -355,14 +334,6 @@
 
 int WebRtcVideoEngine2::GetCapabilities() { return VIDEO_RECV | VIDEO_SEND; }
 
-bool WebRtcVideoEngine2::SetOptions(const VideoOptions& options) {
-  // TODO(pbos): Do we need this? This is a no-op in the existing
-  // WebRtcVideoEngine implementation.
-  LOG(LS_VERBOSE) << "SetOptions: " << options.ToString();
-  //  options_ = options;
-  return true;
-}
-
 bool WebRtcVideoEngine2::SetDefaultEncoderConfig(
     const VideoEncoderConfig& config) {
   const VideoCodec& codec = config.max_codec;
@@ -424,14 +395,32 @@
   }
 }
 
-bool WebRtcVideoEngine2::EnableTimedRender() {
-  // TODO(pbos): Figure out whether this can be removed.
-  return true;
+void WebRtcVideoEngine2::SetExternalDecoderFactory(
+    WebRtcVideoDecoderFactory* decoder_factory) {
+  external_decoder_factory_ = decoder_factory;
 }
 
-bool WebRtcVideoEngine2::SetLocalRenderer(VideoRenderer* renderer) {
-  // TODO(pbos): Implement or remove. Unclear which stream should be rendered
-  // locally even.
+void WebRtcVideoEngine2::SetExternalEncoderFactory(
+    WebRtcVideoEncoderFactory* encoder_factory) {
+  if (external_encoder_factory_ == encoder_factory) {
+    return;
+  }
+  if (external_encoder_factory_) {
+    external_encoder_factory_->RemoveObserver(this);
+  }
+  external_encoder_factory_ = encoder_factory;
+  if (external_encoder_factory_) {
+    external_encoder_factory_->AddObserver(this);
+  }
+
+  // Invoke OnCodecsAvailable() here in case the list of codecs is already
+  // available when the encoder factory is installed. If not the encoder
+  // factory will invoke the callback later when the codecs become available.
+  OnCodecsAvailable();
+}
+
+bool WebRtcVideoEngine2::EnableTimedRender() {
+  // TODO(pbos): Figure out whether this can be removed.
   return true;
 }
 
@@ -517,6 +506,9 @@
   return &default_video_encoder_factory_;
 }
 
+void WebRtcVideoEngine2::OnCodecsAvailable() {
+  // TODO(pbos): Implement.
+}
 // Thin map between VideoFrame and an existing webrtc::I420VideoFrame
 // to avoid having to copy the rendered VideoFrame prematurely.
 // This implementation is only safe to use in a const context and should never
@@ -679,8 +671,8 @@
     WebRtcVideoEngine2* engine,
     VoiceMediaChannel* voice_channel,
     WebRtcVideoEncoderFactory2* encoder_factory)
-    : encoder_factory_(encoder_factory),
-      unsignalled_ssrc_handler_(&default_unsignalled_ssrc_handler_) {
+    : unsignalled_ssrc_handler_(&default_unsignalled_ssrc_handler_),
+      encoder_factory_(encoder_factory) {
   // TODO(pbos): Connect the video and audio with |voice_channel|.
   webrtc::Call::Config config(this);
   Construct(webrtc::Call::Create(config), engine);
@@ -690,8 +682,8 @@
     webrtc::Call* call,
     WebRtcVideoEngine2* engine,
     WebRtcVideoEncoderFactory2* encoder_factory)
-    : encoder_factory_(encoder_factory),
-      unsignalled_ssrc_handler_(&default_unsignalled_ssrc_handler_) {
+    : unsignalled_ssrc_handler_(&default_unsignalled_ssrc_handler_),
+      encoder_factory_(encoder_factory) {
   Construct(call, engine);
 }
 
@@ -1174,7 +1166,9 @@
 }
 
 void WebRtcVideoChannel2::OnReadyToSend(bool ready) {
-  LOG(LS_VERBOSE) << "OnReadySend: " << (ready ? "Ready." : "Not ready.");
+  LOG(LS_VERBOSE) << "OnReadyToSend: " << (ready ? "Ready." : "Not ready.");
+  call_->SignalNetworkState(ready ? webrtc::Call::kNetworkUp
+                                  : webrtc::Call::kNetworkDown);
 }
 
 bool WebRtcVideoChannel2::MuteStream(uint32 ssrc, bool mute) {
@@ -1315,10 +1309,10 @@
     const StreamParams& sp,
     const std::vector<webrtc::RtpExtension>& rtp_extensions)
     : call_(call),
-      parameters_(webrtc::VideoSendStream::Config(), options, codec_settings),
       encoder_factory_(encoder_factory),
-      capturer_(NULL),
       stream_(NULL),
+      parameters_(webrtc::VideoSendStream::Config(), options, codec_settings),
+      capturer_(NULL),
       sending_(false),
       muted_(false) {
   parameters_.config.rtp.max_packet_size = kVideoMtu;
@@ -1385,16 +1379,10 @@
     const VideoFrame* frame) {
   LOG(LS_VERBOSE) << "InputFrame: " << frame->GetWidth() << "x"
                   << frame->GetHeight();
-  bool is_screencast = capturer->IsScreencast();
   // Lock before copying, can be called concurrently when swapping input source.
   rtc::CritScope frame_cs(&frame_lock_);
-  if (!muted_) {
-    ConvertToI420VideoFrame(*frame, &video_frame_);
-  } else {
-    // Create a tiny black frame to transmit instead.
-    CreateBlackFrame(&video_frame_, 1, 1);
-    is_screencast = false;
-  }
+  ConvertToI420VideoFrame(*frame, &video_frame_);
+
   rtc::CritScope cs(&lock_);
   if (stream_ == NULL) {
     LOG(LS_WARNING) << "Capturer inputting frames before send codecs are "
@@ -1406,14 +1394,20 @@
     LOG(LS_VERBOSE) << "VideoFormat 0x0 set, Dropping frame.";
     return;
   }
-  // Reconfigure codec if necessary.
-  if (is_screencast) {
-    SetDimensions(video_frame_.width(), video_frame_.height());
+  if (muted_) {
+    // Create a black frame to transmit instead.
+    CreateBlackFrame(&video_frame_,
+                     static_cast<int>(frame->GetWidth()),
+                     static_cast<int>(frame->GetHeight()));
   }
+  // Reconfigure codec if necessary.
+  SetDimensions(
+      video_frame_.width(), video_frame_.height(), capturer->IsScreencast());
+
   LOG(LS_VERBOSE) << "SwapFrame: " << video_frame_.width() << "x"
                   << video_frame_.height() << " -> (codec) "
-                  << parameters_.video_streams.back().width << "x"
-                  << parameters_.video_streams.back().height;
+                  << parameters_.encoder_config.streams.back().width << "x"
+                  << parameters_.encoder_config.streams.back().height;
   stream_->Input()->SwapFrame(&video_frame_);
 }
 
@@ -1437,7 +1431,7 @@
         black_frame.CreateEmptyFrame(
             width, height, width, half_width, half_width);
         SetWebRtcFrameToBlack(&black_frame);
-        SetDimensions(width, height);
+        SetDimensions(width, height, false);
         stream_->Input()->SwapFrame(&black_frame);
       }
 
@@ -1469,9 +1463,9 @@
         << parameters_.config.rtp.ssrcs[0] << ".";
   } else {
     // TODO(pbos): Fix me, this only affects the last stream!
-    parameters_.video_streams.back().max_framerate =
+    parameters_.encoder_config.streams.back().max_framerate =
         VideoFormat::IntervalToFps(format.interval);
-    SetDimensions(format.width, format.height);
+    SetDimensions(format.width, format.height, false);
   }
 
   format_ = format;
@@ -1517,7 +1511,7 @@
   if (video_streams.empty()) {
     return;
   }
-  parameters_.video_streams = video_streams;
+  parameters_.encoder_config.streams = video_streams;
   format_ = VideoFormat(codec_settings.codec.width,
                         codec_settings.codec.height,
                         VideoFormat::FpsToInterval(30),
@@ -1560,35 +1554,55 @@
   RecreateWebRtcStream();
 }
 
-void WebRtcVideoChannel2::WebRtcVideoSendStream::SetDimensions(int width,
-                                                               int height) {
-  assert(!parameters_.video_streams.empty());
+void WebRtcVideoChannel2::WebRtcVideoSendStream::SetDimensions(
+    int width,
+    int height,
+    bool override_max) {
+  assert(!parameters_.encoder_config.streams.empty());
   LOG(LS_VERBOSE) << "SetDimensions: " << width << "x" << height;
-  if (parameters_.video_streams.back().width == width &&
-      parameters_.video_streams.back().height == height) {
-    return;
-  }
-
-  // TODO(pbos): Fix me, this only affects the last stream!
-  parameters_.video_streams.back().width = width;
-  parameters_.video_streams.back().height = height;
 
   VideoCodecSettings codec_settings;
   parameters_.codec_settings.Get(&codec_settings);
-  void* encoder_settings = encoder_factory_->CreateVideoEncoderSettings(
-      codec_settings.codec, parameters_.options);
+  // Restrict dimensions according to codec max.
+  if (!override_max) {
+    if (codec_settings.codec.width < width)
+      width = codec_settings.codec.width;
+    if (codec_settings.codec.height < height)
+      height = codec_settings.codec.height;
+  }
 
-  bool stream_reconfigured = stream_->ReconfigureVideoEncoder(
-      parameters_.video_streams, encoder_settings);
+  if (parameters_.encoder_config.streams.back().width == width &&
+      parameters_.encoder_config.streams.back().height == height) {
+    return;
+  }
 
-  encoder_factory_->DestroyVideoEncoderSettings(codec_settings.codec,
-                                                encoder_settings);
+  webrtc::VideoEncoderConfig encoder_config = parameters_.encoder_config;
+  encoder_config.encoder_specific_settings =
+      encoder_factory_->CreateVideoEncoderSettings(codec_settings.codec,
+                                                   parameters_.options);
+
+  VideoCodec codec = codec_settings.codec;
+  codec.width = width;
+  codec.height = height;
+
+  encoder_config.streams = encoder_factory_->CreateVideoStreams(
+      codec, parameters_.options, parameters_.config.rtp.ssrcs.size());
+
+  bool stream_reconfigured = stream_->ReconfigureVideoEncoder(encoder_config);
+
+  encoder_factory_->DestroyVideoEncoderSettings(
+      codec_settings.codec,
+      encoder_config.encoder_specific_settings);
+
+  encoder_config.encoder_specific_settings = NULL;
 
   if (!stream_reconfigured) {
     LOG(LS_WARNING) << "Failed to reconfigure video encoder for dimensions: "
                     << width << "x" << height;
     return;
   }
+
+  parameters_.encoder_config = encoder_config;
 }
 
 void WebRtcVideoChannel2::WebRtcVideoSendStream::Start() {
@@ -1652,9 +1666,9 @@
     info.input_frame_width = last_captured_frame_format.width;
     info.input_frame_height = last_captured_frame_format.height;
     info.send_frame_width =
-        static_cast<int>(parameters_.video_streams.front().width);
+        static_cast<int>(parameters_.encoder_config.streams.front().width);
     info.send_frame_height =
-        static_cast<int>(parameters_.video_streams.front().height);
+        static_cast<int>(parameters_.encoder_config.streams.front().height);
   }
 
   // TODO(pbos): Support or remove the following stats.
@@ -1671,14 +1685,18 @@
 
   VideoCodecSettings codec_settings;
   parameters_.codec_settings.Get(&codec_settings);
-  void* encoder_settings = encoder_factory_->CreateVideoEncoderSettings(
-      codec_settings.codec, parameters_.options);
+  parameters_.encoder_config.encoder_specific_settings =
+      encoder_factory_->CreateVideoEncoderSettings(codec_settings.codec,
+                                                   parameters_.options);
 
-  stream_ = call_->CreateVideoSendStream(
-      parameters_.config, parameters_.video_streams, encoder_settings);
+  stream_ = call_->CreateVideoSendStream(parameters_.config,
+                                         parameters_.encoder_config);
 
-  encoder_factory_->DestroyVideoEncoderSettings(codec_settings.codec,
-                                                encoder_settings);
+  encoder_factory_->DestroyVideoEncoderSettings(
+      codec_settings.codec,
+      parameters_.encoder_config.encoder_specific_settings);
+
+  parameters_.encoder_config.encoder_specific_settings = NULL;
 
   if (sending_) {
     stream_->Start();
@@ -1690,11 +1708,11 @@
     const webrtc::VideoReceiveStream::Config& config,
     const std::vector<VideoCodecSettings>& recv_codecs)
     : call_(call),
-      config_(config),
       stream_(NULL),
+      config_(config),
+      renderer_(NULL),
       last_width_(-1),
-      last_height_(-1),
-      renderer_(NULL) {
+      last_height_(-1) {
   config_.renderer = this;
   // SetRecvCodecs will also reset (start) the VideoReceiveStream.
   SetRecvCodecs(recv_codecs);
diff --git a/media/webrtc/webrtcvideoengine2.h b/media/webrtc/webrtcvideoengine2.h
index c7eb6a4..18a80d7 100644
--- a/media/webrtc/webrtcvideoengine2.h
+++ b/media/webrtc/webrtcvideoengine2.h
@@ -34,10 +34,12 @@
 
 #include "talk/media/base/mediaengine.h"
 #include "talk/media/webrtc/webrtcvideochannelfactory.h"
+#include "talk/media/webrtc/webrtcvideodecoderfactory.h"
+#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
 #include "webrtc/base/cpumonitor.h"
 #include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/thread_annotations.h"
 #include "webrtc/common_video/interface/i420_video_frame.h"
-#include "webrtc/system_wrappers/interface/thread_annotations.h"
 #include "webrtc/transport.h"
 #include "webrtc/video_receive_stream.h"
 #include "webrtc/video_renderer.h"
@@ -65,14 +67,13 @@
 class VideoProcessor;
 class VideoRenderer;
 class VoiceMediaChannel;
-class WebRtcVideoChannel2;
 class WebRtcDecoderObserver;
 class WebRtcEncoderObserver;
 class WebRtcLocalStreamInfo;
 class WebRtcRenderAdapter;
+class WebRtcVideoChannel2;
 class WebRtcVideoChannelRecvInfo;
 class WebRtcVideoChannelSendInfo;
-class WebRtcVideoDecoderFactory;
 class WebRtcVoiceEngine;
 
 struct CapturedFrame;
@@ -119,9 +120,8 @@
       const VideoCodec& codec,
       const VideoOptions& options);
 
-  virtual void* CreateVideoEncoderSettings(
-      const VideoCodec& codec,
-      const VideoOptions& options);
+  virtual void* CreateVideoEncoderSettings(const VideoCodec& codec,
+                                           const VideoOptions& options);
 
   virtual void DestroyVideoEncoderSettings(const VideoCodec& codec,
                                            void* encoder_settings);
@@ -130,20 +130,21 @@
 };
 
 // WebRtcVideoEngine2 is used for the new native WebRTC Video API (webrtc:1667).
-class WebRtcVideoEngine2 : public sigslot::has_slots<> {
+class WebRtcVideoEngine2 : public sigslot::has_slots<>,
+                           public WebRtcVideoEncoderFactory::Observer {
  public:
   // Creates the WebRtcVideoEngine2 with internal VideoCaptureModule.
   WebRtcVideoEngine2();
-  // Custom WebRtcVideoChannelFactory for testing purposes.
-  explicit WebRtcVideoEngine2(WebRtcVideoChannelFactory* channel_factory);
-  ~WebRtcVideoEngine2();
+  virtual ~WebRtcVideoEngine2();
+
+  // Use a custom WebRtcVideoChannelFactory (for testing purposes).
+  void SetChannelFactory(WebRtcVideoChannelFactory* channel_factory);
 
   // Basic video engine implementation.
   bool Init(rtc::Thread* worker_thread);
   void Terminate();
 
   int GetCapabilities();
-  bool SetOptions(const VideoOptions& options);
   bool SetDefaultEncoderConfig(const VideoEncoderConfig& config);
   VideoEncoderConfig GetDefaultEncoderConfig() const;
 
@@ -153,20 +154,23 @@
   const std::vector<RtpHeaderExtension>& rtp_header_extensions() const;
   void SetLogging(int min_sev, const char* filter);
 
+  // Set a WebRtcVideoDecoderFactory for external decoding. Video engine does
+  // not take the ownership of |decoder_factory|. The caller needs to make sure
+  // that |decoder_factory| outlives the video engine.
+  void SetExternalDecoderFactory(WebRtcVideoDecoderFactory* decoder_factory);
+  // Set a WebRtcVideoEncoderFactory for external encoding. Video engine does
+  // not take the ownership of |encoder_factory|. The caller needs to make sure
+  // that |encoder_factory| outlives the video engine.
+  virtual void SetExternalEncoderFactory(
+      WebRtcVideoEncoderFactory* encoder_factory);
+
   bool EnableTimedRender();
-  // No-op, never used.
-  bool SetLocalRenderer(VideoRenderer* renderer);
   // This is currently ignored.
   sigslot::repeater2<VideoCapturer*, CaptureState> SignalCaptureStateChange;
 
   // Set the VoiceEngine for A/V sync. This can only be called before Init.
   bool SetVoiceEngine(WebRtcVoiceEngine* voice_engine);
 
-  // Functions called by WebRtcVideoChannel2.
-  const VideoFormat& default_codec_format() const {
-    return default_codec_format_;
-  }
-
   bool FindCodec(const VideoCodec& in);
   bool CanSendCodec(const VideoCodec& in,
                     const VideoCodec& current,
@@ -181,9 +185,7 @@
   virtual WebRtcVideoEncoderFactory2* GetVideoEncoderFactory();
 
  private:
-  void Construct(WebRtcVideoChannelFactory* channel_factory,
-                 WebRtcVoiceEngine* voice_engine,
-                 rtc::CpuMonitor* cpu_monitor);
+  virtual void OnCodecsAvailable() OVERRIDE;
 
   rtc::Thread* worker_thread_;
   WebRtcVoiceEngine* voice_engine_;
@@ -193,8 +195,6 @@
 
   bool initialized_;
 
-  bool capture_started_;
-
   // Critical section to protect the media processor register/unregister
   // while processing a frame
   rtc::CriticalSection signal_media_critical_;
@@ -202,6 +202,9 @@
   rtc::scoped_ptr<rtc::CpuMonitor> cpu_monitor_;
   WebRtcVideoChannelFactory* channel_factory_;
   WebRtcVideoEncoderFactory2 default_video_encoder_factory_;
+
+  WebRtcVideoDecoderFactory* external_decoder_factory_;
+  WebRtcVideoEncoderFactory* external_encoder_factory_;
 };
 
 class WebRtcVideoChannel2 : public rtc::MessageHandler,
@@ -328,13 +331,16 @@
       // Sent resolutions + bitrates etc. by the underlying VideoSendStream,
       // typically changes when setting a new resolution or reconfiguring
       // bitrates.
-      std::vector<webrtc::VideoStream> video_streams;
+      webrtc::VideoEncoderConfig encoder_config;
     };
 
     void SetCodecAndOptions(const VideoCodecSettings& codec,
-                            const VideoOptions& options);
-    void RecreateWebRtcStream();
-    void SetDimensions(int width, int height);
+                            const VideoOptions& options)
+        EXCLUSIVE_LOCKS_REQUIRED(lock_);
+    void RecreateWebRtcStream() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+    // When |override_max| is false constrain width/height to codec dimensions.
+    void SetDimensions(int width, int height, bool override_max)
+        EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
     webrtc::Call* const call_;
     WebRtcVideoEncoderFactory2* const encoder_factory_;
diff --git a/media/webrtc/webrtcvideoengine2_unittest.cc b/media/webrtc/webrtcvideoengine2_unittest.cc
index 9c313e6..6112c50 100644
--- a/media/webrtc/webrtcvideoengine2_unittest.cc
+++ b/media/webrtc/webrtcvideoengine2_unittest.cc
@@ -68,13 +68,10 @@
 namespace cricket {
 FakeVideoSendStream::FakeVideoSendStream(
     const webrtc::VideoSendStream::Config& config,
-    const std::vector<webrtc::VideoStream>& video_streams,
-    const void* encoder_settings)
-    : sending_(false),
-      config_(config),
-      codec_settings_set_(false) {
+    const webrtc::VideoEncoderConfig& encoder_config)
+    : sending_(false), config_(config), codec_settings_set_(false) {
   assert(config.encoder_settings.encoder != NULL);
-  ReconfigureVideoEncoder(video_streams, encoder_settings);
+  ReconfigureVideoEncoder(encoder_config);
 }
 
 webrtc::VideoSendStream::Config FakeVideoSendStream::GetConfig() {
@@ -82,7 +79,7 @@
 }
 
 std::vector<webrtc::VideoStream> FakeVideoSendStream::GetVideoStreams() {
-  return video_streams_;
+  return encoder_config_.streams;
 }
 
 bool FakeVideoSendStream::IsSending() const {
@@ -104,15 +101,14 @@
 }
 
 bool FakeVideoSendStream::ReconfigureVideoEncoder(
-    const std::vector<webrtc::VideoStream>& streams,
-    const void* encoder_specific) {
-  video_streams_ = streams;
-  if (encoder_specific != NULL) {
+    const webrtc::VideoEncoderConfig& config) {
+  encoder_config_ = config;
+  if (config.encoder_specific_settings != NULL) {
     assert(config_.encoder_settings.payload_name == "VP8");
-    vp8_settings_ =
-        *reinterpret_cast<const webrtc::VideoCodecVP8*>(encoder_specific);
+    vp8_settings_ = *reinterpret_cast<const webrtc::VideoCodecVP8*>(
+                        config.encoder_specific_settings);
   }
-  codec_settings_set_ = encoder_specific != NULL;
+  codec_settings_set_ = config.encoder_specific_settings != NULL;
   return true;
 }
 
@@ -157,7 +153,9 @@
 void FakeVideoReceiveStream::GetCurrentReceiveCodec(webrtc::VideoCodec* codec) {
 }
 
-FakeCall::FakeCall() { SetVideoCodecs(GetDefaultVideoCodecs()); }
+FakeCall::FakeCall() : network_state_(kNetworkUp) {
+  SetVideoCodecs(GetDefaultVideoCodecs());
+}
 
 FakeCall::~FakeCall() {
   EXPECT_EQ(0u, video_send_streams_.size());
@@ -218,12 +216,15 @@
   return codecs;
 }
 
+webrtc::Call::NetworkState FakeCall::GetNetworkState() const {
+  return network_state_;
+}
+
 webrtc::VideoSendStream* FakeCall::CreateVideoSendStream(
     const webrtc::VideoSendStream::Config& config,
-    const std::vector<webrtc::VideoStream>& video_streams,
-    const void* encoder_settings) {
+    const webrtc::VideoEncoderConfig& encoder_config) {
   FakeVideoSendStream* fake_stream =
-      new FakeVideoSendStream(config, video_streams, encoder_settings);
+      new FakeVideoSendStream(config, encoder_config);
   video_send_streams_.push_back(fake_stream);
   return fake_stream;
 }
@@ -274,6 +275,10 @@
   return 0;
 }
 
+void FakeCall::SignalNetworkState(webrtc::Call::NetworkState state) {
+  network_state_ = state;
+}
+
 FakeWebRtcVideoChannel2::FakeWebRtcVideoChannel2(
     FakeCall* call,
     WebRtcVideoEngine2* engine,
@@ -289,6 +294,7 @@
 VoiceMediaChannel* FakeWebRtcVideoChannel2::GetVoiceChannel() {
   return voice_channel_;
 }
+
 FakeCall* FakeWebRtcVideoChannel2::GetFakeCall() {
   return fake_call_;
 }
@@ -309,7 +315,8 @@
 
 class WebRtcVideoEngine2Test : public testing::Test {
  public:
-  WebRtcVideoEngine2Test() : engine_(&factory_) {
+  WebRtcVideoEngine2Test() {
+    engine_.SetChannelFactory(&factory_);
     std::vector<VideoCodec> engine_codecs = engine_.codecs();
     assert(!engine_codecs.empty());
     bool codec_set = false;
@@ -535,15 +542,17 @@
 
 WEBRTC_BASE_TEST(AdaptResolution4x3);
 
-WEBRTC_BASE_TEST(MuteStream);
-
-WEBRTC_BASE_TEST(MultipleSendStreams);
-
 // TODO(juberti): Restore this test once we support sending 0 fps.
 WEBRTC_DISABLED_BASE_TEST(AdaptDropAllFrames);
 // TODO(juberti): Understand why we get decode errors on this test.
 WEBRTC_DISABLED_BASE_TEST(AdaptFramerate);
 
+WEBRTC_BASE_TEST(SendsLowerResolutionOnSmallerFrames);
+
+WEBRTC_BASE_TEST(MuteStream);
+
+WEBRTC_BASE_TEST(MultipleSendStreams);
+
 WEBRTC_BASE_TEST(SetSendStreamFormat0x0);
 
 // TODO(zhurunz): Fix the flakey test.
@@ -1611,8 +1620,17 @@
   FAIL() << "Not implemented.";  // TODO(pbos): Implement.
 }
 
-TEST_F(WebRtcVideoChannel2Test, DISABLED_OnReadyToSend) {
-  FAIL() << "Not implemented.";  // TODO(pbos): Implement.
+TEST_F(WebRtcVideoChannel2Test, OnReadyToSendSignalsNetworkState) {
+  EXPECT_EQ(webrtc::Call::kNetworkUp,
+            fake_channel_->GetFakeCall()->GetNetworkState());
+
+  channel_->OnReadyToSend(false);
+  EXPECT_EQ(webrtc::Call::kNetworkDown,
+            fake_channel_->GetFakeCall()->GetNetworkState());
+
+  channel_->OnReadyToSend(true);
+  EXPECT_EQ(webrtc::Call::kNetworkUp,
+            fake_channel_->GetFakeCall()->GetNetworkState());
 }
 
 TEST_F(WebRtcVideoChannel2Test, DISABLED_CaptureFrameTimestampToNtpTimestamp) {
diff --git a/media/webrtc/webrtcvideoengine2_unittest.h b/media/webrtc/webrtcvideoengine2_unittest.h
index 54e6f06..30f1efb 100644
--- a/media/webrtc/webrtcvideoengine2_unittest.h
+++ b/media/webrtc/webrtcvideoengine2_unittest.h
@@ -39,8 +39,7 @@
 class FakeVideoSendStream : public webrtc::VideoSendStream {
  public:
   FakeVideoSendStream(const webrtc::VideoSendStream::Config& config,
-                      const std::vector<webrtc::VideoStream>& video_streams,
-                      const void* encoder_settings);
+                      const webrtc::VideoEncoderConfig& encoder_config);
   webrtc::VideoSendStream::Config GetConfig();
   std::vector<webrtc::VideoStream> GetVideoStreams();
 
@@ -51,8 +50,7 @@
   virtual webrtc::VideoSendStream::Stats GetStats() const OVERRIDE;
 
   virtual bool ReconfigureVideoEncoder(
-      const std::vector<webrtc::VideoStream>& streams,
-      const void* encoder_specific);
+      const webrtc::VideoEncoderConfig& config) OVERRIDE;
 
   virtual webrtc::VideoSendStreamInput* Input() OVERRIDE;
 
@@ -61,7 +59,7 @@
 
   bool sending_;
   webrtc::VideoSendStream::Config config_;
-  std::vector<webrtc::VideoStream> video_streams_;
+  webrtc::VideoEncoderConfig encoder_config_;
   bool codec_settings_set_;
   webrtc::VideoCodecVP8 vp8_settings_;
 };
@@ -103,11 +101,12 @@
 
   std::vector<webrtc::VideoCodec> GetDefaultVideoCodecs();
 
+  webrtc::Call::NetworkState GetNetworkState() const;
+
  private:
   virtual webrtc::VideoSendStream* CreateVideoSendStream(
       const webrtc::VideoSendStream::Config& config,
-      const std::vector<webrtc::VideoStream>& video_streams,
-      const void* encoder_settings) OVERRIDE;
+      const webrtc::VideoEncoderConfig& encoder_config) OVERRIDE;
 
   virtual void DestroyVideoSendStream(
       webrtc::VideoSendStream* send_stream) OVERRIDE;
@@ -122,6 +121,9 @@
   virtual uint32_t SendBitrateEstimate() OVERRIDE;
   virtual uint32_t ReceiveBitrateEstimate() OVERRIDE;
 
+  virtual void SignalNetworkState(webrtc::Call::NetworkState state) OVERRIDE;
+
+  webrtc::Call::NetworkState network_state_;
   std::vector<webrtc::VideoCodec> codecs_;
   std::vector<FakeVideoSendStream*> video_send_streams_;
   std::vector<FakeVideoReceiveStream*> video_receive_streams_;
diff --git a/media/webrtc/webrtcvideoengine_unittest.cc b/media/webrtc/webrtcvideoengine_unittest.cc
index 11edd05..35f05e1 100644
--- a/media/webrtc/webrtcvideoengine_unittest.cc
+++ b/media/webrtc/webrtcvideoengine_unittest.cc
@@ -26,22 +26,16 @@
  */
 
 #include "talk/media/base/constants.h"
-#include "talk/media/base/fakemediaprocessor.h"
 #include "talk/media/base/fakenetworkinterface.h"
-#include "talk/media/base/fakevideorenderer.h"
 #include "talk/media/base/mediachannel.h"
 #include "talk/media/base/testutils.h"
-#include "talk/media/base/videoadapter.h"
 #include "talk/media/base/videoengine_unittest.h"
-#include "talk/media/webrtc/fakewebrtcvideocapturemodule.h"
 #include "talk/media/webrtc/fakewebrtcvideoengine.h"
-#include "talk/media/webrtc/fakewebrtcvoiceengine.h"
 #include "webrtc/base/fakecpumonitor.h"
 #include "webrtc/base/gunit.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/base/stream.h"
-#include "talk/media/webrtc/webrtcvideocapturer.h"
 #include "talk/media/webrtc/webrtcvideoengine.h"
 #include "talk/media/webrtc/webrtcvideoframe.h"
 #include "talk/media/webrtc/webrtcvoiceengine.h"
@@ -54,9 +48,6 @@
 using cricket::kRtpAbsoluteSenderTimeHeaderExtension;
 
 static const cricket::VideoCodec kVP8Codec720p(100, "VP8", 1280, 720, 30, 0);
-static const cricket::VideoCodec kVP8Codec360p(100, "VP8", 640, 360, 30, 0);
-static const cricket::VideoCodec kVP8Codec270p(100, "VP8", 480, 270, 30, 0);
-static const cricket::VideoCodec kVP8Codec180p(100, "VP8", 320, 180, 30, 0);
 
 static const cricket::VideoCodec kVP8Codec(100, "VP8", 640, 400, 30, 0);
 static const cricket::VideoCodec kH264Codec(127, "H264", 640, 400, 30, 0);
@@ -78,7 +69,6 @@
 static const uint32 kRtxSsrcs1[] = {4};
 static const uint32 kRtxSsrcs3[] = {4, 5, 6};
 
-
 class FakeViEWrapper : public cricket::ViEWrapper {
  public:
   explicit FakeViEWrapper(cricket::FakeWebRtcVideoEngine* engine)
@@ -1556,7 +1546,6 @@
                      768, kMinBandwidthKbps, kStartBandwidthKbps);
 }
 
-
 // Test that sending screencast frames doesn't change bitrate.
 TEST_F(WebRtcVideoEngineTestFake, SetBandwidthScreencast) {
   EXPECT_TRUE(SetupEngine());
@@ -1576,7 +1565,6 @@
   VerifyVP8SendCodec(channel_num, kVP8Codec.width, kVP8Codec.height, 0, 111);
 }
 
-
 // Test SetSendSsrc.
 TEST_F(WebRtcVideoEngineTestFake, SetSendSsrcAndCname) {
   EXPECT_TRUE(SetupEngine());
@@ -1597,7 +1585,6 @@
   EXPECT_STREQ("cname", rtcp_cname);
 }
 
-
 // Test that the local SSRC is the same on sending and receiving channels if the
 // receive channel is created before the send channel.
 TEST_F(WebRtcVideoEngineTestFake, SetSendSsrcAfterCreatingReceiveChannel) {
@@ -1618,7 +1605,6 @@
   EXPECT_EQ(1, vie_.GetNumSsrcs(receive_channel_num));
 }
 
-
 // Test SetOptions with denoising flag.
 TEST_F(WebRtcVideoEngineTestFake, SetOptionsWithDenoising) {
   EXPECT_TRUE(SetupEngine());
@@ -1707,7 +1693,6 @@
   EXPECT_EQ(-1, vie_.GetIncomingFrameNum(channel1));
 }
 
-
 TEST_F(WebRtcVideoEngineTestFake, SendReceiveBitratesStats) {
   EXPECT_TRUE(SetupEngine());
   cricket::VideoOptions options;
@@ -1839,7 +1824,6 @@
   EXPECT_EQ(high, 1.0f);
 }
 
-
 TEST_F(WebRtcVideoEngineTestFake, ResetCodecOnScreencast) {
   EXPECT_TRUE(SetupEngine());
   cricket::VideoOptions options;
@@ -1870,7 +1854,6 @@
   EXPECT_FALSE(gcodec.codecSpecific.VP8.denoisingOn);
 }
 
-
 TEST_F(WebRtcVideoEngineTestFake, DontRegisterDecoderIfFactoryIsNotGiven) {
   engine_.SetExternalDecoderFactory(NULL);
   EXPECT_TRUE(SetupEngine());
@@ -2397,9 +2380,6 @@
   EXPECT_FALSE(channel_->SetRecvCodecs(codecs));
 }
 
-// Disable for TSan v2, see
-// https://code.google.com/p/webrtc/issues/detail?id=3671 for details.
-#if !defined(THREAD_SANITIZER)
 TEST_F(WebRtcVideoMediaChannelTest, GetRtpSendTimeExtension) {
   // Enable RTP timestamp extension.
   const int id = 12;
@@ -2411,7 +2391,6 @@
   EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
   EXPECT_EQ(id, channel_->GetRtpSendTimeExtnId());
 }
-#endif  // if !defined(THREAD_SANITIZER)
 
 TEST_F(WebRtcVideoMediaChannelTest, SetSend) {
   Base::SetSend();
@@ -2574,7 +2553,6 @@
   Base::RejectEmptyStreamParams();
 }
 
-
 TEST_F(WebRtcVideoMediaChannelTest, AdaptResolution16x10) {
   Base::AdaptResolution16x10();
 }
diff --git a/media/webrtc/webrtcvideoframe_unittest.cc b/media/webrtc/webrtcvideoframe_unittest.cc
index 4cfe7c0..5f65c58 100644
--- a/media/webrtc/webrtcvideoframe_unittest.cc
+++ b/media/webrtc/webrtcvideoframe_unittest.cc
@@ -27,14 +27,10 @@
 
 #include "talk/media/base/videoframe_unittest.h"
 #include "talk/media/webrtc/webrtcvideoframe.h"
-#include "webrtc/base/flags.h"
-
-extern int FLAG_yuvconverter_repeat;  // From lmivideoframe_unittest.cc.
 
 class WebRtcVideoFrameTest : public VideoFrameTest<cricket::WebRtcVideoFrame> {
  public:
   WebRtcVideoFrameTest() {
-    repeat_ = FLAG_yuvconverter_repeat;
   }
 
   void TestInit(int cropped_width, int cropped_height) {
@@ -136,7 +132,7 @@
 TEST_WEBRTCVIDEOFRAME(ConstructBlack)
 // TODO(fbarchard): Implement Jpeg
 // TEST_WEBRTCVIDEOFRAME(ConstructMjpgI420)
-// TEST_WEBRTCVIDEOFRAME(ConstructMjpgI422)
+TEST_WEBRTCVIDEOFRAME(ConstructMjpgI422)
 // TEST_WEBRTCVIDEOFRAME(ConstructMjpgI444)
 // TEST_WEBRTCVIDEOFRAME(ConstructMjpgI411)
 // TEST_WEBRTCVIDEOFRAME(ConstructMjpgI400)
diff --git a/media/webrtc/webrtcvoiceengine.cc b/media/webrtc/webrtcvoiceengine.cc
index 5cf53e4..a524bad 100644
--- a/media/webrtc/webrtcvoiceengine.cc
+++ b/media/webrtc/webrtcvoiceengine.cc
@@ -52,6 +52,7 @@
 #include "webrtc/base/stringutils.h"
 #include "webrtc/common.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/video_engine/include/vie_network.h"
 
 #ifdef WIN32
 #include <objbase.h>  // NOLINT
@@ -119,6 +120,7 @@
 // Opus bitrate should be in the range between 6000 and 510000.
 static const int kOpusMinBitrate = 6000;
 static const int kOpusMaxBitrate = 510000;
+
 // Default audio dscp value.
 // See http://tools.ietf.org/html/rfc2474 for details.
 // See also http://tools.ietf.org/html/draft-jennings-rtcweb-qos-00
@@ -403,6 +405,7 @@
   return codec.GetParam(kCodecParamStereo, &value) && value == 1;
 }
 
+// TODO(minyue): Clamp bitrate when invalid.
 static bool IsValidOpusBitrate(int bitrate) {
   return (bitrate >= kOpusMinBitrate && bitrate <= kOpusMaxBitrate);
 }
@@ -429,6 +432,59 @@
   return codec.GetParam(kCodecParamUseInbandFec, &value) && value == 1;
 }
 
+// Returns kOpusDefaultPlaybackRate if params[kCodecParamMaxPlaybackRate] is not
+// defined. Returns the value of params[kCodecParamMaxPlaybackRate] otherwise.
+static int GetOpusMaxPlaybackRate(const AudioCodec& codec) {
+  int value;
+  if (codec.GetParam(kCodecParamMaxPlaybackRate, &value)) {
+    return value;
+  }
+  return kOpusDefaultMaxPlaybackRate;
+}
+
+static void GetOpusConfig(const AudioCodec& codec, webrtc::CodecInst* voe_codec,
+                          bool* enable_codec_fec, int* max_playback_rate) {
+  *enable_codec_fec = IsOpusFecEnabled(codec);
+  *max_playback_rate = GetOpusMaxPlaybackRate(codec);
+
+  // If OPUS, change what we send according to the "stereo" codec
+  // parameter, and not the "channels" parameter.  We set
+  // voe_codec.channels to 2 if "stereo=1" and 1 otherwise.  If
+  // the bitrate is not specified, i.e. is zero, we set it to the
+  // appropriate default value for mono or stereo Opus.
+
+  // TODO(minyue): The determination of bit rate might take the maximum playback
+  // rate into account.
+
+  if (IsOpusStereoEnabled(codec)) {
+    voe_codec->channels = 2;
+    if (!IsValidOpusBitrate(codec.bitrate)) {
+      if (codec.bitrate != 0) {
+        LOG(LS_WARNING) << "Overrides the invalid supplied bitrate("
+                        << codec.bitrate
+                        << ") with default opus stereo bitrate: "
+                        << kOpusStereoBitrate;
+      }
+      voe_codec->rate = kOpusStereoBitrate;
+    }
+  } else {
+    voe_codec->channels = 1;
+    if (!IsValidOpusBitrate(codec.bitrate)) {
+      if (codec.bitrate != 0) {
+        LOG(LS_WARNING) << "Overrides the invalid supplied bitrate("
+                        << codec.bitrate
+                        << ") with default opus mono bitrate: "
+                        << kOpusMonoBitrate;
+      }
+      voe_codec->rate = kOpusMonoBitrate;
+    }
+  }
+  int bitrate_from_params = GetOpusBitrateFromParams(codec);
+  if (bitrate_from_params != 0) {
+    voe_codec->rate = bitrate_from_params;
+  }
+}
+
 void WebRtcVoiceEngine::ConstructCodecs() {
   LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
   int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
@@ -805,30 +861,6 @@
     }
   }
 
-  bool experimental_ns;
-  if (options.experimental_ns.Get(&experimental_ns)) {
-    webrtc::AudioProcessing* audioproc =
-        voe_wrapper_->base()->audio_processing();
-#ifdef USE_WEBRTC_DEV_BRANCH
-    webrtc::Config config;
-    config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(
-        experimental_ns));
-    audioproc->SetExtraOptions(config);
-#else
-    // We check audioproc for the benefit of tests, since FakeWebRtcVoiceEngine
-    // returns NULL on audio_processing().
-    if (audioproc) {
-      if (audioproc->EnableExperimentalNs(experimental_ns) == -1) {
-        LOG_RTCERR1(EnableExperimentalNs, experimental_ns);
-        return false;
-      }
-    } else {
-      LOG(LS_VERBOSE) << "Experimental noise suppression set to "
-                      << experimental_ns;
-    }
-#endif
-  }
-
   bool highpass_filter;
   if (options.highpass_filter.Get(&highpass_filter)) {
     LOG(LS_INFO) << "High pass filter enabled? " << highpass_filter;
@@ -874,20 +906,50 @@
       StopAecDump();
   }
 
+  webrtc::Config config;
+
+  experimental_aec_.SetFrom(options.experimental_aec);
   bool experimental_aec;
-  if (options.experimental_aec.Get(&experimental_aec)) {
-    LOG(LS_INFO) << "Experimental aec is " << experimental_aec;
-    webrtc::AudioProcessing* audioproc =
-        voe_wrapper_->base()->audio_processing();
+  if (experimental_aec_.Get(&experimental_aec)) {
+    LOG(LS_INFO) << "Experimental aec is enabled? " << experimental_aec;
+    config.Set<webrtc::DelayCorrection>(
+        new webrtc::DelayCorrection(experimental_aec));
+  }
+
+#ifdef USE_WEBRTC_DEV_BRANCH
+  experimental_ns_.SetFrom(options.experimental_ns);
+  bool experimental_ns;
+  if (experimental_ns_.Get(&experimental_ns)) {
+    LOG(LS_INFO) << "Experimental ns is enabled? " << experimental_ns;
+    config.Set<webrtc::ExperimentalNs>(
+        new webrtc::ExperimentalNs(experimental_ns));
+  }
+#endif
+
+  // We check audioproc for the benefit of tests, since FakeWebRtcVoiceEngine
+  // returns NULL on audio_processing().
+  webrtc::AudioProcessing* audioproc = voe_wrapper_->base()->audio_processing();
+  if (audioproc) {
+    audioproc->SetExtraOptions(config);
+  }
+
+#ifndef USE_WEBRTC_DEV_BRANCH
+  bool experimental_ns;
+  if (options.experimental_ns.Get(&experimental_ns)) {
+    LOG(LS_INFO) << "Experimental ns is enabled? " << experimental_ns;
     // We check audioproc for the benefit of tests, since FakeWebRtcVoiceEngine
     // returns NULL on audio_processing().
     if (audioproc) {
-      webrtc::Config config;
-      config.Set<webrtc::DelayCorrection>(
-          new webrtc::DelayCorrection(experimental_aec));
-      audioproc->SetExtraOptions(config);
+      if (audioproc->EnableExperimentalNs(experimental_ns) == -1) {
+        LOG_RTCERR1(EnableExperimentalNs, experimental_ns);
+        return false;
+      }
+    } else {
+      LOG(LS_VERBOSE) << "Experimental noise suppression set to "
+                      << experimental_ns;
     }
   }
+#endif
 
   uint32 recording_sample_rate;
   if (options.recording_sample_rate.Get(&recording_sample_rate)) {
@@ -1759,6 +1821,8 @@
       typing_noise_detected_(false),
       desired_send_(SEND_NOTHING),
       send_(SEND_NOTHING),
+      shared_bwe_vie_(NULL),
+      shared_bwe_vie_channel_(-1),
       default_receive_ssrc_(0) {
   engine->RegisterChannel(this);
   LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel "
@@ -1770,6 +1834,7 @@
 WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() {
   LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel "
                   << voe_channel();
+  SetupSharedBandwidthEstimation(NULL, -1);
 
   // Remove any remaining send streams, the default channel will be deleted
   // later.
@@ -1870,6 +1935,12 @@
     }
   }
 
+  // Force update of Video Engine BWE forwarding to reflect experiment setting.
+  if (!SetupSharedBandwidthEstimation(shared_bwe_vie_,
+                                      shared_bwe_vie_channel_)) {
+    return false;
+  }
+
   LOG(LS_INFO) << "Set voice channel options.  Current options: "
                << options_.ToString();
   return true;
@@ -1976,6 +2047,10 @@
   bool nack_enabled = nack_enabled_;
   bool enable_codec_fec = false;
 
+  // max_playback_rate <= 0 will not trigger setting of maximum encoding
+  // bandwidth.
+  int max_playback_rate = 0;
+
   // Set send codec (the first non-telephone-event/CN codec)
   for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
        it != codecs.end(); ++it) {
@@ -1992,40 +2067,6 @@
       continue;
     }
 
-    // If OPUS, change what we send according to the "stereo" codec
-    // parameter, and not the "channels" parameter.  We set
-    // voe_codec.channels to 2 if "stereo=1" and 1 otherwise.  If
-    // the bitrate is not specified, i.e. is zero, we set it to the
-    // appropriate default value for mono or stereo Opus.
-    if (IsOpus(*it)) {
-      if (IsOpusStereoEnabled(*it)) {
-        voe_codec.channels = 2;
-        if (!IsValidOpusBitrate(it->bitrate)) {
-          if (it->bitrate != 0) {
-            LOG(LS_WARNING) << "Overrides the invalid supplied bitrate("
-                            << it->bitrate
-                            << ") with default opus stereo bitrate: "
-                            << kOpusStereoBitrate;
-          }
-          voe_codec.rate = kOpusStereoBitrate;
-        }
-      } else {
-        voe_codec.channels = 1;
-        if (!IsValidOpusBitrate(it->bitrate)) {
-          if (it->bitrate != 0) {
-            LOG(LS_WARNING) << "Overrides the invalid supplied bitrate("
-                            << it->bitrate
-                            << ") with default opus mono bitrate: "
-                            << kOpusMonoBitrate;
-          }
-          voe_codec.rate = kOpusMonoBitrate;
-        }
-      }
-      int bitrate_from_params = GetOpusBitrateFromParams(*it);
-      if (bitrate_from_params != 0) {
-        voe_codec.rate = bitrate_from_params;
-      }
-    }
 
     // We'll use the first codec in the list to actually send audio data.
     // Be sure to use the payload type requested by the remote side.
@@ -2055,8 +2096,11 @@
     } else {
       send_codec = voe_codec;
       nack_enabled = IsNackEnabled(*it);
-      // For Opus as the send codec, we enable inband FEC if requested.
-      enable_codec_fec = IsOpus(*it) && IsOpusFecEnabled(*it);
+      // For Opus as the send codec, we are to enable inband FEC if requested
+      // and set maximum playback rate.
+      if (IsOpus(*it)) {
+        GetOpusConfig(*it, &send_codec, &enable_codec_fec, &max_playback_rate);
+      }
     }
     found_send_codec = true;
     break;
@@ -2090,6 +2134,21 @@
 #endif  // USE_WEBRTC_DEV_BRANCH
   }
 
+  // maxplaybackrate should be set after SetSendCodec.
+  if (max_playback_rate > 0) {
+    LOG(LS_INFO) << "Attempt to set maximum playback rate to "
+                 << max_playback_rate
+                 << " Hz on channel "
+                 << channel;
+#ifdef USE_WEBRTC_DEV_BRANCH
+    // (max_playback_rate + 1) >> 1 is to obtain ceil(max_playback_rate / 2.0).
+    if (engine()->voe()->codec()->SetOpusMaxPlaybackRate(
+        channel, max_playback_rate) == -1) {
+      LOG(LS_WARNING) << "Could not set maximum playback rate.";
+    }
+#endif
+  }
+
   // Always update the |send_codec_| to the currently set send codec.
   send_codec_.reset(new webrtc::CodecInst(send_codec));
 
@@ -2531,8 +2590,8 @@
   }
 
   if (engine()->voe()->rtp()->SetRTCP_CNAME(channel, sp.cname.c_str()) == -1) {
-     LOG_RTCERR2(SetRTCP_CNAME, channel, sp.cname);
-     return false;
+    LOG_RTCERR2(SetRTCP_CNAME, channel, sp.cname);
+    return false;
   }
 
   // Set the current codecs to be used for the new channel.
@@ -2604,6 +2663,9 @@
     receive_channels_.insert(std::make_pair(
         default_receive_ssrc_,
         new WebRtcVoiceChannelRenderer(voe_channel(), audio_transport)));
+    if (!SetupSharedBweOnChannel(voe_channel())) {
+      return false;
+    }
     return SetPlayout(voe_channel(), playout_);
   }
 
@@ -2691,6 +2753,11 @@
     return false;
   }
 
+  // Set up channel to be able to forward incoming packets to video engine BWE.
+  if (!SetupSharedBweOnChannel(channel)) {
+    return false;
+  }
+
   return SetPlayout(channel, playout_);
 }
 
@@ -3060,7 +3127,8 @@
   engine()->voe()->network()->ReceivedRTPPacket(
       which_channel,
       packet->data(),
-      static_cast<unsigned int>(packet->length()));
+      static_cast<unsigned int>(packet->length()),
+      webrtc::PacketTime(packet_time.timestamp, packet_time.not_before));
 }
 
 void WebRtcVoiceMediaChannel::OnRtcpReceived(
@@ -3454,6 +3522,23 @@
   return -1;
 }
 
+bool WebRtcVoiceMediaChannel::SetupSharedBandwidthEstimation(
+    webrtc::VideoEngine* vie, int vie_channel) {
+  shared_bwe_vie_ = vie;
+  shared_bwe_vie_channel_ = vie_channel;
+
+  if (!SetupSharedBweOnChannel(voe_channel())) {
+    return false;
+  }
+  for (ChannelMap::iterator it = receive_channels_.begin();
+      it != receive_channels_.end(); ++it) {
+    if (!SetupSharedBweOnChannel(it->second->channel())) {
+      return false;
+    }
+  }
+  return true;
+}
+
 bool WebRtcVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
     const std::vector<AudioCodec>& all_codecs, webrtc::CodecInst* send_codec) {
   // Get the RED encodings from the parameter with no name. This may
@@ -3603,6 +3688,25 @@
   return true;
 }
 
+bool WebRtcVoiceMediaChannel::SetupSharedBweOnChannel(int voe_channel) {
+  webrtc::ViENetwork* vie_network = NULL;
+  int vie_channel = -1;
+  if (options_.combined_audio_video_bwe.GetWithDefaultIfUnset(false) &&
+      shared_bwe_vie_ != NULL && shared_bwe_vie_channel_ != -1) {
+    vie_network = webrtc::ViENetwork::GetInterface(shared_bwe_vie_);
+    vie_channel = shared_bwe_vie_channel_;
+  }
+  if (engine()->voe()->rtp()->SetVideoEngineBWETarget(voe_channel, vie_network,
+      vie_channel) == -1) {
+    LOG_RTCERR3(SetVideoEngineBWETarget, voe_channel, vie_network, vie_channel);
+    if (vie_network != NULL) {
+      // Don't fail if we're tearing down.
+      return false;
+    }
+  }
+  return true;
+}
+
 int WebRtcSoundclipStream::Read(void *buf, int len) {
   size_t res = 0;
   mem_.Read(buf, len, &res, NULL);
diff --git a/media/webrtc/webrtcvoiceengine.h b/media/webrtc/webrtcvoiceengine.h
index 69705cc..5557af0 100644
--- a/media/webrtc/webrtcvoiceengine.h
+++ b/media/webrtc/webrtcvoiceengine.h
@@ -54,6 +54,10 @@
 #error "Bogus include."
 #endif
 
+namespace webrtc {
+class VideoEngine;
+}
+
 namespace cricket {
 
 // WebRtcSoundclipStream is an adapter object that allows a memory stream to be
@@ -280,6 +284,13 @@
   uint32 rx_processor_ssrc_;
 
   rtc::CriticalSection signal_media_critical_;
+
+  // Cache received experimental_aec and experimental_ns values, and apply them
+  // in case they are missing in the audio options. We need to do this because
+  // SetExtraOptions() will revert to defaults for options which are not
+  // provided.
+  Settable<bool> experimental_aec_;
+  Settable<bool> experimental_ns_;
 };
 
 // WebRtcMediaChannel is a class that implements the common WebRtc channel
@@ -377,6 +388,8 @@
   int GetReceiveChannelNum(uint32 ssrc);
   int GetSendChannelNum(uint32 ssrc);
 
+  bool SetupSharedBandwidthEstimation(webrtc::VideoEngine* vie,
+                                      int vie_channel);
  protected:
   int GetLastEngineError() { return engine()->GetLastEngineError(); }
   int GetOutputLevel(int channel);
@@ -419,6 +432,7 @@
 
   bool SetHeaderExtension(ExtensionSetterFunction setter, int channel_id,
                           const RtpHeaderExtension* extension);
+  bool SetupSharedBweOnChannel(int voe_channel);
 
   bool SetChannelRecvRtpHeaderExtensions(
     int channel_id,
@@ -442,6 +456,11 @@
   bool typing_noise_detected_;
   SendFlags desired_send_;
   SendFlags send_;
+  // shared_bwe_vie_ and shared_bwe_vie_channel_ together identifies a WebRTC
+  // VideoEngine channel that this voice channel should forward incoming packets
+  // to for Bandwidth Estimation purposes.
+  webrtc::VideoEngine* shared_bwe_vie_;
+  int shared_bwe_vie_channel_;
 
   // send_channels_ contains the channels which are being used for sending.
   // When the default channel (voe_channel) is used for sending, it is
diff --git a/media/webrtc/webrtcvoiceengine_unittest.cc b/media/webrtc/webrtcvoiceengine_unittest.cc
index 89d4c4d..b044e92 100644
--- a/media/webrtc/webrtcvoiceengine_unittest.cc
+++ b/media/webrtc/webrtcvoiceengine_unittest.cc
@@ -38,6 +38,7 @@
 #include "talk/media/base/fakenetworkinterface.h"
 #include "talk/media/base/fakertp.h"
 #include "talk/media/webrtc/fakewebrtcvoiceengine.h"
+#include "talk/media/webrtc/webrtcvie.h"
 #include "talk/media/webrtc/webrtcvoiceengine.h"
 #include "talk/p2p/base/fakesession.h"
 #include "talk/session/media/channel.h"
@@ -1151,7 +1152,6 @@
   int channel_num = voe_.GetLastChannel();
   std::vector<cricket::AudioCodec> codecs;
   codecs.push_back(kOpusCodec);
-  codecs[0].bitrate = 0;
   EXPECT_TRUE(channel_->SetSendCodecs(codecs));
   EXPECT_FALSE(voe_.GetCodecFEC(channel_num));
 }
@@ -1228,6 +1228,159 @@
   EXPECT_TRUE(channel_->SetSendCodecs(codecs));
   EXPECT_FALSE(voe_.GetCodecFEC(channel_num));
 }
+
+// Test that Opus FEC status can be changed.
+TEST_F(WebRtcVoiceEngineTestFake, ChangeOpusFecStatus) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_FALSE(voe_.GetCodecFEC(channel_num));
+  codecs[0].params["useinbandfec"] = "1";
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_TRUE(voe_.GetCodecFEC(channel_num));
+}
+
+// Test maxplaybackrate <= 8000 triggers Opus narrow band mode.
+TEST_F(WebRtcVoiceEngineTestFake, SetOpusMaxPlaybackRateNb) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;
+  codecs[0].SetParam(cricket::kCodecParamMaxPlaybackRate, 8000);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(cricket::kOpusBandwidthNb,
+            voe_.GetMaxEncodingBandwidth(channel_num));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("opus", gcodec.plname);
+  // TODO(minyue): Default bit rate is not but can in future be affected by
+  // kCodecParamMaxPlaybackRate.
+  EXPECT_EQ(32000, gcodec.rate);
+}
+
+// Test 8000 < maxplaybackrate <= 12000 triggers Opus medium band mode.
+TEST_F(WebRtcVoiceEngineTestFake, SetOpusMaxPlaybackRateMb) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;
+  codecs[0].SetParam(cricket::kCodecParamMaxPlaybackRate, 8001);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(cricket::kOpusBandwidthMb,
+            voe_.GetMaxEncodingBandwidth(channel_num));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("opus", gcodec.plname);
+  // TODO(minyue): Default bit rate is not but can in future be affected by
+  // kCodecParamMaxPlaybackRate.
+  EXPECT_EQ(32000, gcodec.rate);
+}
+
+// Test 12000 < maxplaybackrate <= 16000 triggers Opus wide band mode.
+TEST_F(WebRtcVoiceEngineTestFake, SetOpusMaxPlaybackRateWb) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;
+  codecs[0].SetParam(cricket::kCodecParamMaxPlaybackRate, 12001);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(cricket::kOpusBandwidthWb,
+            voe_.GetMaxEncodingBandwidth(channel_num));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("opus", gcodec.plname);
+  // TODO(minyue): Default bit rate is not but can in future be affected by
+  // kCodecParamMaxPlaybackRate.
+  EXPECT_EQ(32000, gcodec.rate);
+}
+
+// Test 16000 < maxplaybackrate <= 24000 triggers Opus super wide band mode.
+TEST_F(WebRtcVoiceEngineTestFake, SetOpusMaxPlaybackRateSwb) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;
+  codecs[0].SetParam(cricket::kCodecParamMaxPlaybackRate, 16001);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(cricket::kOpusBandwidthSwb,
+            voe_.GetMaxEncodingBandwidth(channel_num));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("opus", gcodec.plname);
+  // TODO(minyue): Default bit rate is not but can in future be affected by
+  // kCodecParamMaxPlaybackRate.
+  EXPECT_EQ(32000, gcodec.rate);
+}
+
+// Test 24000 < maxplaybackrate triggers Opus full band mode.
+TEST_F(WebRtcVoiceEngineTestFake, SetOpusMaxPlaybackRateFb) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;
+  codecs[0].SetParam(cricket::kCodecParamMaxPlaybackRate, 24001);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(cricket::kOpusBandwidthFb,
+            voe_.GetMaxEncodingBandwidth(channel_num));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("opus", gcodec.plname);
+  // TODO(minyue): Default bit rate is not but can in future be affected by
+  // kCodecParamMaxPlaybackRate.
+  EXPECT_EQ(32000, gcodec.rate);
+}
+
+// Test Opus that without maxplaybackrate, default playback rate is used.
+TEST_F(WebRtcVoiceEngineTestFake, DefaultOpusMaxPlaybackRate) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(cricket::kOpusBandwidthFb,
+            voe_.GetMaxEncodingBandwidth(channel_num));
+}
+
+// Test the with non-Opus, maxplaybackrate has no effect.
+TEST_F(WebRtcVoiceEngineTestFake, SetNonOpusMaxPlaybackRate) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs[0].SetParam(cricket::kCodecParamMaxPlaybackRate, 32000);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetMaxEncodingBandwidth(channel_num));
+}
+
+// Test maxplaybackrate can be set on two streams.
+TEST_F(WebRtcVoiceEngineTestFake, SetOpusMaxPlaybackRateOnTwoStreams) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  // Default bandwidth is 24000.
+  EXPECT_EQ(cricket::kOpusBandwidthFb,
+            voe_.GetMaxEncodingBandwidth(channel_num));
+
+  codecs[0].SetParam(cricket::kCodecParamMaxPlaybackRate, 8000);
+
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(cricket::kOpusBandwidthNb,
+            voe_.GetMaxEncodingBandwidth(channel_num));
+
+  channel_->AddSendStream(cricket::StreamParams::CreateLegacy(kSsrc2));
+  channel_num = voe_.GetLastChannel();
+  EXPECT_EQ(cricket::kOpusBandwidthNb,
+            voe_.GetMaxEncodingBandwidth(channel_num));
+}
 #endif  // USE_WEBRTC_DEV_BRANCH
 
 // Test that we can apply CELT with stereo mode but fail with mono mode.
@@ -3160,3 +3313,113 @@
   CoUninitialize();
 }
 #endif
+
+TEST_F(WebRtcVoiceEngineTestFake, ChangeCombinedAudioVideoBweOption) {
+  // Test that changing the combined_audio_video_bwe option results in the
+  // expected state changes in VoiceEngine.
+  cricket::ViEWrapper vie;
+  const int kVieCh = 667;
+
+  EXPECT_TRUE(SetupEngine());
+  cricket::WebRtcVoiceMediaChannel* media_channel =
+      static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
+  EXPECT_TRUE(media_channel->SetupSharedBandwidthEstimation(vie.engine(),
+                                                            kVieCh));
+  EXPECT_TRUE(media_channel->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(2)));
+  int recv_ch = voe_.GetLastChannel();
+
+  // Combined BWE should not be set up yet.
+  EXPECT_EQ(NULL, voe_.GetViENetwork(recv_ch));
+  EXPECT_EQ(-1, voe_.GetVideoChannel(recv_ch));
+
+  // Enable combined BWE option - now it should be set up.
+  cricket::AudioOptions options;
+  options.combined_audio_video_bwe.Set(true);
+  EXPECT_TRUE(media_channel->SetOptions(options));
+  EXPECT_EQ(vie.network(), voe_.GetViENetwork(recv_ch));
+  EXPECT_EQ(kVieCh, voe_.GetVideoChannel(recv_ch));
+
+  // Disable combined BWE option - should be disabled again.
+  options.combined_audio_video_bwe.Set(false);
+  EXPECT_TRUE(media_channel->SetOptions(options));
+  EXPECT_EQ(NULL, voe_.GetViENetwork(recv_ch));
+  EXPECT_EQ(-1, voe_.GetVideoChannel(recv_ch));
+
+  EXPECT_TRUE(media_channel->SetupSharedBandwidthEstimation(NULL, -1));
+}
+
+TEST_F(WebRtcVoiceEngineTestFake, SetupSharedBandwidthEstimation) {
+  // Test that calling SetupSharedBandwidthEstimation() on the voice media
+  // channel results in the expected state changes in VoiceEngine.
+  cricket::ViEWrapper vie1;
+  cricket::ViEWrapper vie2;
+  const int kVieCh1 = 667;
+  const int kVieCh2 = 70;
+
+  EXPECT_TRUE(SetupEngine());
+  cricket::WebRtcVoiceMediaChannel* media_channel =
+      static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
+  cricket::AudioOptions options;
+  options.combined_audio_video_bwe.Set(true);
+  EXPECT_TRUE(media_channel->SetOptions(options));
+  EXPECT_TRUE(media_channel->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(2)));
+  int recv_ch = voe_.GetLastChannel();
+
+  // Combined BWE should not be set up yet.
+  EXPECT_EQ(NULL, voe_.GetViENetwork(recv_ch));
+  EXPECT_EQ(-1, voe_.GetVideoChannel(recv_ch));
+
+  // Register - should be enabled.
+  EXPECT_TRUE(media_channel->SetupSharedBandwidthEstimation(vie1.engine(),
+                                                            kVieCh1));
+  EXPECT_EQ(vie1.network(), voe_.GetViENetwork(recv_ch));
+  EXPECT_EQ(kVieCh1, voe_.GetVideoChannel(recv_ch));
+
+  // Re-register - should still be enabled.
+  EXPECT_TRUE(media_channel->SetupSharedBandwidthEstimation(vie2.engine(),
+                                                            kVieCh2));
+  EXPECT_EQ(vie2.network(), voe_.GetViENetwork(recv_ch));
+  EXPECT_EQ(kVieCh2, voe_.GetVideoChannel(recv_ch));
+
+  // Unregister - should be disabled again.
+  EXPECT_TRUE(media_channel->SetupSharedBandwidthEstimation(NULL, -1));
+  EXPECT_EQ(NULL, voe_.GetViENetwork(recv_ch));
+  EXPECT_EQ(-1, voe_.GetVideoChannel(recv_ch));
+}
+
+TEST_F(WebRtcVoiceEngineTestFake, ConfigureCombinedBweForNewRecvStreams) {
+  // Test that adding receive streams after enabling combined bandwidth
+  // estimation will correctly configure each channel.
+  cricket::ViEWrapper vie;
+  const int kVieCh = 667;
+
+  EXPECT_TRUE(SetupEngine());
+  cricket::WebRtcVoiceMediaChannel* media_channel =
+      static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
+  EXPECT_TRUE(media_channel->SetupSharedBandwidthEstimation(vie.engine(),
+                                                            kVieCh));
+  cricket::AudioOptions options;
+  options.combined_audio_video_bwe.Set(true);
+  EXPECT_TRUE(media_channel->SetOptions(options));
+
+  static const uint32 kSsrcs[] = {1, 2, 3, 4};
+  int voe_channels[ARRAY_SIZE(kSsrcs)] = {0};
+  for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs); ++i) {
+    EXPECT_TRUE(media_channel->AddRecvStream(
+        cricket::StreamParams::CreateLegacy(kSsrcs[i])));
+    int recv_ch = media_channel->GetReceiveChannelNum(kSsrcs[i]);
+    EXPECT_NE(-1, recv_ch);
+    voe_channels[i] = recv_ch;
+    EXPECT_EQ(vie.network(), voe_.GetViENetwork(recv_ch));
+    EXPECT_EQ(kVieCh, voe_.GetVideoChannel(recv_ch));
+  }
+
+  EXPECT_TRUE(media_channel->SetupSharedBandwidthEstimation(NULL, -1));
+
+  for (unsigned int i = 0; i < ARRAY_SIZE(voe_channels); ++i) {
+    EXPECT_EQ(NULL, voe_.GetViENetwork(voe_channels[i]));
+    EXPECT_EQ(-1, voe_.GetVideoChannel(voe_channels[i]));
+  }
+}
diff --git a/p2p/base/asyncstuntcpsocket.h b/p2p/base/asyncstuntcpsocket.h
index b63c0b5..136b4df 100644
--- a/p2p/base/asyncstuntcpsocket.h
+++ b/p2p/base/asyncstuntcpsocket.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef TALK_BASE_ASYNCSTUNTCPSOCKET_H_
-#define TALK_BASE_ASYNCSTUNTCPSOCKET_H_
+#ifndef TALK_P2P_BASE_ASYNCSTUNTCPSOCKET_H_
+#define TALK_P2P_BASE_ASYNCSTUNTCPSOCKET_H_
 
 #include "webrtc/base/asynctcpsocket.h"
 #include "webrtc/base/scoped_ptr.h"
@@ -64,4 +64,4 @@
 
 }  // namespace cricket
 
-#endif  // TALK_BASE_ASYNCSTUNTCPSOCKET_H_
+#endif  // TALK_P2P_BASE_ASYNCSTUNTCPSOCKET_H_
diff --git a/p2p/base/basicpacketsocketfactory.h b/p2p/base/basicpacketsocketfactory.h
index b1bae35..77b1652 100644
--- a/p2p/base/basicpacketsocketfactory.h
+++ b/p2p/base/basicpacketsocketfactory.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef TALK_BASE_BASICPACKETSOCKETFACTORY_H_
-#define TALK_BASE_BASICPACKETSOCKETFACTORY_H_
+#ifndef TALK_P2P_BASE_BASICPACKETSOCKETFACTORY_H_
+#define TALK_P2P_BASE_BASICPACKETSOCKETFACTORY_H_
 
 #include "talk/p2p/base/packetsocketfactory.h"
 
@@ -65,4 +65,4 @@
 
 }  // namespace rtc
 
-#endif  // TALK_BASE_BASICPACKETSOCKETFACTORY_H_
+#endif  // TALK_P2P_BASE_BASICPACKETSOCKETFACTORY_H_
diff --git a/p2p/base/constants.cc b/p2p/base/constants.cc
index 2e57d9d..278a615 100644
--- a/p2p/base/constants.cc
+++ b/p2p/base/constants.cc
@@ -29,7 +29,7 @@
 
 #include <string>
 
-#include "talk/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/qname.h"
 
 namespace cricket {
 
diff --git a/p2p/base/constants.h b/p2p/base/constants.h
index 61dd815..4cd1166 100644
--- a/p2p/base/constants.h
+++ b/p2p/base/constants.h
@@ -29,7 +29,7 @@
 #define TALK_P2P_BASE_CONSTANTS_H_
 
 #include <string>
-#include "talk/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/qname.h"
 
 // This file contains constants related to signaling that are used in various
 // classes in this directory.
diff --git a/p2p/base/p2ptransport.cc b/p2p/base/p2ptransport.cc
index b992cc0..06941ac 100644
--- a/p2p/base/p2ptransport.cc
+++ b/p2p/base/p2ptransport.cc
@@ -35,8 +35,8 @@
 #include "talk/p2p/base/parsing.h"
 #include "talk/p2p/base/sessionmanager.h"
 #include "talk/p2p/base/sessionmessages.h"
-#include "talk/xmllite/qname.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "webrtc/base/base64.h"
 #include "webrtc/base/common.h"
diff --git a/p2p/base/packetsocketfactory.h b/p2p/base/packetsocketfactory.h
index 6b82682..46767c2 100644
--- a/p2p/base/packetsocketfactory.h
+++ b/p2p/base/packetsocketfactory.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef TALK_BASE_PACKETSOCKETFACTORY_H_
-#define TALK_BASE_PACKETSOCKETFACTORY_H_
+#ifndef TALK_P2P_BASE_PACKETSOCKETFACTORY_H_
+#define TALK_P2P_BASE_PACKETSOCKETFACTORY_H_
 
 #include "webrtc/base/proxyinfo.h"
 
@@ -66,4 +66,4 @@
 
 }  // namespace rtc
 
-#endif  // TALK_BASE_PACKETSOCKETFACTORY_H_
+#endif  // TALK_P2P_BASE_PACKETSOCKETFACTORY_H_
diff --git a/p2p/base/parsing.h b/p2p/base/parsing.h
index 635b122..2aab2f8 100644
--- a/p2p/base/parsing.h
+++ b/p2p/base/parsing.h
@@ -30,7 +30,7 @@
 
 #include <string>
 #include <vector>
-#include "talk/xmllite/xmlelement.h"  // Needed to delete ParseError.extra.
+#include "webrtc/libjingle/xmllite/xmlelement.h"  // Needed to delete ParseError.extra.
 #include "webrtc/base/basictypes.h"
 #include "webrtc/base/stringencode.h"
 
diff --git a/p2p/base/port.cc b/p2p/base/port.cc
index 3820154..0cf46eb 100644
--- a/p2p/base/port.cc
+++ b/p2p/base/port.cc
@@ -1212,7 +1212,7 @@
      << ":" << local.type() << ":" << local.protocol()
      << ":" << local.address().ToSensitiveString()
      << "->" << remote.id() << ":" << remote.component()
-     << ":" << remote.preference()
+     << ":" << remote.priority()
      << ":" << remote.type() << ":"
      << remote.protocol() << ":" << remote.address().ToSensitiveString() << "|"
      << CONNECT_STATE_ABBREV[connected()]
diff --git a/p2p/base/port.h b/p2p/base/port.h
index cccfdad..4893586 100644
--- a/p2p/base/port.h
+++ b/p2p/base/port.h
@@ -155,6 +155,7 @@
   uint64 IceTiebreaker() const { return tiebreaker_; }
 
   virtual bool SharedSocket() const { return shared_socket_; }
+  void ResetSharedSocket() { shared_socket_ = false; }
 
   // The thread on which this port performs its I/O.
   rtc::Thread* thread() { return thread_; }
diff --git a/p2p/base/portallocator.h b/p2p/base/portallocator.h
index 84e5fea..5bc389e 100644
--- a/p2p/base/portallocator.h
+++ b/p2p/base/portallocator.h
@@ -44,17 +44,18 @@
 // Clients can override this class to control port allocation, including
 // what kinds of ports are allocated.
 
-const uint32 PORTALLOCATOR_DISABLE_UDP = 0x01;
-const uint32 PORTALLOCATOR_DISABLE_STUN = 0x02;
-const uint32 PORTALLOCATOR_DISABLE_RELAY = 0x04;
-const uint32 PORTALLOCATOR_DISABLE_TCP = 0x08;
-const uint32 PORTALLOCATOR_ENABLE_SHAKER = 0x10;
-const uint32 PORTALLOCATOR_ENABLE_BUNDLE = 0x20;
-const uint32 PORTALLOCATOR_ENABLE_IPV6 = 0x40;
-const uint32 PORTALLOCATOR_ENABLE_SHARED_UFRAG = 0x80;
-const uint32 PORTALLOCATOR_ENABLE_SHARED_SOCKET = 0x100;
-const uint32 PORTALLOCATOR_ENABLE_STUN_RETRANSMIT_ATTRIBUTE = 0x200;
-const uint32 PORTALLOCATOR_ENABLE_TURN_SHARED_SOCKET = 0x400;
+enum {
+  PORTALLOCATOR_DISABLE_UDP = 0x01,
+  PORTALLOCATOR_DISABLE_STUN = 0x02,
+  PORTALLOCATOR_DISABLE_RELAY = 0x04,
+  PORTALLOCATOR_DISABLE_TCP = 0x08,
+  PORTALLOCATOR_ENABLE_SHAKER = 0x10,
+  PORTALLOCATOR_ENABLE_BUNDLE = 0x20,
+  PORTALLOCATOR_ENABLE_IPV6 = 0x40,
+  PORTALLOCATOR_ENABLE_SHARED_UFRAG = 0x80,
+  PORTALLOCATOR_ENABLE_SHARED_SOCKET = 0x100,
+  PORTALLOCATOR_ENABLE_STUN_RETRANSMIT_ATTRIBUTE = 0x200,
+};
 
 const uint32 kDefaultPortAllocatorFlags = 0;
 
@@ -63,6 +64,15 @@
 // internal. Less than 20ms is not acceptable. We choose 50ms as our default.
 const uint32 kMinimumStepDelay = 50;
 
+// CF = CANDIDATE FILTER
+enum {
+  CF_NONE = 0x0,
+  CF_HOST = 0x1,
+  CF_REFLEXIVE = 0x2,
+  CF_RELAY = 0x4,
+  CF_ALL = 0x7,
+};
+
 class PortAllocatorSessionMuxer;
 
 class PortAllocatorSession : public sigslot::has_slots<> {
@@ -118,7 +128,8 @@
       min_port_(0),
       max_port_(0),
       step_delay_(kDefaultStepDelay),
-      allow_tcp_listen_(true) {
+      allow_tcp_listen_(true),
+      candidate_filter_(CF_ALL) {
     // This will allow us to have old behavior on non webrtc clients.
   }
   virtual ~PortAllocator();
@@ -158,7 +169,6 @@
 
   uint32 step_delay() const { return step_delay_; }
   void set_step_delay(uint32 delay) {
-    ASSERT(delay >= kMinimumStepDelay);
     step_delay_ = delay;
   }
 
@@ -167,6 +177,13 @@
     allow_tcp_listen_ = allow_tcp_listen;
   }
 
+  uint32 candidate_filter() { return candidate_filter_; }
+  bool set_candidate_filter(uint32 filter) {
+    // TODO(mallinath) - Do transition check?
+    candidate_filter_ = filter;
+    return true;
+  }
+
  protected:
   virtual PortAllocatorSession* CreateSessionInternal(
       const std::string& content_name,
@@ -184,6 +201,7 @@
   uint32 step_delay_;
   SessionMuxerMap muxers_;
   bool allow_tcp_listen_;
+  uint32 candidate_filter_;
 };
 
 }  // namespace cricket
diff --git a/p2p/base/rawtransport.cc b/p2p/base/rawtransport.cc
index 5913177..2af1864 100644
--- a/p2p/base/rawtransport.cc
+++ b/p2p/base/rawtransport.cc
@@ -32,8 +32,8 @@
 #include "talk/p2p/base/rawtransport.h"
 #include "talk/p2p/base/rawtransportchannel.h"
 #include "talk/p2p/base/sessionmanager.h"
-#include "talk/xmllite/qname.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "webrtc/base/common.h"
 
diff --git a/p2p/base/rawtransportchannel.cc b/p2p/base/rawtransportchannel.cc
index ef0a532..ae268e3 100644
--- a/p2p/base/rawtransportchannel.cc
+++ b/p2p/base/rawtransportchannel.cc
@@ -36,8 +36,8 @@
 #include "talk/p2p/base/relayport.h"
 #include "talk/p2p/base/sessionmanager.h"
 #include "talk/p2p/base/stunport.h"
-#include "talk/xmllite/qname.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "webrtc/base/common.h"
 
diff --git a/p2p/base/session.h b/p2p/base/session.h
index 756bbe0..e06cf00 100644
--- a/p2p/base/session.h
+++ b/p2p/base/session.h
@@ -39,7 +39,7 @@
 #include "talk/p2p/base/sessionmanager.h"
 #include "talk/p2p/base/sessionmessages.h"
 #include "talk/p2p/base/transport.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "webrtc/base/refcount.h"
 #include "webrtc/base/scoped_ptr.h"
diff --git a/p2p/base/session_unittest.cc b/p2p/base/session_unittest.cc
index 90cd06d..4674d2c 100644
--- a/p2p/base/session_unittest.cc
+++ b/p2p/base/session_unittest.cc
@@ -601,8 +601,7 @@
         network_("network", "unittest",
                  rtc::IPAddress(INADDR_LOOPBACK), 8),
         socket_factory_(rtc::Thread::Current()),
-        running_(false),
-        port_(28653) {
+        running_(false) {
     network_.AddIP(address_.ipaddr());
   }
 
@@ -655,7 +654,6 @@
   rtc::Network network_;
   rtc::BasicPacketSocketFactory socket_factory_;
   bool running_;
-  int port_;
 };
 
 class TestPortAllocator : public cricket::PortAllocator {
diff --git a/p2p/base/sessiondescription.cc b/p2p/base/sessiondescription.cc
index 7009aa8..7ad3d48 100644
--- a/p2p/base/sessiondescription.cc
+++ b/p2p/base/sessiondescription.cc
@@ -27,7 +27,7 @@
 
 #include "talk/p2p/base/sessiondescription.h"
 
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 
 namespace cricket {
 
diff --git a/p2p/base/sessionmessages.cc b/p2p/base/sessionmessages.cc
index bfba273..b2fd9d6 100644
--- a/p2p/base/sessionmessages.cc
+++ b/p2p/base/sessionmessages.cc
@@ -36,7 +36,7 @@
 #include "talk/p2p/base/sessionclient.h"
 #include "talk/p2p/base/sessiondescription.h"
 #include "talk/p2p/base/transport.h"
-#include "talk/xmllite/xmlconstants.h"
+#include "webrtc/libjingle/xmllite/xmlconstants.h"
 #include "talk/xmpp/constants.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/base/scoped_ptr.h"
diff --git a/p2p/base/sessionmessages.h b/p2p/base/sessionmessages.h
index 38fca98..7e1f8ac 100644
--- a/p2p/base/sessionmessages.h
+++ b/p2p/base/sessionmessages.h
@@ -36,7 +36,7 @@
 #include "talk/p2p/base/parsing.h"
 #include "talk/p2p/base/sessiondescription.h"  // Needed to delete contents.
 #include "talk/p2p/base/transportinfo.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "webrtc/base/basictypes.h"
 
 namespace cricket {
diff --git a/p2p/base/stun.cc b/p2p/base/stun.cc
index be96b76..061fd9a 100644
--- a/p2p/base/stun.cc
+++ b/p2p/base/stun.cc
@@ -41,6 +41,7 @@
 
 namespace cricket {
 
+const char STUN_ERROR_REASON_TRY_ALTERNATE_SERVER[] = "Try Alternate Server";
 const char STUN_ERROR_REASON_BAD_REQUEST[] = "Bad Request";
 const char STUN_ERROR_REASON_UNAUTHORIZED[] = "Unauthorized";
 const char STUN_ERROR_REASON_FORBIDDEN[] = "Forbidden";
@@ -401,7 +402,7 @@
     case STUN_ATTR_NONCE:               return STUN_VALUE_BYTE_STRING;
     case STUN_ATTR_XOR_MAPPED_ADDRESS:  return STUN_VALUE_XOR_ADDRESS;
     case STUN_ATTR_SOFTWARE:            return STUN_VALUE_BYTE_STRING;
-    case STUN_ATTR_ALTERNATE_SERVER:    return STUN_VALUE_BYTE_STRING;
+    case STUN_ATTR_ALTERNATE_SERVER:    return STUN_VALUE_ADDRESS;
     case STUN_ATTR_FINGERPRINT:         return STUN_VALUE_UINT32;
     case STUN_ATTR_RETRANSMIT_COUNT:    return STUN_VALUE_UINT32;
     default:                            return STUN_VALUE_UNKNOWN;
diff --git a/p2p/base/stun.h b/p2p/base/stun.h
index b22b51e..c4f522b 100644
--- a/p2p/base/stun.h
+++ b/p2p/base/stun.h
@@ -63,7 +63,7 @@
   STUN_ATTR_NONCE                       = 0x0015,  // ByteString
   STUN_ATTR_XOR_MAPPED_ADDRESS          = 0x0020,  // XorAddress
   STUN_ATTR_SOFTWARE                    = 0x8022,  // ByteString
-  STUN_ATTR_ALTERNATE_SERVER            = 0x8023,  // ByteString
+  STUN_ATTR_ALTERNATE_SERVER            = 0x8023,  // Address
   STUN_ATTR_FINGERPRINT                 = 0x8028,  // UInt32
   STUN_ATTR_RETRANSMIT_COUNT            = 0xFF00   // UInt32
 };
@@ -104,6 +104,7 @@
 };
 
 // Strings for the error codes above.
+extern const char STUN_ERROR_REASON_TRY_ALTERNATE_SERVER[];
 extern const char STUN_ERROR_REASON_BAD_REQUEST[];
 extern const char STUN_ERROR_REASON_UNAUTHORIZED[];
 extern const char STUN_ERROR_REASON_UNKNOWN_ATTRIBUTE[];
diff --git a/p2p/base/stunport.h b/p2p/base/stunport.h
index 0a49b67..b3b6d5b 100644
--- a/p2p/base/stunport.h
+++ b/p2p/base/stunport.h
@@ -82,7 +82,7 @@
     return socket_->GetLocalAddress();
   }
 
-  const ServerAddresses server_addresses() const {
+  const ServerAddresses& server_addresses() const {
     return server_addresses_;
   }
   void
diff --git a/p2p/base/testturnserver.h b/p2p/base/testturnserver.h
index e2c0ccb..6c30afe 100644
--- a/p2p/base/testturnserver.h
+++ b/p2p/base/testturnserver.h
@@ -29,6 +29,7 @@
 #define TALK_P2P_BASE_TESTTURNSERVER_H_
 
 #include <string>
+#include <vector>
 
 #include "talk/p2p/base/basicpacketsocketfactory.h"
 #include "talk/p2p/base/stun.h"
@@ -41,6 +42,27 @@
 static const char kTestRealm[] = "example.org";
 static const char kTestSoftware[] = "TestTurnServer";
 
+class TestTurnRedirector : public TurnRedirectInterface {
+ public:
+  explicit TestTurnRedirector(const std::vector<rtc::SocketAddress>& addresses)
+      : alternate_server_addresses_(addresses),
+        iter_(alternate_server_addresses_.begin()) {
+  }
+
+  virtual bool ShouldRedirect(const rtc::SocketAddress&,
+                              rtc::SocketAddress* out) {
+    if (!out || iter_ == alternate_server_addresses_.end()) {
+      return false;
+    }
+    *out = *iter_++;
+    return true;
+  }
+
+ private:
+  const std::vector<rtc::SocketAddress>& alternate_server_addresses_;
+  std::vector<rtc::SocketAddress>::const_iterator iter_;
+};
+
 class TestTurnServer : public TurnAuthInterface {
  public:
   TestTurnServer(rtc::Thread* thread,
@@ -61,6 +83,10 @@
 
   TurnServer* server() { return &server_; }
 
+  void set_redirect_hook(TurnRedirectInterface* redirect_hook) {
+    server_.set_redirect_hook(redirect_hook);
+  }
+
   void AddInternalSocket(const rtc::SocketAddress& int_addr,
                          ProtocolType proto) {
     rtc::Thread* thread = rtc::Thread::Current();
diff --git a/p2p/base/transport.cc b/p2p/base/transport.cc
index 6ee7b2a..d88f5e7 100644
--- a/p2p/base/transport.cc
+++ b/p2p/base/transport.cc
@@ -33,7 +33,7 @@
 #include "talk/p2p/base/port.h"
 #include "talk/p2p/base/sessionmanager.h"
 #include "talk/p2p/base/transportchannelimpl.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "webrtc/base/bind.h"
 #include "webrtc/base/common.h"
diff --git a/p2p/base/transport_unittest.cc b/p2p/base/transport_unittest.cc
index fa2c116..8f7dae2 100644
--- a/p2p/base/transport_unittest.cc
+++ b/p2p/base/transport_unittest.cc
@@ -31,7 +31,7 @@
 #include "talk/p2p/base/parsing.h"
 #include "talk/p2p/base/rawtransport.h"
 #include "talk/p2p/base/sessionmessages.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "webrtc/base/fakesslidentity.h"
 #include "webrtc/base/gunit.h"
diff --git a/p2p/base/turnport.cc b/p2p/base/turnport.cc
index 3faacd1..2908d71 100644
--- a/p2p/base/turnport.cc
+++ b/p2p/base/turnport.cc
@@ -51,6 +51,10 @@
 
 static const size_t TURN_CHANNEL_HEADER_SIZE = 4U;
 
+// Retry at most twice (i.e. three different ALLOCATE requests) on
+// STUN_ERROR_ALLOCATION_MISMATCH error, per RFC 5766.
+static const size_t MAX_ALLOCATE_MISMATCH_RETRIES = 2;
+
 inline bool IsTurnChannelData(uint16 msg_type) {
   return ((msg_type & 0xC000) == 0x4000);  // MSB are 0b01
 }
@@ -78,6 +82,7 @@
  private:
   // Handles authentication challenge from the server.
   void OnAuthChallenge(StunMessage* response, int code);
+  void OnTryAlternate(StunMessage* response, int code);
   void OnUnknownAttribute(StunMessage* response);
 
   TurnPort* port_;
@@ -187,7 +192,8 @@
       request_manager_(thread),
       next_channel_number_(TURN_CHANNEL_NUMBER_START),
       connected_(false),
-      server_priority_(server_priority) {
+      server_priority_(server_priority),
+      allocate_mismatch_retries_(0) {
   request_manager_.SignalSendPacket.connect(this, &TurnPort::OnSendStunPacket);
 }
 
@@ -211,7 +217,8 @@
       request_manager_(thread),
       next_channel_number_(TURN_CHANNEL_NUMBER_START),
       connected_(false),
-      server_priority_(server_priority) {
+      server_priority_(server_priority),
+      allocate_mismatch_retries_(0) {
   request_manager_.SignalSendPacket.connect(this, &TurnPort::OnSendStunPacket);
 }
 
@@ -253,6 +260,9 @@
       return;
     }
 
+    // Insert the current address to prevent redirection pingpong.
+    attempted_server_addresses_.insert(server_address_.address);
+
     LOG_J(LS_INFO, this) << "Trying to connect to TURN server via "
                          << ProtoToString(server_address_.proto) << " @ "
                          << server_address_.address.ToSensitiveString();
@@ -267,6 +277,8 @@
 }
 
 bool TurnPort::CreateTurnClientSocket() {
+  ASSERT(!socket_ || SharedSocket());
+
   if (server_address_.proto == PROTO_UDP && !SharedSocket()) {
     socket_ = socket_factory()->CreateUdpSocket(
         rtc::SocketAddress(ip(), 0), min_port(), max_port());
@@ -336,6 +348,29 @@
   }
 }
 
+void TurnPort::OnAllocateMismatch() {
+  if (allocate_mismatch_retries_ >= MAX_ALLOCATE_MISMATCH_RETRIES) {
+    LOG_J(LS_WARNING, this) << "Giving up on the port after "
+                            << allocate_mismatch_retries_
+                            << " retries for STUN_ERROR_ALLOCATION_MISMATCH";
+    OnAllocateError();
+    return;
+  }
+
+  LOG_J(LS_INFO, this) << "Allocating a new socket after "
+                       << "STUN_ERROR_ALLOCATION_MISMATCH, retry = "
+                       << allocate_mismatch_retries_ + 1;
+  if (SharedSocket()) {
+    ResetSharedSocket();
+  } else {
+    delete socket_;
+  }
+  socket_ = NULL;
+
+  PrepareAddress();
+  ++allocate_mismatch_retries_;
+}
+
 Connection* TurnPort::CreateConnection(const Candidate& address,
                                        CandidateOrigin origin) {
   // TURN-UDP can only connect to UDP candidates.
@@ -458,6 +493,38 @@
   }
 }
 
+
+// Update current server address port with the alternate server address port.
+bool TurnPort::SetAlternateServer(const rtc::SocketAddress& address) {
+  // Check if we have seen this address before and reject if we did.
+  AttemptedServerSet::iterator iter = attempted_server_addresses_.find(address);
+  if (iter != attempted_server_addresses_.end()) {
+    LOG_J(LS_WARNING, this) << "Redirection to ["
+                            << address.ToSensitiveString()
+                            << "] ignored, allocation failed.";
+    return false;
+  }
+
+  // If protocol family of server address doesn't match with local, return.
+  if (!IsCompatibleAddress(address)) {
+    LOG(LS_WARNING) << "Server IP address family does not match with "
+                    << "local host address family type";
+    return false;
+  }
+
+  LOG_J(LS_INFO, this) << "Redirecting from TURN server ["
+                       << server_address_.address.ToSensitiveString()
+                       << "] to TURN server ["
+                       << address.ToSensitiveString()
+                       << "]";
+  server_address_ = ProtocolAddress(address, server_address_.proto,
+                                    server_address_.secure);
+
+  // Insert the current address to prevent redirection pingpong.
+  attempted_server_addresses_.insert(server_address_.address);
+  return true;
+}
+
 void TurnPort::ResolveTurnAddress(const rtc::SocketAddress& address) {
   if (resolver_)
     return;
@@ -544,6 +611,9 @@
   if (message->message_id == MSG_ERROR) {
     SignalPortError(this);
     return;
+  } else if (message->message_id == MSG_ALLOCATE_MISMATCH) {
+    OnAllocateMismatch();
+    return;
   }
 
   Port::OnMessage(message);
@@ -805,6 +875,14 @@
     case STUN_ERROR_UNAUTHORIZED:       // Unauthrorized.
       OnAuthChallenge(response, error_code->code());
       break;
+    case STUN_ERROR_TRY_ALTERNATE:
+      OnTryAlternate(response, error_code->code());
+      break;
+    case STUN_ERROR_ALLOCATION_MISMATCH:
+      // We must handle this error async because trying to delete the socket in
+      // OnErrorResponse will cause a deadlock on the socket.
+      port_->thread()->Post(port_, TurnPort::MSG_ALLOCATE_MISMATCH);
+      break;
     default:
       LOG_J(LS_WARNING, port_) << "Allocate response error, code="
                                << error_code->code();
@@ -849,6 +927,57 @@
   port_->SendRequest(new TurnAllocateRequest(port_), 0);
 }
 
+void TurnAllocateRequest::OnTryAlternate(StunMessage* response, int code) {
+  // TODO(guoweis): Currently, we only support UDP redirect.
+  if (port_->server_address().proto != PROTO_UDP) {
+    LOG_J(LS_WARNING, port_) << "Receiving 300 Alternate Server on non-UDP "
+                         << "allocating request from ["
+                         << port_->server_address().address.ToSensitiveString()
+                         << "], failed as currently not supported";
+    port_->OnAllocateError();
+    return;
+  }
+
+  // According to RFC 5389 section 11, there are use cases where
+  // authentication of the response is not possible, so we're not
+  // validating message integrity.
+
+  // Get the alternate server address attribute value.
+  const StunAddressAttribute* alternate_server_attr =
+      response->GetAddress(STUN_ATTR_ALTERNATE_SERVER);
+  if (!alternate_server_attr) {
+    LOG_J(LS_WARNING, port_) << "Missing STUN_ATTR_ALTERNATE_SERVER "
+                             << "attribute in try alternate error response";
+    port_->OnAllocateError();
+    return;
+  }
+  if (!port_->SetAlternateServer(alternate_server_attr->GetAddress())) {
+    port_->OnAllocateError();
+    return;
+  }
+
+  // Check the attributes.
+  const StunByteStringAttribute* realm_attr =
+      response->GetByteString(STUN_ATTR_REALM);
+  if (realm_attr) {
+    LOG_J(LS_INFO, port_) << "Applying STUN_ATTR_REALM attribute in "
+                          << "try alternate error response.";
+    port_->set_realm(realm_attr->GetString());
+  }
+
+  const StunByteStringAttribute* nonce_attr =
+      response->GetByteString(STUN_ATTR_NONCE);
+  if (nonce_attr) {
+    LOG_J(LS_INFO, port_) << "Applying STUN_ATTR_NONCE attribute in "
+                          << "try alternate error response.";
+    port_->set_nonce(nonce_attr->GetString());
+  }
+
+  // Send another allocate request to alternate server,
+  // with the received realm and nonce values.
+  port_->SendRequest(new TurnAllocateRequest(port_), 0);
+}
+
 TurnRefreshRequest::TurnRefreshRequest(TurnPort* port)
     : StunRequest(new TurnMessage()),
       port_(port) {
@@ -876,7 +1005,6 @@
 }
 
 void TurnRefreshRequest::OnErrorResponse(StunMessage* response) {
-  // TODO(juberti): Handle 437 error response as a success.
   const StunErrorCodeAttribute* error_code = response->GetErrorCode();
   LOG_J(LS_WARNING, port_) << "Refresh response error, code="
                            << error_code->code();
diff --git a/p2p/base/turnport.h b/p2p/base/turnport.h
index d73b11d..ab7d4e7 100644
--- a/p2p/base/turnport.h
+++ b/p2p/base/turnport.h
@@ -30,6 +30,7 @@
 
 #include <stdio.h>
 #include <list>
+#include <set>
 #include <string>
 
 #include "talk/p2p/base/port.h"
@@ -119,6 +120,12 @@
 
   int error() const { return error_; }
 
+  void OnAllocateMismatch();
+
+  rtc::AsyncPacketSocket* socket() const {
+    return socket_;
+  }
+
   // Signal with resolved server address.
   // Parameters are port, server address and resolved server address.
   // This signal will be sent only if server address is resolved successfully.
@@ -153,10 +160,14 @@
            int server_priority);
 
  private:
-  enum { MSG_ERROR = MSG_FIRST_AVAILABLE };
+  enum {
+    MSG_ERROR = MSG_FIRST_AVAILABLE,
+    MSG_ALLOCATE_MISMATCH
+  };
 
   typedef std::list<TurnEntry*> EntryList;
   typedef std::map<rtc::Socket::Option, int> SocketOptionsMap;
+  typedef std::set<rtc::SocketAddress> AttemptedServerSet;
 
   virtual void OnMessage(rtc::Message* pmsg);
 
@@ -170,6 +181,7 @@
     }
   }
 
+  bool SetAlternateServer(const rtc::SocketAddress& address);
   void ResolveTurnAddress(const rtc::SocketAddress& address);
   void OnResolveResult(rtc::AsyncResolverInterface* resolver);
 
@@ -207,6 +219,7 @@
 
   ProtocolAddress server_address_;
   RelayCredentials credentials_;
+  AttemptedServerSet attempted_server_addresses_;
 
   rtc::AsyncPacketSocket* socket_;
   SocketOptionsMap socket_options_;
@@ -226,6 +239,9 @@
   // calculating the candidate priority.
   int server_priority_;
 
+  // The number of retries made due to allocate mismatch error.
+  size_t allocate_mismatch_retries_;
+
   friend class TurnEntry;
   friend class TurnAllocateRequest;
   friend class TurnRefreshRequest;
diff --git a/p2p/base/turnport_unittest.cc b/p2p/base/turnport_unittest.cc
index 44dc64f..d895cbd 100644
--- a/p2p/base/turnport_unittest.cc
+++ b/p2p/base/turnport_unittest.cc
@@ -64,6 +64,8 @@
 static const SocketAddress kTurnTcpIntAddr("99.99.99.4",
                                            cricket::TURN_SERVER_PORT);
 static const SocketAddress kTurnUdpExtAddr("99.99.99.5", 0);
+static const SocketAddress kTurnAlternateUdpIntAddr(
+    "99.99.99.6", cricket::TURN_SERVER_PORT);
 static const SocketAddress kTurnUdpIPv6IntAddr(
     "2400:4030:1:2c00:be30:abcd:efab:cdef", cricket::TURN_SERVER_PORT);
 static const SocketAddress kTurnUdpIPv6ExtAddr(
@@ -208,10 +210,13 @@
                             const cricket::ProtocolAddress& server_address) {
     ASSERT(server_address.proto == cricket::PROTO_UDP);
 
-    socket_.reset(socket_factory_.CreateUdpSocket(
-        rtc::SocketAddress(kLocalAddr1.ipaddr(), 0), 0, 0));
-    ASSERT_TRUE(socket_ != NULL);
-    socket_->SignalReadPacket.connect(this, &TurnPortTest::OnSocketReadPacket);
+    if (!socket_) {
+      socket_.reset(socket_factory_.CreateUdpSocket(
+          rtc::SocketAddress(kLocalAddr1.ipaddr(), 0), 0, 0));
+      ASSERT_TRUE(socket_ != NULL);
+      socket_->SignalReadPacket.connect(
+          this, &TurnPortTest::OnSocketReadPacket);
+    }
 
     cricket::RelayCredentials credentials(username, password);
     turn_port_.reset(cricket::TurnPort::Create(
@@ -411,6 +416,80 @@
   ASSERT_EQ(0U, turn_port_->Candidates().size());
 }
 
+// Tests that a new local address is created after
+// STUN_ERROR_ALLOCATION_MISMATCH.
+TEST_F(TurnPortTest, TestTurnAllocateMismatch) {
+  // Do a normal allocation first.
+  CreateTurnPort(kTurnUsername, kTurnPassword, kTurnUdpProtoAddr);
+  turn_port_->PrepareAddress();
+  EXPECT_TRUE_WAIT(turn_ready_, kTimeout);
+  rtc::SocketAddress first_addr(turn_port_->socket()->GetLocalAddress());
+
+  // Forces the socket server to assign the same port.
+  ss_->SetNextPortForTesting(first_addr.port());
+
+  turn_ready_ = false;
+  CreateTurnPort(kTurnUsername, kTurnPassword, kTurnUdpProtoAddr);
+  turn_port_->PrepareAddress();
+
+  // Verifies that the new port has the same address.
+  EXPECT_EQ(first_addr, turn_port_->socket()->GetLocalAddress());
+
+  EXPECT_TRUE_WAIT(turn_ready_, kTimeout);
+
+  // Verifies that the new port has a different address now.
+  EXPECT_NE(first_addr, turn_port_->socket()->GetLocalAddress());
+}
+
+// Tests that a shared-socket-TurnPort creates its own socket after
+// STUN_ERROR_ALLOCATION_MISMATCH.
+TEST_F(TurnPortTest, TestSharedSocketAllocateMismatch) {
+  // Do a normal allocation first.
+  CreateSharedTurnPort(kTurnUsername, kTurnPassword, kTurnUdpProtoAddr);
+  turn_port_->PrepareAddress();
+  EXPECT_TRUE_WAIT(turn_ready_, kTimeout);
+  rtc::SocketAddress first_addr(turn_port_->socket()->GetLocalAddress());
+
+  turn_ready_ = false;
+  CreateSharedTurnPort(kTurnUsername, kTurnPassword, kTurnUdpProtoAddr);
+
+  // Verifies that the new port has the same address.
+  EXPECT_EQ(first_addr, turn_port_->socket()->GetLocalAddress());
+  EXPECT_TRUE(turn_port_->SharedSocket());
+
+  turn_port_->PrepareAddress();
+  EXPECT_TRUE_WAIT(turn_ready_, kTimeout);
+
+  // Verifies that the new port has a different address now.
+  EXPECT_NE(first_addr, turn_port_->socket()->GetLocalAddress());
+  EXPECT_FALSE(turn_port_->SharedSocket());
+}
+
+TEST_F(TurnPortTest, TestTurnTcpAllocateMismatch) {
+  turn_server_.AddInternalSocket(kTurnTcpIntAddr, cricket::PROTO_TCP);
+  CreateTurnPort(kTurnUsername, kTurnPassword, kTurnTcpProtoAddr);
+
+  // Do a normal allocation first.
+  turn_port_->PrepareAddress();
+  EXPECT_TRUE_WAIT(turn_ready_, kTimeout);
+  rtc::SocketAddress first_addr(turn_port_->socket()->GetLocalAddress());
+
+  // Forces the socket server to assign the same port.
+  ss_->SetNextPortForTesting(first_addr.port());
+
+  turn_ready_ = false;
+  CreateTurnPort(kTurnUsername, kTurnPassword, kTurnTcpProtoAddr);
+  turn_port_->PrepareAddress();
+
+  // Verifies that the new port has the same address.
+  EXPECT_EQ(first_addr, turn_port_->socket()->GetLocalAddress());
+
+  EXPECT_TRUE_WAIT(turn_ready_, kTimeout);
+
+  // Verifies that the new port has a different address now.
+  EXPECT_NE(first_addr, turn_port_->socket()->GetLocalAddress());
+}
+
 // Do a TURN allocation and try to send a packet to it from the outside.
 // The packet should be dropped. Then, try to send a packet from TURN to the
 // outside. It should reach its destination. Finally, try again from the
@@ -445,6 +524,103 @@
   ASSERT_EQ(0U, turn_port_->Candidates().size());
 }
 
+// Test try-alternate-server feature.
+TEST_F(TurnPortTest, TestTurnAlternateServer) {
+  std::vector<rtc::SocketAddress> redirect_addresses;
+  redirect_addresses.push_back(kTurnAlternateUdpIntAddr);
+
+  cricket::TestTurnRedirector redirector(redirect_addresses);
+  turn_server_.AddInternalSocket(kTurnAlternateUdpIntAddr,
+                                 cricket::PROTO_UDP);
+  turn_server_.set_redirect_hook(&redirector);
+  CreateTurnPort(kTurnUsername, kTurnPassword, kTurnUdpProtoAddr);
+
+  // Retrieve the address before we run the state machine.
+  const SocketAddress old_addr = turn_port_->server_address().address;
+
+  turn_port_->PrepareAddress();
+  EXPECT_TRUE_WAIT(turn_ready_, kTimeout);
+  // Retrieve the address again, the turn port's address should be
+  // changed.
+  const SocketAddress new_addr = turn_port_->server_address().address;
+  EXPECT_NE(old_addr, new_addr);
+  ASSERT_EQ(1U, turn_port_->Candidates().size());
+  EXPECT_EQ(kTurnUdpExtAddr.ipaddr(),
+            turn_port_->Candidates()[0].address().ipaddr());
+  EXPECT_NE(0, turn_port_->Candidates()[0].address().port());
+}
+
+// Test that we fail when we redirect to an address different from
+// current IP family.
+TEST_F(TurnPortTest, TestTurnAlternateServerV4toV6) {
+  std::vector<rtc::SocketAddress> redirect_addresses;
+  redirect_addresses.push_back(kTurnUdpIPv6IntAddr);
+
+  cricket::TestTurnRedirector redirector(redirect_addresses);
+  turn_server_.AddInternalSocket(kTurnAlternateUdpIntAddr,
+                                 cricket::PROTO_UDP);
+  turn_server_.set_redirect_hook(&redirector);
+  CreateTurnPort(kTurnUsername, kTurnPassword, kTurnUdpProtoAddr);
+  turn_port_->PrepareAddress();
+  EXPECT_TRUE_WAIT(turn_error_, kTimeout);
+}
+
+// Test that we fail to handle alternate-server response over TCP protocol.
+TEST_F(TurnPortTest, TestTurnAlternateServerTcp) {
+  std::vector<rtc::SocketAddress> redirect_addresses;
+  redirect_addresses.push_back(kTurnAlternateUdpIntAddr);
+
+  cricket::TestTurnRedirector redirector(redirect_addresses);
+  turn_server_.set_redirect_hook(&redirector);
+  turn_server_.AddInternalSocket(kTurnTcpIntAddr, cricket::PROTO_TCP);
+  CreateTurnPort(kTurnUsername, kTurnPassword, kTurnTcpProtoAddr);
+
+  turn_server_.AddInternalSocket(kTurnAlternateUdpIntAddr, cricket::PROTO_TCP);
+  turn_port_->PrepareAddress();
+  EXPECT_TRUE_WAIT(turn_error_, kTimeout);
+}
+
+// Test try-alternate-server catches the case of pingpong.
+TEST_F(TurnPortTest, TestTurnAlternateServerPingPong) {
+  std::vector<rtc::SocketAddress> redirect_addresses;
+  redirect_addresses.push_back(kTurnAlternateUdpIntAddr);
+  redirect_addresses.push_back(kTurnUdpIntAddr);
+
+  cricket::TestTurnRedirector redirector(redirect_addresses);
+
+  turn_server_.AddInternalSocket(kTurnAlternateUdpIntAddr,
+                                 cricket::PROTO_UDP);
+  turn_server_.set_redirect_hook(&redirector);
+  CreateTurnPort(kTurnUsername, kTurnPassword, kTurnUdpProtoAddr);
+
+  turn_port_->PrepareAddress();
+  EXPECT_TRUE_WAIT(turn_error_, kTimeout);
+  ASSERT_EQ(0U, turn_port_->Candidates().size());
+  rtc::SocketAddress address;
+  // Verify that we have exhausted all alternate servers instead of
+  // failure caused by other errors.
+  EXPECT_FALSE(redirector.ShouldRedirect(address, &address));
+}
+
+// Test that try-alternate-server catches the case of a repeated server.
+TEST_F(TurnPortTest, TestTurnAlternateServerDetectRepetition) {
+  std::vector<rtc::SocketAddress> redirect_addresses;
+  redirect_addresses.push_back(kTurnAlternateUdpIntAddr);
+  redirect_addresses.push_back(kTurnAlternateUdpIntAddr);
+
+  cricket::TestTurnRedirector redirector(redirect_addresses);
+
+  turn_server_.AddInternalSocket(kTurnAlternateUdpIntAddr,
+                                 cricket::PROTO_UDP);
+  turn_server_.set_redirect_hook(&redirector);
+  CreateTurnPort(kTurnUsername, kTurnPassword, kTurnUdpProtoAddr);
+
+  turn_port_->PrepareAddress();
+  EXPECT_TRUE_WAIT(turn_error_, kTimeout);
+  ASSERT_EQ(0U, turn_port_->Candidates().size());
+}
+
+
 // Run TurnConnectionTest with one-time-use nonce feature.
 // Here server will send a 438 STALE_NONCE error message for
 // every TURN transaction.
@@ -515,4 +691,3 @@
   EXPECT_EQ(last_fd_count, GetFDCount());
 }
 #endif
-
diff --git a/p2p/base/turnserver.cc b/p2p/base/turnserver.cc
index abc065a..dbcbcd4 100644
--- a/p2p/base/turnserver.cc
+++ b/p2p/base/turnserver.cc
@@ -62,9 +62,9 @@
   return ((msg_type & 0xC000) == 0x4000);
 }
 
-// IDs used for posted messages.
+// IDs used for posted messages for TurnServer::Allocation.
 enum {
-  MSG_TIMEOUT,
+  MSG_ALLOCATION_TIMEOUT,
 };
 
 // Encapsulates a TURN allocation.
@@ -208,6 +208,7 @@
     : thread_(thread),
       nonce_key_(rtc::CreateRandomString(kNonceKeySize)),
       auth_hook_(NULL),
+      redirect_hook_(NULL),
       enable_otu_nonce_(false) {
 }
 
@@ -316,6 +317,15 @@
     return;
   }
 
+  if (redirect_hook_ != NULL && msg.type() == STUN_ALLOCATE_REQUEST) {
+    rtc::SocketAddress address;
+    if (redirect_hook_->ShouldRedirect(conn->src(), &address)) {
+      SendErrorResponseWithAlternateServer(
+          conn, &msg, address);
+      return;
+    }
+  }
+
   // Look up the key that we'll use to validate the M-I. If we have an
   // existing allocation, the key will already be cached.
   Allocation* allocation = FindAllocation(conn);
@@ -334,7 +344,6 @@
   }
 
   if (!allocation && msg.type() == STUN_ALLOCATE_REQUEST) {
-    // This is a new allocate request.
     HandleAllocateRequest(conn, &msg, key);
   } else if (allocation &&
              (msg.type() != STUN_ALLOCATE_REQUEST ||
@@ -551,6 +560,17 @@
   SendStun(conn, &resp);
 }
 
+void TurnServer::SendErrorResponseWithAlternateServer(
+    Connection* conn, const StunMessage* msg,
+    const rtc::SocketAddress& addr) {
+  TurnMessage resp;
+  InitErrorResponse(msg, STUN_ERROR_TRY_ALTERNATE,
+                    STUN_ERROR_REASON_TRY_ALTERNATE_SERVER, &resp);
+  VERIFY(resp.AddAttribute(new StunAddressAttribute(
+      STUN_ATTR_ALTERNATE_SERVER, addr)));
+  SendStun(conn, &resp);
+}
+
 void TurnServer::SendStun(Connection* conn, StunMessage* msg) {
   rtc::ByteBuffer buf;
   // Add a SOFTWARE attribute if one is set.
@@ -588,7 +608,9 @@
   InternalSocketMap::iterator iter = server_sockets_.find(socket);
   if (iter != server_sockets_.end()) {
     rtc::AsyncPacketSocket* socket = iter->first;
-    delete socket;
+    // We must destroy the socket async to avoid invalidating the sigslot
+    // callback list iterator inside a sigslot callback.
+    rtc::Thread::Current()->Dispose(socket);
     server_sockets_.erase(iter);
   }
 }
@@ -642,7 +664,7 @@
        it != perms_.end(); ++it) {
     delete *it;
   }
-  thread_->Clear(this, MSG_TIMEOUT);
+  thread_->Clear(this, MSG_ALLOCATION_TIMEOUT);
   LOG_J(LS_INFO, this) << "Allocation destroyed";
 }
 
@@ -687,7 +709,7 @@
 
   // Figure out the lifetime and start the allocation timer.
   int lifetime_secs = ComputeLifetime(msg);
-  thread_->PostDelayed(lifetime_secs * 1000, this, MSG_TIMEOUT);
+  thread_->PostDelayed(lifetime_secs * 1000, this, MSG_ALLOCATION_TIMEOUT);
 
   LOG_J(LS_INFO, this) << "Created allocation, lifetime=" << lifetime_secs;
 
@@ -714,8 +736,8 @@
   int lifetime_secs = ComputeLifetime(msg);
 
   // Reset the expiration timer.
-  thread_->Clear(this, MSG_TIMEOUT);
-  thread_->PostDelayed(lifetime_secs * 1000, this, MSG_TIMEOUT);
+  thread_->Clear(this, MSG_ALLOCATION_TIMEOUT);
+  thread_->PostDelayed(lifetime_secs * 1000, this, MSG_ALLOCATION_TIMEOUT);
 
   LOG_J(LS_INFO, this) << "Refreshed allocation, lifetime=" << lifetime_secs;
 
@@ -943,7 +965,7 @@
 }
 
 void TurnServer::Allocation::OnMessage(rtc::Message* msg) {
-  ASSERT(msg->message_id == MSG_TIMEOUT);
+  ASSERT(msg->message_id == MSG_ALLOCATION_TIMEOUT);
   SignalDestroyed(this);
   delete this;
 }
@@ -968,16 +990,16 @@
 }
 
 TurnServer::Permission::~Permission() {
-  thread_->Clear(this, MSG_TIMEOUT);
+  thread_->Clear(this, MSG_ALLOCATION_TIMEOUT);
 }
 
 void TurnServer::Permission::Refresh() {
-  thread_->Clear(this, MSG_TIMEOUT);
-  thread_->PostDelayed(kPermissionTimeout, this, MSG_TIMEOUT);
+  thread_->Clear(this, MSG_ALLOCATION_TIMEOUT);
+  thread_->PostDelayed(kPermissionTimeout, this, MSG_ALLOCATION_TIMEOUT);
 }
 
 void TurnServer::Permission::OnMessage(rtc::Message* msg) {
-  ASSERT(msg->message_id == MSG_TIMEOUT);
+  ASSERT(msg->message_id == MSG_ALLOCATION_TIMEOUT);
   SignalDestroyed(this);
   delete this;
 }
@@ -989,16 +1011,16 @@
 }
 
 TurnServer::Channel::~Channel() {
-  thread_->Clear(this, MSG_TIMEOUT);
+  thread_->Clear(this, MSG_ALLOCATION_TIMEOUT);
 }
 
 void TurnServer::Channel::Refresh() {
-  thread_->Clear(this, MSG_TIMEOUT);
-  thread_->PostDelayed(kChannelTimeout, this, MSG_TIMEOUT);
+  thread_->Clear(this, MSG_ALLOCATION_TIMEOUT);
+  thread_->PostDelayed(kChannelTimeout, this, MSG_ALLOCATION_TIMEOUT);
 }
 
 void TurnServer::Channel::OnMessage(rtc::Message* msg) {
-  ASSERT(msg->message_id == MSG_TIMEOUT);
+  ASSERT(msg->message_id == MSG_ALLOCATION_TIMEOUT);
   SignalDestroyed(this);
   delete this;
 }
diff --git a/p2p/base/turnserver.h b/p2p/base/turnserver.h
index 4798232..553d00c 100644
--- a/p2p/base/turnserver.h
+++ b/p2p/base/turnserver.h
@@ -63,6 +63,14 @@
                       std::string* key) = 0;
 };
 
+// An interface that enables the TURN server to control redirection behavior.
+class TurnRedirectInterface {
+ public:
+  virtual bool ShouldRedirect(const rtc::SocketAddress& address,
+                              rtc::SocketAddress* out) = 0;
+  virtual ~TurnRedirectInterface() {}
+};
+
 // The core TURN server class. Give it a socket to listen on via
 // AddInternalServerSocket, and a factory to create external sockets via
 // SetExternalSocketFactory, and it's ready to go.
@@ -83,6 +91,10 @@
   // Sets the authentication callback; does not take ownership.
   void set_auth_hook(TurnAuthInterface* auth_hook) { auth_hook_ = auth_hook; }
 
+  void set_redirect_hook(TurnRedirectInterface* redirect_hook) {
+    redirect_hook_ = redirect_hook;
+  }
+
   void set_enable_otu_nonce(bool enable) { enable_otu_nonce_ = enable; }
 
   // Starts listening for packets from internal clients.
@@ -155,6 +167,11 @@
                                           const StunMessage* req,
                                           int code,
                                           const std::string& reason);
+
+  void SendErrorResponseWithAlternateServer(Connection* conn,
+                                            const StunMessage* req,
+                                            const rtc::SocketAddress& addr);
+
   void SendStun(Connection* conn, StunMessage* msg);
   void Send(Connection* conn, const rtc::ByteBuffer& buf);
 
@@ -171,14 +188,17 @@
   std::string realm_;
   std::string software_;
   TurnAuthInterface* auth_hook_;
+  TurnRedirectInterface* redirect_hook_;
   // otu - one-time-use. Server will respond with 438 if it's
   // sees the same nonce in next transaction.
   bool enable_otu_nonce_;
+
   InternalSocketMap server_sockets_;
   ServerSocketMap server_listen_sockets_;
   rtc::scoped_ptr<rtc::PacketSocketFactory>
       external_socket_factory_;
   rtc::SocketAddress external_addr_;
+
   AllocationMap allocations_;
 };
 
diff --git a/p2p/client/autoportallocator.h b/p2p/client/autoportallocator.h
index 298f829..ed87162 100644
--- a/p2p/client/autoportallocator.h
+++ b/p2p/client/autoportallocator.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef TALK_EXAMPLES_LOGIN_AUTOPORTALLOCATOR_H_
-#define TALK_EXAMPLES_LOGIN_AUTOPORTALLOCATOR_H_
+#ifndef TALK_P2P_CLIENT_AUTOPORTALLOCATOR_H_
+#define TALK_P2P_CLIENT_AUTOPORTALLOCATOR_H_
 
 #include <string>
 #include <vector>
@@ -66,4 +66,4 @@
   }
 };
 
-#endif  // TALK_EXAMPLES_LOGIN_AUTOPORTALLOCATOR_H_
+#endif  // TALK_P2P_CLIENT_AUTOPORTALLOCATOR_H_
diff --git a/p2p/client/basicportallocator.cc b/p2p/client/basicportallocator.cc
index d39db8f..0ec13e7 100644
--- a/p2p/client/basicportallocator.cc
+++ b/p2p/client/basicportallocator.cc
@@ -47,13 +47,15 @@
 
 namespace {
 
-const uint32 MSG_CONFIG_START = 1;
-const uint32 MSG_CONFIG_READY = 2;
-const uint32 MSG_ALLOCATE = 3;
-const uint32 MSG_ALLOCATION_PHASE = 4;
-const uint32 MSG_SHAKE = 5;
-const uint32 MSG_SEQUENCEOBJECTS_CREATED = 6;
-const uint32 MSG_CONFIG_STOP = 7;
+enum {
+  MSG_CONFIG_START,
+  MSG_CONFIG_READY,
+  MSG_ALLOCATE,
+  MSG_ALLOCATION_PHASE,
+  MSG_SHAKE,
+  MSG_SEQUENCEOBJECTS_CREATED,
+  MSG_CONFIG_STOP,
+};
 
 const int PHASE_UDP = 0;
 const int PHASE_RELAY = 1;
@@ -147,9 +149,6 @@
                     const rtc::PacketTime& packet_time);
 
   void OnPortDestroyed(PortInterface* port);
-  void OnResolvedTurnServerAddress(
-    TurnPort* port, const rtc::SocketAddress& server_address,
-    const rtc::SocketAddress& resolved_server_address);
 
   BasicPortAllocatorSession* session_;
   rtc::Network* network_;
@@ -161,8 +160,7 @@
   rtc::scoped_ptr<rtc::AsyncPacketSocket> udp_socket_;
   // There will be only one udp port per AllocationSequence.
   UDPPort* udp_port_;
-  // Keeping a map for turn ports keyed with server addresses.
-  std::map<rtc::SocketAddress, Port*> turn_ports_;
+  std::vector<TurnPort*> turn_ports_;
   int phase_;
 };
 
@@ -228,10 +226,11 @@
 PortAllocatorSession *BasicPortAllocator::CreateSessionInternal(
     const std::string& content_name, int component,
     const std::string& ice_ufrag, const std::string& ice_pwd) {
-  return new BasicPortAllocatorSession(this, content_name, component,
-                                       ice_ufrag, ice_pwd);
+  return new BasicPortAllocatorSession(
+      this, content_name, component, ice_ufrag, ice_pwd);
 }
 
+
 // BasicPortAllocatorSession
 BasicPortAllocatorSession::BasicPortAllocatorSession(
     BasicPortAllocator *allocator,
@@ -432,7 +431,11 @@
       }
 
       if (!(sequence_flags & PORTALLOCATOR_ENABLE_IPV6) &&
+#ifdef USE_WEBRTC_DEV_BRANCH
+          networks[i]->GetBestIP().family() == AF_INET6) {
+#else  // USE_WEBRTC_DEV_BRANCH
           networks[i]->ip().family() == AF_INET6) {
+#endif  // USE_WEBRTC_DEV_BRANCH
         // Skip IPv6 networks unless the flag's been set.
         continue;
       }
@@ -530,8 +533,10 @@
   // Send candidates whose protocol is enabled.
   std::vector<Candidate> candidates;
   ProtocolType pvalue;
+  bool candidate_allowed_to_send = CheckCandidateFilter(c);
   if (StringToProto(c.protocol().c_str(), &pvalue) &&
-      data->sequence()->ProtocolEnabled(pvalue)) {
+      data->sequence()->ProtocolEnabled(pvalue) &&
+      candidate_allowed_to_send) {
     candidates.push_back(c);
   }
 
@@ -542,7 +547,9 @@
   // Moving to READY state as we have atleast one candidate from the port.
   // Since this port has atleast one candidate we should forward this port
   // to listners, to allow connections from this port.
-  if (!data->ready()) {
+  // Also we should make sure that candidate gathered from this port is allowed
+  // to send outside.
+  if (!data->ready() && candidate_allowed_to_send) {
     data->set_ready();
     SignalPortReady(this, port);
   }
@@ -588,6 +595,8 @@
 
     const std::vector<Candidate>& potentials = it->port()->Candidates();
     for (size_t i = 0; i < potentials.size(); ++i) {
+      if (!CheckCandidateFilter(potentials[i]))
+        continue;
       ProtocolType pvalue;
       if (!StringToProto(potentials[i].protocol().c_str(), &pvalue))
         continue;
@@ -602,6 +611,31 @@
   }
 }
 
+bool BasicPortAllocatorSession::CheckCandidateFilter(const Candidate& c) {
+  uint32 filter = allocator_->candidate_filter();
+  bool allowed = false;
+  if (filter & CF_RELAY) {
+    allowed |= (c.type() == RELAY_PORT_TYPE);
+  }
+
+  if (filter & CF_REFLEXIVE) {
+    // We allow host candidates if the filter allows server-reflexive candidates
+    // and the candidate is a public IP. Because we don't generate
+    // server-reflexive candidates if they have the same IP as the host
+    // candidate (i.e. when the host candidate is a public IP), filtering to
+    // only server-reflexive candidates won't work right when the host
+    // candidates have public IPs.
+    allowed |= (c.type() == STUN_PORT_TYPE) ||
+               (c.type() == LOCAL_PORT_TYPE && !c.address().IsPrivateIP());
+  }
+
+  if (filter & CF_HOST) {
+    allowed |= (c.type() == LOCAL_PORT_TYPE);
+  }
+
+  return allowed;
+}
+
 void BasicPortAllocatorSession::OnPortAllocationComplete(
     AllocationSequence* seq) {
   // Send candidate allocation complete signal if all ports are done.
@@ -698,7 +732,12 @@
                                        uint32 flags)
     : session_(session),
       network_(network),
+
+#ifdef USE_WEBRTC_DEV_BRANCH
+      ip_(network->GetBestIP()),
+#else  // USE_WEBRTC_DEV_BRANCH
       ip_(network->ip()),
+#endif  // USE_WEBRTC_DEV_BRANCH
       config_(config),
       state_(kInit),
       flags_(flags),
@@ -741,7 +780,11 @@
 
 void AllocationSequence::DisableEquivalentPhases(rtc::Network* network,
     PortConfiguration* config, uint32* flags) {
+#ifdef USE_WEBRTC_DEV_BRANCH
+  if (!((network == network_) && (ip_ == network->GetBestIP()))) {
+#else  // USE_WEBRTC_DEV_BRANCH
   if (!((network == network_) && (ip_ == network->ip()))) {
+#endif  // USE_WEBRTC_DEV_BRANCH
     // Different network setup; nothing is equivalent.
     return;
   }
@@ -1020,26 +1063,15 @@
     // don't pass shared socket for ports which will create TCP sockets.
     // TODO(mallinath) - Enable shared socket mode for TURN ports. Disabled
     // due to webrtc bug https://code.google.com/p/webrtc/issues/detail?id=3537
-    if (IsFlagSet(PORTALLOCATOR_ENABLE_TURN_SHARED_SOCKET) &&
+    if (IsFlagSet(PORTALLOCATOR_ENABLE_SHARED_SOCKET) &&
         relay_port->proto == PROTO_UDP) {
       port = TurnPort::Create(session_->network_thread(),
                               session_->socket_factory(),
                               network_, udp_socket_.get(),
                               session_->username(), session_->password(),
                               *relay_port, config.credentials, config.priority);
-      // If we are using shared socket for TURN and udp ports, we need to
-      // find a way to demux the packets to the correct port when received.
-      // Mapping against server_address is one way of doing this. When packet
-      // is received the remote_address will be checked against the map.
-      // If server address is not resolved, a signal will be sent from the port
-      // after the address is resolved. The map entry will updated with the
-      // resolved address when the signal is received from the port.
-      if ((*relay_port).address.IsUnresolved()) {
-        // If server address is not resolved then listen for signal from port.
-        port->SignalResolvedServerAddress.connect(
-            this, &AllocationSequence::OnResolvedTurnServerAddress);
-      }
-      turn_ports_[(*relay_port).address] = port;
+
+      turn_ports_.push_back(port);
       // Listen to the port destroyed signal, to allow AllocationSequence to
       // remove entrt from it's map.
       port->SignalDestroyed.connect(this, &AllocationSequence::OnPortDestroyed);
@@ -1063,51 +1095,45 @@
     const rtc::SocketAddress& remote_addr,
     const rtc::PacketTime& packet_time) {
   ASSERT(socket == udp_socket_.get());
-  // If the packet is received from one of the TURN server in the config, then
-  // pass down the packet to that port, otherwise it will be handed down to
-  // the local udp port.
-  Port* port = NULL;
-  std::map<rtc::SocketAddress, Port*>::iterator iter =
-      turn_ports_.find(remote_addr);
-  if (iter != turn_ports_.end()) {
-    port = iter->second;
-  } else if (udp_port_) {
-    port = udp_port_;
+
+  bool turn_port_found = false;
+
+  // Try to find the TurnPort that matches the remote address. Note that the
+  // message could be a STUN binding response if the TURN server is also used as
+  // a STUN server. We don't want to parse every message here to check if it is
+  // a STUN binding response, so we pass the message to TurnPort regardless of
+  // the message type. The TurnPort will just ignore the message since it will
+  // not find any request by transaction ID.
+  for (std::vector<TurnPort*>::const_iterator it = turn_ports_.begin();
+       it != turn_ports_.end(); ++it) {
+    TurnPort* port = *it;
+    if (port->server_address().address == remote_addr) {
+      port->HandleIncomingPacket(socket, data, size, remote_addr, packet_time);
+      turn_port_found = true;
+      break;
+    }
   }
-  ASSERT(port != NULL);
-  if (port) {
-    port->HandleIncomingPacket(socket, data, size, remote_addr, packet_time);
+
+  if (udp_port_) {
+    const ServerAddresses& stun_servers = udp_port_->server_addresses();
+
+    // Pass the packet to the UdpPort if there is no matching TurnPort, or if
+    // the TURN server is also a STUN server.
+    if (!turn_port_found ||
+        stun_servers.find(remote_addr) != stun_servers.end()) {
+      udp_port_->HandleIncomingPacket(
+          socket, data, size, remote_addr, packet_time);
+    }
   }
 }
 
 void AllocationSequence::OnPortDestroyed(PortInterface* port) {
   if (udp_port_ == port) {
     udp_port_ = NULL;
-  } else {
-    std::map<rtc::SocketAddress, Port*>::iterator iter;
-    for (iter = turn_ports_.begin(); iter != turn_ports_.end(); ++iter) {
-      if (iter->second == port) {
-        turn_ports_.erase(iter);
-        break;
-      }
-    }
-  }
-}
-
-void AllocationSequence::OnResolvedTurnServerAddress(
-    TurnPort* port, const rtc::SocketAddress& server_address,
-    const rtc::SocketAddress& resolved_server_address) {
-  std::map<rtc::SocketAddress, Port*>::iterator iter;
-  iter = turn_ports_.find(server_address);
-  if (iter == turn_ports_.end()) {
-    LOG(LS_INFO) << "TurnPort entry is not found in the map.";
     return;
   }
 
-  ASSERT(iter->second == port);
-  // Remove old entry and then insert using the resolved address as key.
-  turn_ports_.erase(iter);
-  turn_ports_[resolved_server_address] = port;
+  turn_ports_.erase(std::find(turn_ports_.begin(), turn_ports_.end(), port));
 }
 
 // PortConfiguration
diff --git a/p2p/client/basicportallocator.h b/p2p/client/basicportallocator.h
index 5f43880..d424772 100644
--- a/p2p/client/basicportallocator.h
+++ b/p2p/client/basicportallocator.h
@@ -160,7 +160,6 @@
 
     void set_ready() { ASSERT(state_ == STATE_INIT); state_ = STATE_READY; }
     void set_complete() {
-      ASSERT(state_ == STATE_READY);
       state_ = STATE_COMPLETE;
     }
     void set_error() {
@@ -201,6 +200,8 @@
   void OnPortAllocationComplete(AllocationSequence* seq);
   PortData* FindPort(Port* port);
 
+  bool CheckCandidateFilter(const Candidate& c);
+
   BasicPortAllocator* allocator_;
   rtc::Thread* network_thread_;
   rtc::scoped_ptr<rtc::PacketSocketFactory> owned_socket_factory_;
diff --git a/p2p/client/connectivitychecker.cc b/p2p/client/connectivitychecker.cc
index 06de5e4..723c5a1 100644
--- a/p2p/client/connectivitychecker.cc
+++ b/p2p/client/connectivitychecker.cc
@@ -1,5 +1,29 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
 
 #include <string>
 
@@ -214,7 +238,12 @@
     return;
   }
   rtc::ProxyInfo proxy_info = request->proxy();
-  NicMap::iterator i = nics_.find(NicId(networks[0]->ip(), proxy_info.address));
+  NicMap::iterator i =
+#ifdef USE_WEBRTC_DEV_BRANCH
+      nics_.find(NicId(networks[0]->GetBestIP(), proxy_info.address));
+#else  // USE_WEBRTC_DEV_BRANCH
+      nics_.find(NicId(networks[0]->ip(), proxy_info.address));
+#endif  // USE_WEBRTC_DEV_BRANCH
   if (i != nics_.end()) {
     int port = request->port();
     uint32 now = rtc::Time();
@@ -247,7 +276,11 @@
   ASSERT(worker_ == rtc::Thread::Current());
   RelayPort* relay_port = reinterpret_cast<RelayPort*>(port);
   const ProtocolAddress* address = relay_port->ServerAddress(0);
+#ifdef USE_WEBRTC_DEV_BRANCH
+  rtc::IPAddress ip = port->Network()->GetBestIP();
+#else  // USE_WEBRTC_DEV_BRANCH
   rtc::IPAddress ip = port->Network()->ip();
+#endif  // USE_WEBRTC_DEV_BRANCH
   NicMap::iterator i = nics_.find(NicId(ip, port->proxy().address));
   if (i != nics_.end()) {
     // We have it already, add the new information.
@@ -281,7 +314,11 @@
   ASSERT(worker_ == rtc::Thread::Current());
   const std::vector<Candidate> candidates = port->Candidates();
   Candidate c = candidates[0];
+#ifdef USE_WEBRTC_DEV_BRANCH
+  rtc::IPAddress ip = port->Network()->GetBestIP();
+#else  // USE_WEBRTC_DEV_BRANCH
   rtc::IPAddress ip = port->Network()->ip();
+#endif  // USE_WEBRTC_DEV_BRANCH
   NicMap::iterator i = nics_.find(NicId(ip, port->proxy().address));
   if (i != nics_.end()) {
     // We have it already, add the new information.
@@ -300,7 +337,11 @@
 void ConnectivityChecker::OnStunPortError(Port* port) {
   ASSERT(worker_ == rtc::Thread::Current());
   LOG(LS_ERROR) << "Stun address error.";
+#ifdef USE_WEBRTC_DEV_BRANCH
+  rtc::IPAddress ip = port->Network()->GetBestIP();
+#else  // USE_WEBRTC_DEV_BRANCH
   rtc::IPAddress ip = port->Network()->ip();
+#endif  // USE_WEBRTC_DEV_BRANCH
   NicMap::iterator i = nics_.find(NicId(ip, port->proxy().address));
   if (i != nics_.end()) {
     // We have it already, add the new information.
@@ -337,19 +378,36 @@
 StunPort* ConnectivityChecker::CreateStunPort(
     const std::string& username, const std::string& password,
     const PortConfiguration* config, rtc::Network* network) {
-  return StunPort::Create(worker_, socket_factory_.get(),
-                          network, network->ip(), 0, 0,
-                          username, password, config->stun_servers);
+  return StunPort::Create(worker_,
+                          socket_factory_.get(),
+                          network,
+#ifdef USE_WEBRTC_DEV_BRANCH
+                          network->GetBestIP(),
+#else  // USE_WEBRTC_DEV_BRANCH
+                          network->ip(),
+#endif  // USE_WEBRTC_DEV_BRANCH
+                          0,
+                          0,
+                          username,
+                          password,
+                          config->stun_servers);
 }
 
 RelayPort* ConnectivityChecker::CreateRelayPort(
     const std::string& username, const std::string& password,
     const PortConfiguration* config, rtc::Network* network) {
-  return RelayPort::Create(worker_, socket_factory_.get(),
-                           network, network->ip(),
+  return RelayPort::Create(worker_,
+                           socket_factory_.get(),
+                           network,
+#ifdef USE_WEBRTC_DEV_BRANCH
+                           network->GetBestIP(),
+#else  // USE_WEBRTC_DEV_BRANCH
+                           network->ip(),
+#endif  // USE_WEBRTC_DEV_BRANCH
                            port_allocator_->min_port(),
                            port_allocator_->max_port(),
-                           username, password);
+                           username,
+                           password);
 }
 
 void ConnectivityChecker::CreateRelayPorts(
@@ -365,8 +423,12 @@
   for (relay = config->relays.begin();
        relay != config->relays.end(); ++relay) {
     for (uint32 i = 0; i < networks.size(); ++i) {
-      NicMap::iterator iter = nics_.find(NicId(networks[i]->ip(),
-                                               proxy_info.address));
+      NicMap::iterator iter =
+#ifdef USE_WEBRTC_DEV_BRANCH
+          nics_.find(NicId(networks[i]->GetBestIP(), proxy_info.address));
+#else  // USE_WEBRTC_DEV_BRANCH
+          nics_.find(NicId(networks[i]->ip(), proxy_info.address));
+#endif  // USE_WEBRTC_DEV_BRANCH
       if (iter != nics_.end()) {
         // TODO: Now setting the same start time for all protocols.
         // This might affect accuracy, but since we are mainly looking for
@@ -423,7 +485,11 @@
   rtc::ProxyInfo proxy_info = GetProxyInfo();
   bool allocate_relay_ports = false;
   for (uint32 i = 0; i < networks.size(); ++i) {
+#ifdef USE_WEBRTC_DEV_BRANCH
+    if (AddNic(networks[i]->GetBestIP(), proxy_info.address)) {
+#else  // USE_WEBRTC_DEV_BRANCH
     if (AddNic(networks[i]->ip(), proxy_info.address)) {
+#endif  // USE_WEBRTC_DEV_BRANCH
       Port* port = CreateStunPort(username, password, &config, networks[i]);
       if (port) {
 
@@ -500,7 +566,12 @@
     return;
   }
   rtc::ProxyInfo proxy_info = GetProxyInfo();
-  NicMap::iterator i = nics_.find(NicId(networks[0]->ip(), proxy_info.address));
+  NicMap::iterator i =
+#ifdef USE_WEBRTC_DEV_BRANCH
+      nics_.find(NicId(networks[0]->GetBestIP(), proxy_info.address));
+#else  // USE_WEBRTC_DEV_BRANCH
+      nics_.find(NicId(networks[0]->ip(), proxy_info.address));
+#endif  // USE_WEBRTC_DEV_BRANCH
   if (i != nics_.end()) {
     uint32 now = rtc::Time();
     NicInfo* nic_info = &i->second;
diff --git a/p2p/client/connectivitychecker_unittest.cc b/p2p/client/connectivitychecker_unittest.cc
index b96cf17..187505a 100644
--- a/p2p/client/connectivitychecker_unittest.cc
+++ b/p2p/client/connectivitychecker_unittest.cc
@@ -1,5 +1,29 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-
+/*
+ * libjingle
+ * Copyright 2011, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
 
 #include <string>
 
@@ -211,19 +235,35 @@
   virtual StunPort* CreateStunPort(
       const std::string& username, const std::string& password,
       const PortConfiguration* config, rtc::Network* network) {
-    return new FakeStunPort(worker(), socket_factory_,
-                            network, network->ip(),
-                            kMinPort, kMaxPort,
-                            username, password,
+    return new FakeStunPort(worker(),
+                            socket_factory_,
+                            network,
+#ifdef USE_WEBRTC_DEV_BRANCH
+                            network->GetBestIP(),
+#else  // USE_WEBRTC_DEV_BRANCH
+                            network->ip(),
+#endif  // USE_WEBRTC_DEV_BRANCH
+                            kMinPort,
+                            kMaxPort,
+                            username,
+                            password,
                             config->stun_servers);
   }
   virtual RelayPort* CreateRelayPort(
       const std::string& username, const std::string& password,
       const PortConfiguration* config, rtc::Network* network) {
-    return new FakeRelayPort(worker(), socket_factory_,
-                             network, network->ip(),
-                             kMinPort, kMaxPort,
-                             username, password);
+    return new FakeRelayPort(worker(),
+                             socket_factory_,
+                             network,
+#ifdef USE_WEBRTC_DEV_BRANCH
+                             network->GetBestIP(),
+#else  // USE_WEBRTC_DEV_BRANCH
+                             network->ip(),
+#endif  // USE_WEBRTC_DEV_BRANCH
+                             kMinPort,
+                             kMaxPort,
+                             username,
+                             password);
   }
   virtual void InitiateProxyDetection() {
     if (!proxy_initiated_) {
diff --git a/p2p/client/fakeportallocator.h b/p2p/client/fakeportallocator.h
index 6c36c4e..e1a04dd 100644
--- a/p2p/client/fakeportallocator.h
+++ b/p2p/client/fakeportallocator.h
@@ -1,6 +1,29 @@
-// Copyright 2010 Google Inc. All Rights Reserved,
-//
-// Author: Justin Uberti (juberti@google.com)
+/*
+ * libjingle
+ * Copyright 2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright notice,
+ *     this list of conditions and the following disclaimer in the documentation
+ *     and/or other materials provided with the distribution.
+ *  3. The name of the author may not be used to endorse or promote products
+ *     derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
 
 #ifndef TALK_P2P_CLIENT_FAKEPORTALLOCATOR_H_
 #define TALK_P2P_CLIENT_FAKEPORTALLOCATOR_H_
@@ -39,10 +62,18 @@
 
   virtual void StartGettingPorts() {
     if (!port_) {
-      port_.reset(cricket::UDPPort::Create(worker_thread_, factory_,
-                      &network_, network_.ip(), 0, 0,
-                      username(),
-                      password()));
+      port_.reset(cricket::UDPPort::Create(worker_thread_,
+                                           factory_,
+                                           &network_,
+#ifdef USE_WEBRTC_DEV_BRANCH
+                                           network_.GetBestIP(),
+#else  // USE_WEBRTC_DEV_BRANCH
+                                           network_.ip(),
+#endif  // USE_WEBRTC_DEV_BRANCH
+                                           0,
+                                           0,
+                                           username(),
+                                           password()));
       AddPort(port_.get());
     }
     ++port_config_count_;
diff --git a/p2p/client/portallocator_unittest.cc b/p2p/client/portallocator_unittest.cc
index 57cfbe3..e793064 100644
--- a/p2p/client/portallocator_unittest.cc
+++ b/p2p/client/portallocator_unittest.cc
@@ -53,6 +53,7 @@
 using rtc::Thread;
 
 static const SocketAddress kClientAddr("11.11.11.11", 0);
+static const SocketAddress kPrivateAddr("192.168.1.11", 0);
 static const SocketAddress kClientIPv6Addr(
     "2401:fa00:4:1000:be30:5bff:fee5:c3", 0);
 static const SocketAddress kClientAddr2("22.22.22.22", 0);
@@ -132,9 +133,32 @@
   bool SetPortRange(int min_port, int max_port) {
     return allocator_->SetPortRange(min_port, max_port);
   }
-  rtc::NATServer* CreateNatServer(const SocketAddress& addr,
-                                        rtc::NATType type) {
-    return new rtc::NATServer(type, vss_.get(), addr, vss_.get(), addr);
+  void ResetWithNatServer(const rtc::SocketAddress& stun_server) {
+    nat_server_.reset(new rtc::NATServer(
+        rtc::NAT_OPEN_CONE, vss_.get(), kNatAddr, vss_.get(), kNatAddr));
+
+    ServerAddresses stun_servers;
+    stun_servers.insert(stun_server);
+    allocator_.reset(new cricket::BasicPortAllocator(
+        &network_manager_, &nat_socket_factory_, stun_servers));
+    allocator().set_step_delay(cricket::kMinimumStepDelay);
+  }
+
+  void AddTurnServers(const rtc::SocketAddress& udp_turn,
+                      const rtc::SocketAddress& tcp_turn) {
+    cricket::RelayServerConfig relay_server(cricket::RELAY_TURN);
+    cricket::RelayCredentials credentials(kTurnUsername, kTurnPassword);
+    relay_server.credentials = credentials;
+
+    if (!udp_turn.IsNil()) {
+      relay_server.ports.push_back(cricket::ProtocolAddress(
+          kTurnUdpIntAddr, cricket::PROTO_UDP, false));
+    }
+    if (!tcp_turn.IsNil()) {
+      relay_server.ports.push_back(cricket::ProtocolAddress(
+          kTurnTcpIntAddr, cricket::PROTO_TCP, false));
+    }
+    allocator_->AddRelay(relay_server);
   }
 
   bool CreateSession(int component) {
@@ -253,6 +277,7 @@
   rtc::scoped_ptr<rtc::VirtualSocketServer> vss_;
   rtc::scoped_ptr<rtc::FirewallSocketServer> fss_;
   rtc::SocketServerScope ss_scope_;
+  rtc::scoped_ptr<rtc::NATServer> nat_server_;
   rtc::NATSocketFactory nat_factory_;
   rtc::BasicPacketSocketFactory nat_socket_factory_;
   cricket::TestStunServer stun_server_;
@@ -538,7 +563,7 @@
 // Test to verify ICE restart process.
 TEST_F(PortAllocatorTest, TestGetAllPortsRestarts) {
   AddInterface(kClientAddr);
-  EXPECT_TRUE(CreateSession(1));
+  EXPECT_TRUE(CreateSession(cricket::ICE_CANDIDATE_COMPONENT_RTP));
   session_->StartGettingPorts();
   EXPECT_EQ_WAIT(7U, candidates_.size(), kDefaultAllocationTimeout);
   EXPECT_EQ(4U, ports_.size());
@@ -546,6 +571,73 @@
   // TODO - Extend this to verify ICE restart.
 }
 
+// Test ICE candidate filter mechanism with options Relay/Host/Reflexive.
+TEST_F(PortAllocatorTest, TestCandidateFilterWithRelayOnly) {
+  AddInterface(kClientAddr);
+  allocator().set_candidate_filter(cricket::CF_RELAY);
+  EXPECT_TRUE(CreateSession(cricket::ICE_CANDIDATE_COMPONENT_RTP));
+  session_->StartGettingPorts();
+  EXPECT_TRUE_WAIT(candidate_allocation_done_, kDefaultAllocationTimeout);
+  // Using GTURN, we will have 4 candidates.
+  EXPECT_EQ(4U, candidates_.size());
+  EXPECT_EQ(1U, ports_.size());  // Only Relay port will be in ready state.
+  for (size_t i = 0; i < candidates_.size(); ++i) {
+    EXPECT_EQ(std::string(cricket::RELAY_PORT_TYPE), candidates_[i].type());
+  }
+}
+
+TEST_F(PortAllocatorTest, TestCandidateFilterWithHostOnly) {
+  AddInterface(kClientAddr);
+  allocator().set_flags(cricket::PORTALLOCATOR_ENABLE_SHARED_UFRAG |
+                        cricket::PORTALLOCATOR_ENABLE_SHARED_SOCKET);
+  allocator().set_candidate_filter(cricket::CF_HOST);
+  EXPECT_TRUE(CreateSession(cricket::ICE_CANDIDATE_COMPONENT_RTP));
+  session_->StartGettingPorts();
+  EXPECT_TRUE_WAIT(candidate_allocation_done_, kDefaultAllocationTimeout);
+  EXPECT_EQ(2U, candidates_.size()); // Host UDP/TCP candidates only.
+  EXPECT_EQ(2U, ports_.size()); // UDP/TCP ports only.
+  for (size_t i = 0; i < candidates_.size(); ++i) {
+    EXPECT_EQ(std::string(cricket::LOCAL_PORT_TYPE), candidates_[i].type());
+  }
+}
+
+// Host is behind the NAT.
+TEST_F(PortAllocatorTest, TestCandidateFilterWithReflexiveOnly) {
+  AddInterface(kPrivateAddr);
+  ResetWithNatServer(kStunAddr);
+
+  allocator().set_flags(cricket::PORTALLOCATOR_ENABLE_SHARED_UFRAG |
+                        cricket::PORTALLOCATOR_ENABLE_SHARED_SOCKET);
+  allocator().set_candidate_filter(cricket::CF_REFLEXIVE);
+  EXPECT_TRUE(CreateSession(cricket::ICE_CANDIDATE_COMPONENT_RTP));
+  session_->StartGettingPorts();
+  EXPECT_TRUE_WAIT(candidate_allocation_done_, kDefaultAllocationTimeout);
+  // The host is behind a NAT, so no private address will be exposed. Hence
+  // only the UDP port with a STUN candidate will be sent outside.
+  EXPECT_EQ(1U, candidates_.size()); // Only STUN candidate.
+  EXPECT_EQ(1U, ports_.size());  // Only UDP port will be in ready state.
+  for (size_t i = 0; i < candidates_.size(); ++i) {
+    EXPECT_EQ(std::string(cricket::STUN_PORT_TYPE), candidates_[i].type());
+  }
+}
+
+// Host is not behind the NAT.
+TEST_F(PortAllocatorTest, TestCandidateFilterWithReflexiveOnlyAndNoNAT) {
+  AddInterface(kClientAddr);
+  allocator().set_flags(cricket::PORTALLOCATOR_ENABLE_SHARED_UFRAG |
+                        cricket::PORTALLOCATOR_ENABLE_SHARED_SOCKET);
+  allocator().set_candidate_filter(cricket::CF_REFLEXIVE);
+  EXPECT_TRUE(CreateSession(cricket::ICE_CANDIDATE_COMPONENT_RTP));
+  session_->StartGettingPorts();
+  EXPECT_TRUE_WAIT(candidate_allocation_done_, kDefaultAllocationTimeout);
+  // The host has a public address, so both UDP and TCP candidates will be
+  // exposed.
+  EXPECT_EQ(2U, candidates_.size()); // Local UDP + TCP candidate.
+  EXPECT_EQ(2U, ports_.size());  //  UDP and TCP ports will be in ready state.
+  for (size_t i = 0; i < candidates_.size(); ++i) {
+    EXPECT_EQ(std::string(cricket::LOCAL_PORT_TYPE), candidates_[i].type());
+  }
+}
+
 TEST_F(PortAllocatorTest, TestBasicMuxFeatures) {
   AddInterface(kClientAddr);
   allocator().set_flags(cricket::PORTALLOCATOR_ENABLE_BUNDLE);
@@ -698,13 +790,8 @@
 // local candidates as client behind a nat.
 TEST_F(PortAllocatorTest, TestSharedSocketWithNat) {
   AddInterface(kClientAddr);
-  rtc::scoped_ptr<rtc::NATServer> nat_server(
-      CreateNatServer(kNatAddr, rtc::NAT_OPEN_CONE));
-  ServerAddresses stun_servers;
-  stun_servers.insert(kStunAddr);
-  allocator_.reset(new cricket::BasicPortAllocator(
-      &network_manager_, &nat_socket_factory_, stun_servers));
-  allocator_->set_step_delay(cricket::kMinimumStepDelay);
+  ResetWithNatServer(kStunAddr);
+
   allocator_->set_flags(allocator().flags() |
                         cricket::PORTALLOCATOR_ENABLE_SHARED_UFRAG |
                         cricket::PORTALLOCATOR_ENABLE_SHARED_SOCKET);
@@ -726,14 +813,8 @@
   turn_server_.AddInternalSocket(kTurnTcpIntAddr, cricket::PROTO_TCP);
   AddInterface(kClientAddr);
   allocator_.reset(new cricket::BasicPortAllocator(&network_manager_));
-  cricket::RelayServerConfig relay_server(cricket::RELAY_TURN);
-  cricket::RelayCredentials credentials(kTurnUsername, kTurnPassword);
-  relay_server.credentials = credentials;
-  relay_server.ports.push_back(cricket::ProtocolAddress(
-      kTurnUdpIntAddr, cricket::PROTO_UDP, false));
-  relay_server.ports.push_back(cricket::ProtocolAddress(
-      kTurnTcpIntAddr, cricket::PROTO_TCP, false));
-  allocator_->AddRelay(relay_server);
+
+  AddTurnServers(kTurnUdpIntAddr, kTurnTcpIntAddr);
 
   allocator_->set_step_delay(cricket::kMinimumStepDelay);
   allocator_->set_flags(allocator().flags() |
@@ -790,20 +871,10 @@
 // stun and turn candidates.
 TEST_F(PortAllocatorTest, TestSharedSocketWithNatUsingTurn) {
   AddInterface(kClientAddr);
-  rtc::scoped_ptr<rtc::NATServer> nat_server(
-      CreateNatServer(kNatAddr, rtc::NAT_OPEN_CONE));
-  ServerAddresses stun_servers;
-  stun_servers.insert(kStunAddr);
-  allocator_.reset(new cricket::BasicPortAllocator(
-      &network_manager_, &nat_socket_factory_, stun_servers));
-  cricket::RelayServerConfig relay_server(cricket::RELAY_TURN);
-  cricket::RelayCredentials credentials(kTurnUsername, kTurnPassword);
-  relay_server.credentials = credentials;
-  relay_server.ports.push_back(cricket::ProtocolAddress(
-      kTurnUdpIntAddr, cricket::PROTO_UDP, false));
-  allocator_->AddRelay(relay_server);
+  ResetWithNatServer(kStunAddr);
 
-  allocator_->set_step_delay(cricket::kMinimumStepDelay);
+  AddTurnServers(kTurnUdpIntAddr, rtc::SocketAddress());
+
   allocator_->set_flags(allocator().flags() |
                         cricket::PORTALLOCATOR_ENABLE_SHARED_UFRAG |
                         cricket::PORTALLOCATOR_ENABLE_SHARED_SOCKET |
@@ -829,6 +900,45 @@
   EXPECT_EQ(1U, ports_[1]->Candidates().size());
 }
 
+// Test that when PORTALLOCATOR_ENABLE_SHARED_SOCKET is enabled and the TURN
+// server is also used as the STUN server, we should get 'local', 'stun', and
+// 'relay' candidates.
+TEST_F(PortAllocatorTest, TestSharedSocketWithNatUsingTurnAsStun) {
+  AddInterface(kClientAddr);
+  ResetWithNatServer(kTurnUdpIntAddr);
+  AddTurnServers(kTurnUdpIntAddr, rtc::SocketAddress());
+
+  // Must set the step delay to 0 to make sure the relay allocation phase is
+  // started before the STUN candidates are obtained, so that the STUN binding
+  // response is processed when both StunPort and TurnPort exist to reproduce
+  // webrtc issue 3537.
+  allocator_->set_step_delay(0);
+  allocator_->set_flags(allocator().flags() |
+                        cricket::PORTALLOCATOR_ENABLE_SHARED_UFRAG |
+                        cricket::PORTALLOCATOR_ENABLE_SHARED_SOCKET |
+                        cricket::PORTALLOCATOR_DISABLE_TCP);
+
+  EXPECT_TRUE(CreateSession(cricket::ICE_CANDIDATE_COMPONENT_RTP));
+  session_->StartGettingPorts();
+
+  ASSERT_EQ_WAIT(3U, candidates_.size(), kDefaultAllocationTimeout);
+  EXPECT_PRED5(CheckCandidate, candidates_[0],
+      cricket::ICE_CANDIDATE_COMPONENT_RTP, "local", "udp", kClientAddr);
+  EXPECT_PRED5(CheckCandidate, candidates_[1],
+      cricket::ICE_CANDIDATE_COMPONENT_RTP, "stun", "udp",
+      rtc::SocketAddress(kNatAddr.ipaddr(), 0));
+  EXPECT_PRED5(CheckCandidate, candidates_[2],
+      cricket::ICE_CANDIDATE_COMPONENT_RTP, "relay", "udp",
+      rtc::SocketAddress(kTurnUdpExtAddr.ipaddr(), 0));
+  EXPECT_EQ(candidates_[2].related_address(), candidates_[1].address());
+
+  EXPECT_TRUE_WAIT(candidate_allocation_done_, kDefaultAllocationTimeout);
+  EXPECT_EQ(3U, candidates_.size());
+  // Local port will be created first and then TURN port.
+  EXPECT_EQ(2U, ports_[0]->Candidates().size());
+  EXPECT_EQ(1U, ports_[1]->Candidates().size());
+}
+
 // This test verifies when PORTALLOCATOR_ENABLE_SHARED_SOCKET flag is enabled
 // and fail to generate STUN candidate, local UDP candidate is generated
 // properly.
diff --git a/p2p/client/sessionmanagertask.h b/p2p/client/sessionmanagertask.h
index d7d9733..e16d9d6 100644
--- a/p2p/client/sessionmanagertask.h
+++ b/p2p/client/sessionmanagertask.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _SESSIONMANAGERTASK_H_
-#define _SESSIONMANAGERTASK_H_
+#ifndef TALK_P2P_CLIENT_SESSIONMANAGERTASK_H_
+#define TALK_P2P_CLIENT_SESSIONMANAGERTASK_H_
 
 #include "talk/p2p/base/sessionmanager.h"
 #include "talk/p2p/client/sessionsendtask.h"
@@ -90,4 +90,4 @@
 
 }  // namespace cricket
 
-#endif // _SESSIONMANAGERTASK_H_
+#endif // TALK_P2P_CLIENT_SESSIONMANAGERTASK_H_
diff --git a/session/media/call.cc b/session/media/call.cc
index 56d886d..4fbdd0f 100644
--- a/session/media/call.cc
+++ b/session/media/call.cc
@@ -94,7 +94,6 @@
 Call::Call(MediaSessionClient* session_client)
     : id_(rtc::CreateRandomId()),
       session_client_(session_client),
-      local_renderer_(NULL),
       has_video_(false),
       has_data_(false),
       muted_(false),
@@ -207,13 +206,6 @@
   return session->SendInfoMessage(elems, session->remote_name());
 }
 
-void Call::SetLocalRenderer(VideoRenderer* renderer) {
-  local_renderer_ = renderer;
-  if (session_client_->GetFocus() == this) {
-    session_client_->channel_manager()->SetLocalRenderer(renderer);
-  }
-}
-
 void Call::SetVideoRenderer(Session* session, uint32 ssrc,
                             VideoRenderer* renderer) {
   VideoChannel* video_channel = GetVideoChannel(session);
@@ -418,8 +410,6 @@
   for (it = media_session_map_.begin(); it != media_session_map_.end(); ++it) {
     EnableSessionChannels(it->second.session, enable);
   }
-  session_client_->channel_manager()->SetLocalRenderer(
-      (enable) ? local_renderer_ : NULL);
 }
 
 void Call::EnableSessionChannels(Session* session, bool enable) {
diff --git a/session/media/call.h b/session/media/call.h
index a81e74a..be9397e 100644
--- a/session/media/call.h
+++ b/session/media/call.h
@@ -96,7 +96,6 @@
   void Terminate();
   bool SendViewRequest(Session* session,
                        const ViewRequest& view_request);
-  void SetLocalRenderer(VideoRenderer* renderer);
   void SetVideoRenderer(Session* session, uint32 ssrc,
                         VideoRenderer* renderer);
   void StartConnectionMonitor(Session* session, int cms);
@@ -284,7 +283,6 @@
   MediaSessionMap media_session_map_;
 
   std::map<std::string, CurrentSpeakerMonitor*> speaker_monitor_map_;
-  VideoRenderer* local_renderer_;
   bool has_video_;
   bool has_data_;
   bool muted_;
diff --git a/session/media/channelmanager.cc b/session/media/channelmanager.cc
index a1cdcc0..199bc86 100644
--- a/session/media/channelmanager.cc
+++ b/session/media/channelmanager.cc
@@ -137,6 +137,12 @@
       this, &ChannelManager::OnVideoCaptureStateChange);
   capture_manager_->SignalCapturerStateChange.connect(
       this, &ChannelManager::OnVideoCaptureStateChange);
+
+  if (worker_thread_ != rtc::Thread::Current()) {
+    // Do not allow invoking calls to other threads on the worker thread.
+    worker_thread_->Invoke<bool>(
+        rtc::Bind(&rtc::Thread::SetAllowBlockingCalls, worker_thread_, false));
+  }
 }
 
 ChannelManager::~ChannelManager() {
@@ -277,10 +283,6 @@
       if (default_video_encoder_config_.max_codec.id != 0) {
         SetDefaultVideoEncoderConfig(default_video_encoder_config_);
       }
-      // And the local renderer.
-      if (local_renderer_) {
-        SetLocalRenderer(local_renderer_);
-      }
     }
   }
   return initialized_;
@@ -750,19 +752,6 @@
   return ret;
 }
 
-bool ChannelManager::SetLocalRenderer(VideoRenderer* renderer) {
-  bool ret = true;
-  if (initialized_) {
-    ret = worker_thread_->Invoke<bool>(
-        Bind(&MediaEngineInterface::SetLocalRenderer,
-             media_engine_.get(), renderer));
-  }
-  if (ret) {
-    local_renderer_ = renderer;
-  }
-  return ret;
-}
-
 void ChannelManager::SetVoiceLogging(int level, const char* filter) {
   if (initialized_) {
     worker_thread_->Invoke<void>(
diff --git a/session/media/channelmanager.h b/session/media/channelmanager.h
index f7a2890..fa79014 100644
--- a/session/media/channelmanager.h
+++ b/session/media/channelmanager.h
@@ -170,8 +170,6 @@
   // Starts/stops the local microphone and enables polling of the input level.
   bool SetLocalMonitor(bool enable);
   bool monitoring() const { return monitoring_; }
-  // Sets the local renderer where to renderer the local camera.
-  bool SetLocalRenderer(VideoRenderer* renderer);
   bool capturing() const { return capturing_; }
 
   // Configures the logging output of the mediaengine(s).
diff --git a/session/media/channelmanager_unittest.cc b/session/media/channelmanager_unittest.cc
index f234732..98f1400 100644
--- a/session/media/channelmanager_unittest.cc
+++ b/session/media/channelmanager_unittest.cc
@@ -509,22 +509,6 @@
   EXPECT_EQ(60, level);
 }
 
-// Test that a value set before Init is applied properly.
-TEST_F(ChannelManagerTest, SetLocalRendererBeforeInit) {
-  cricket::NullVideoRenderer renderer;
-  EXPECT_TRUE(cm_->SetLocalRenderer(&renderer));
-  EXPECT_TRUE(cm_->Init());
-  EXPECT_EQ(&renderer, fme_->local_renderer());
-}
-
-// Test that a value set after init is passed through properly.
-TEST_F(ChannelManagerTest, SetLocalRenderer) {
-  cricket::NullVideoRenderer renderer;
-  EXPECT_TRUE(cm_->Init());
-  EXPECT_TRUE(cm_->SetLocalRenderer(&renderer));
-  EXPECT_EQ(&renderer, fme_->local_renderer());
-}
-
 // Test that logging options set before Init are applied properly,
 // and retained even after Init.
 TEST_F(ChannelManagerTest, SetLoggingBeforeInit) {
diff --git a/session/media/mediamessages.cc b/session/media/mediamessages.cc
index 1a5094e..6c9f681 100644
--- a/session/media/mediamessages.cc
+++ b/session/media/mediamessages.cc
@@ -34,7 +34,7 @@
 #include "talk/p2p/base/constants.h"
 #include "talk/p2p/base/parsing.h"
 #include "talk/session/media/mediasessionclient.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/base/stringencode.h"
 
diff --git a/session/media/mediamessages_unittest.cc b/session/media/mediamessages_unittest.cc
index c95053d..9ebd38c 100644
--- a/session/media/mediamessages_unittest.cc
+++ b/session/media/mediamessages_unittest.cc
@@ -32,7 +32,7 @@
 
 #include "talk/p2p/base/constants.h"
 #include "talk/session/media/mediasessionclient.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "webrtc/base/gunit.h"
 #include "webrtc/base/scoped_ptr.h"
 
diff --git a/session/media/mediamonitor.h b/session/media/mediamonitor.h
index d549362..89740a8 100644
--- a/session/media/mediamonitor.h
+++ b/session/media/mediamonitor.h
@@ -34,6 +34,7 @@
 #include "webrtc/base/criticalsection.h"
 #include "webrtc/base/sigslot.h"
 #include "webrtc/base/thread.h"
+#include "webrtc/base/thread_annotations.h"
 
 namespace cricket {
 
@@ -77,7 +78,7 @@
     media_info_.Clear();
     media_channel_->GetStats(&media_info_);
   }
-  virtual void Update() {
+  virtual void Update() EXCLUSIVE_LOCKS_REQUIRED(crit_) {
     MI stats(media_info_);
     crit_.Leave();
     SignalUpdate(media_channel_, stats);
diff --git a/session/media/mediasession.cc b/session/media/mediasession.cc
index 45e321f..92dd257 100644
--- a/session/media/mediasession.cc
+++ b/session/media/mediasession.cc
@@ -1170,28 +1170,28 @@
   if (current_description) {
     ContentInfos::const_iterator it = current_description->contents().begin();
     for (; it != current_description->contents().end(); ++it) {
-      if (IsMediaContentOfType(&*it, MEDIA_TYPE_AUDIO) && options.has_audio) {
+      if (IsMediaContentOfType(&*it, MEDIA_TYPE_AUDIO)) {
         if (!AddAudioContentForOffer(options, current_description,
                                      audio_rtp_extensions, audio_codecs,
                                      &current_streams, offer.get())) {
           return NULL;
         }
         audio_added = true;
-      } else if (IsMediaContentOfType(&*it, MEDIA_TYPE_VIDEO) &&
-                 options.has_video) {
+      } else if (IsMediaContentOfType(&*it, MEDIA_TYPE_VIDEO)) {
         if (!AddVideoContentForOffer(options, current_description,
                                      video_rtp_extensions, video_codecs,
                                      &current_streams, offer.get())) {
           return NULL;
         }
         video_added = true;
-      } else if (IsMediaContentOfType(&*it, MEDIA_TYPE_DATA) &&
-                 options.has_data()) {
+      } else if (IsMediaContentOfType(&*it, MEDIA_TYPE_DATA)) {
         if (!AddDataContentForOffer(options, current_description, &data_codecs,
                                     &current_streams, offer.get())) {
           return NULL;
         }
         data_added = true;
+      } else {
+        ASSERT(false);
       }
     }
   }
@@ -1459,6 +1459,7 @@
 
   bool secure_transport = (transport_desc_factory_->secure() != SEC_DISABLED);
   SetMediaProtocol(secure_transport, audio.get());
+
   desc->AddContent(CN_AUDIO, NS_JINGLE_RTP, audio.release());
   if (!AddTransportOffer(CN_AUDIO, options.transport_options,
                          current_description, desc)) {
diff --git a/session/media/mediasessionclient.cc b/session/media/mediasessionclient.cc
index a1096b6..826909a 100644
--- a/session/media/mediasessionclient.cc
+++ b/session/media/mediasessionclient.cc
@@ -36,8 +36,8 @@
 #include "talk/p2p/base/parsing.h"
 #include "talk/session/media/mediamessages.h"
 #include "talk/session/media/srtpfilter.h"
-#include "talk/xmllite/qname.h"
-#include "talk/xmllite/xmlconstants.h"
+#include "webrtc/libjingle/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/xmlconstants.h"
 #include "talk/xmpp/constants.h"
 #include "webrtc/base/helpers.h"
 #include "webrtc/base/logging.h"
diff --git a/session/media/mediasessionclient_unittest.cc b/session/media/mediasessionclient_unittest.cc
index 3e36b5a..2998228 100644
--- a/session/media/mediasessionclient_unittest.cc
+++ b/session/media/mediasessionclient_unittest.cc
@@ -34,9 +34,9 @@
 #include "talk/p2p/base/constants.h"
 #include "talk/p2p/client/basicportallocator.h"
 #include "talk/session/media/mediasessionclient.h"
-#include "talk/xmllite/xmlbuilder.h"
-#include "talk/xmllite/xmlelement.h"
-#include "talk/xmllite/xmlprinter.h"
+#include "webrtc/libjingle/xmllite/xmlbuilder.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlprinter.h"
 #include "talk/xmpp/constants.h"
 #include "webrtc/base/gunit.h"
 #include "webrtc/base/logging.h"
diff --git a/session/tunnel/securetunnelsessionclient.cc b/session/tunnel/securetunnelsessionclient.cc
index f4a3346..cb41c3b 100644
--- a/session/tunnel/securetunnelsessionclient.cc
+++ b/session/tunnel/securetunnelsessionclient.cc
@@ -30,7 +30,7 @@
 #include "talk/p2p/base/transportchannel.h"
 #include "talk/session/tunnel/pseudotcpchannel.h"
 #include "talk/session/tunnel/securetunnelsessionclient.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "webrtc/base/basicdefs.h"
 #include "webrtc/base/basictypes.h"
 #include "webrtc/base/common.h"
diff --git a/session/tunnel/tunnelsessionclient.cc b/session/tunnel/tunnelsessionclient.cc
index 12275fb..7221db4 100644
--- a/session/tunnel/tunnelsessionclient.cc
+++ b/session/tunnel/tunnelsessionclient.cc
@@ -28,7 +28,7 @@
 #include "pseudotcpchannel.h"
 #include "talk/p2p/base/constants.h"
 #include "talk/p2p/base/transportchannel.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "tunnelsessionclient.h"
 #include "webrtc/base/basicdefs.h"
 #include "webrtc/base/basictypes.h"
diff --git a/session/tunnel/tunnelsessionclient.h b/session/tunnel/tunnelsessionclient.h
index ac12faf..d3fa64a 100644
--- a/session/tunnel/tunnelsessionclient.h
+++ b/session/tunnel/tunnelsessionclient.h
@@ -36,7 +36,7 @@
 #include "talk/p2p/base/sessionclient.h"
 #include "talk/p2p/base/sessiondescription.h"
 #include "talk/p2p/base/sessionmanager.h"
-#include "talk/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/qname.h"
 #include "talk/xmpp/constants.h"
 #include "webrtc/base/criticalsection.h"
 #include "webrtc/base/stream.h"
diff --git a/sound/alsasoundsystem.cc b/sound/alsasoundsystem.cc
deleted file mode 100644
index d9960bb..0000000
--- a/sound/alsasoundsystem.cc
+++ /dev/null
@@ -1,761 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/sound/alsasoundsystem.h"
-
-#include "talk/sound/sounddevicelocator.h"
-#include "talk/sound/soundinputstreaminterface.h"
-#include "talk/sound/soundoutputstreaminterface.h"
-#include "webrtc/base/common.h"
-#include "webrtc/base/logging.h"
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/base/stringutils.h"
-#include "webrtc/base/timeutils.h"
-#include "webrtc/base/worker.h"
-
-namespace cricket {
-
-// Lookup table from the cricket format enum in soundsysteminterface.h to
-// ALSA's enums.
-static const snd_pcm_format_t kCricketFormatToAlsaFormatTable[] = {
-  // The order here must match the order in soundsysteminterface.h
-  SND_PCM_FORMAT_S16_LE,
-};
-
-// Lookup table for the size of a single sample of a given format.
-static const size_t kCricketFormatToSampleSizeTable[] = {
-  // The order here must match the order in soundsysteminterface.h
-  sizeof(int16_t),  // 2
-};
-
-// Minimum latency we allow, in microseconds. This is more or less arbitrary,
-// but it has to be at least large enough to be able to buffer data during a
-// missed context switch, and the typical Linux scheduling quantum is 10ms.
-static const int kMinimumLatencyUsecs = 20 * 1000;
-
-// The latency we'll use for kNoLatencyRequirements (chosen arbitrarily).
-static const int kDefaultLatencyUsecs = kMinimumLatencyUsecs * 2;
-
-// We translate newlines in ALSA device descriptions to hyphens.
-static const char kAlsaDescriptionSearch[] = "\n";
-static const char kAlsaDescriptionReplace[] = " - ";
-
-class AlsaDeviceLocator : public SoundDeviceLocator {
- public:
-  AlsaDeviceLocator(const std::string &name,
-                    const std::string &device_name)
-      : SoundDeviceLocator(name, device_name) {
-    // The ALSA descriptions have newlines in them, which won't show up in
-    // a drop-down box. Replace them with hyphens.
-    rtc::replace_substrs(kAlsaDescriptionSearch,
-                               sizeof(kAlsaDescriptionSearch) - 1,
-                               kAlsaDescriptionReplace,
-                               sizeof(kAlsaDescriptionReplace) - 1,
-                               &name_);
-  }
-
-  virtual SoundDeviceLocator *Copy() const {
-    return new AlsaDeviceLocator(*this);
-  }
-};
-
-// Functionality that is common to both AlsaInputStream and AlsaOutputStream.
-class AlsaStream {
- public:
-  AlsaStream(AlsaSoundSystem *alsa,
-             snd_pcm_t *handle,
-             size_t frame_size,
-             int wait_timeout_ms,
-             int flags,
-             int freq)
-      : alsa_(alsa),
-        handle_(handle),
-        frame_size_(frame_size),
-        wait_timeout_ms_(wait_timeout_ms),
-        flags_(flags),
-        freq_(freq) {
-  }
-
-  ~AlsaStream() {
-    Close();
-  }
-
-  // Waits for the stream to be ready to accept/return more data, and returns
-  // how much can be written/read, or 0 if we need to Wait() again.
-  snd_pcm_uframes_t Wait() {
-    snd_pcm_sframes_t frames;
-    // Ideally we would not use snd_pcm_wait() and instead hook snd_pcm_poll_*
-    // into PhysicalSocketServer, but PhysicalSocketServer is nasty enough
-    // already and the current clients of SoundSystemInterface do not run
-    // anything else on their worker threads, so snd_pcm_wait() is good enough.
-    frames = symbol_table()->snd_pcm_avail_update()(handle_);
-    if (frames < 0) {
-      LOG(LS_ERROR) << "snd_pcm_avail_update(): " << GetError(frames);
-      Recover(frames);
-      return 0;
-    } else if (frames > 0) {
-      // Already ready, so no need to wait.
-      return frames;
-    }
-    // Else no space/data available, so must wait.
-    int ready = symbol_table()->snd_pcm_wait()(handle_, wait_timeout_ms_);
-    if (ready < 0) {
-      LOG(LS_ERROR) << "snd_pcm_wait(): " << GetError(ready);
-      Recover(ready);
-      return 0;
-    } else if (ready == 0) {
-      // Timeout, so nothing can be written/read right now.
-      // We set the timeout to twice the requested latency, so continuous
-      // timeouts are indicative of a problem, so log as a warning.
-      LOG(LS_WARNING) << "Timeout while waiting on stream";
-      return 0;
-    }
-    // Else ready > 0 (i.e., 1), so it's ready. Get count.
-    frames = symbol_table()->snd_pcm_avail_update()(handle_);
-    if (frames < 0) {
-      LOG(LS_ERROR) << "snd_pcm_avail_update(): " << GetError(frames);
-      Recover(frames);
-      return 0;
-    } else if (frames == 0) {
-      // wait() said we were ready, so this ought to have been positive. Has
-      // been observed to happen in practice though.
-      LOG(LS_WARNING) << "Spurious wake-up";
-    }
-    return frames;
-  }
-
-  int CurrentDelayUsecs() {
-    if (!(flags_ & SoundSystemInterface::FLAG_REPORT_LATENCY)) {
-      return 0;
-    }
-
-    snd_pcm_sframes_t delay;
-    int err = symbol_table()->snd_pcm_delay()(handle_, &delay);
-    if (err != 0) {
-      LOG(LS_ERROR) << "snd_pcm_delay(): " << GetError(err);
-      Recover(err);
-      // We'd rather continue playout/capture with an incorrect delay than stop
-      // it altogether, so return a valid value.
-      return 0;
-    }
-    // The delay is in frames. Convert to microseconds.
-    return delay * rtc::kNumMicrosecsPerSec / freq_;
-  }
-
-  // Used to recover from certain recoverable errors, principally buffer overrun
-  // or underrun (identified as EPIPE). Without calling this the stream stays
-  // in the error state forever.
-  bool Recover(int error) {
-    int err;
-    err = symbol_table()->snd_pcm_recover()(
-        handle_,
-        error,
-        // Silent; i.e., no logging on stderr.
-        1);
-    if (err != 0) {
-      // Docs say snd_pcm_recover returns the original error if it is not one
-      // of the recoverable ones, so this log message will probably contain the
-      // same error twice.
-      LOG(LS_ERROR) << "Unable to recover from \"" << GetError(error) << "\": "
-                    << GetError(err);
-      return false;
-    }
-    if (error == -EPIPE &&  // Buffer underrun/overrun.
-        symbol_table()->snd_pcm_stream()(handle_) == SND_PCM_STREAM_CAPTURE) {
-      // For capture streams we also have to repeat the explicit start() to get
-      // data flowing again.
-      err = symbol_table()->snd_pcm_start()(handle_);
-      if (err != 0) {
-        LOG(LS_ERROR) << "snd_pcm_start(): " << GetError(err);
-        return false;
-      }
-    }
-    return true;
-  }
-
-  bool Close() {
-    if (handle_) {
-      int err;
-      err = symbol_table()->snd_pcm_drop()(handle_);
-      if (err != 0) {
-        LOG(LS_ERROR) << "snd_pcm_drop(): " << GetError(err);
-        // Continue anyways.
-      }
-      err = symbol_table()->snd_pcm_close()(handle_);
-      if (err != 0) {
-        LOG(LS_ERROR) << "snd_pcm_close(): " << GetError(err);
-        // Continue anyways.
-      }
-      handle_ = NULL;
-    }
-    return true;
-  }
-
-  AlsaSymbolTable *symbol_table() {
-    return &alsa_->symbol_table_;
-  }
-
-  snd_pcm_t *handle() {
-    return handle_;
-  }
-
-  const char *GetError(int err) {
-    return alsa_->GetError(err);
-  }
-
-  size_t frame_size() {
-    return frame_size_;
-  }
-
- private:
-  AlsaSoundSystem *alsa_;
-  snd_pcm_t *handle_;
-  size_t frame_size_;
-  int wait_timeout_ms_;
-  int flags_;
-  int freq_;
-
-  DISALLOW_COPY_AND_ASSIGN(AlsaStream);
-};
-
-// Implementation of an input stream. See soundinputstreaminterface.h regarding
-// thread-safety.
-class AlsaInputStream :
-    public SoundInputStreamInterface,
-    private rtc::Worker {
- public:
-  AlsaInputStream(AlsaSoundSystem *alsa,
-                  snd_pcm_t *handle,
-                  size_t frame_size,
-                  int wait_timeout_ms,
-                  int flags,
-                  int freq)
-      : stream_(alsa, handle, frame_size, wait_timeout_ms, flags, freq),
-        buffer_size_(0) {
-  }
-
-  virtual ~AlsaInputStream() {
-    bool success = StopReading();
-    // We need that to live.
-    VERIFY(success);
-  }
-
-  virtual bool StartReading() {
-    return StartWork();
-  }
-
-  virtual bool StopReading() {
-    return StopWork();
-  }
-
-  virtual bool GetVolume(int *volume) {
-    // TODO: Implement this.
-    return false;
-  }
-
-  virtual bool SetVolume(int volume) {
-    // TODO: Implement this.
-    return false;
-  }
-
-  virtual bool Close() {
-    return StopReading() && stream_.Close();
-  }
-
-  virtual int LatencyUsecs() {
-    return stream_.CurrentDelayUsecs();
-  }
-
- private:
-  // Inherited from Worker.
-  virtual void OnStart() {
-    HaveWork();
-  }
-
-  // Inherited from Worker.
-  virtual void OnHaveWork() {
-    // Block waiting for data.
-    snd_pcm_uframes_t avail = stream_.Wait();
-    if (avail > 0) {
-      // Data is available.
-      size_t size = avail * stream_.frame_size();
-      if (size > buffer_size_) {
-        // Must increase buffer size.
-        buffer_.reset(new char[size]);
-        buffer_size_ = size;
-      }
-      // Read all the data.
-      snd_pcm_sframes_t read = stream_.symbol_table()->snd_pcm_readi()(
-          stream_.handle(),
-          buffer_.get(),
-          avail);
-      if (read < 0) {
-        LOG(LS_ERROR) << "snd_pcm_readi(): " << GetError(read);
-        stream_.Recover(read);
-      } else if (read == 0) {
-        // Docs say this shouldn't happen.
-        ASSERT(false);
-        LOG(LS_ERROR) << "No data?";
-      } else {
-        // Got data. Pass it off to the app.
-        SignalSamplesRead(buffer_.get(),
-                          read * stream_.frame_size(),
-                          this);
-      }
-    }
-    // Check for more data with no delay, after any pending messages are
-    // dispatched.
-    HaveWork();
-  }
-
-  // Inherited from Worker.
-  virtual void OnStop() {
-    // Nothing to do.
-  }
-
-  const char *GetError(int err) {
-    return stream_.GetError(err);
-  }
-
-  AlsaStream stream_;
-  rtc::scoped_ptr<char[]> buffer_;
-  size_t buffer_size_;
-
-  DISALLOW_COPY_AND_ASSIGN(AlsaInputStream);
-};
-
-// Implementation of an output stream. See soundoutputstreaminterface.h
-// regarding thread-safety.
-class AlsaOutputStream :
-    public SoundOutputStreamInterface,
-    private rtc::Worker {
- public:
-  AlsaOutputStream(AlsaSoundSystem *alsa,
-                   snd_pcm_t *handle,
-                   size_t frame_size,
-                   int wait_timeout_ms,
-                   int flags,
-                   int freq)
-      : stream_(alsa, handle, frame_size, wait_timeout_ms, flags, freq) {
-  }
-
-  virtual ~AlsaOutputStream() {
-    bool success = DisableBufferMonitoring();
-    // We need that to live.
-    VERIFY(success);
-  }
-
-  virtual bool EnableBufferMonitoring() {
-    return StartWork();
-  }
-
-  virtual bool DisableBufferMonitoring() {
-    return StopWork();
-  }
-
-  virtual bool WriteSamples(const void *sample_data,
-                            size_t size) {
-    if (size % stream_.frame_size() != 0) {
-      // No client of SoundSystemInterface does this, so let's not support it.
-      // (If we wanted to support it, we'd basically just buffer the fractional
-      // frame until we get more data.)
-      ASSERT(false);
-      LOG(LS_ERROR) << "Writes with fractional frames are not supported";
-      return false;
-    }
-    snd_pcm_uframes_t frames = size / stream_.frame_size();
-    snd_pcm_sframes_t written = stream_.symbol_table()->snd_pcm_writei()(
-        stream_.handle(),
-        sample_data,
-        frames);
-    if (written < 0) {
-      LOG(LS_ERROR) << "snd_pcm_writei(): " << GetError(written);
-      stream_.Recover(written);
-      return false;
-    } else if (static_cast<snd_pcm_uframes_t>(written) < frames) {
-      // Shouldn't happen. Drop the rest of the data.
-      LOG(LS_ERROR) << "Stream wrote only " << written << " of " << frames
-                    << " frames!";
-      return false;
-    }
-    return true;
-  }
-
-  virtual bool GetVolume(int *volume) {
-    // TODO: Implement this.
-    return false;
-  }
-
-  virtual bool SetVolume(int volume) {
-    // TODO: Implement this.
-    return false;
-  }
-
-  virtual bool Close() {
-    return DisableBufferMonitoring() && stream_.Close();
-  }
-
-  virtual int LatencyUsecs() {
-    return stream_.CurrentDelayUsecs();
-  }
-
- private:
-  // Inherited from Worker.
-  virtual void OnStart() {
-    HaveWork();
-  }
-
-  // Inherited from Worker.
-  virtual void OnHaveWork() {
-    snd_pcm_uframes_t avail = stream_.Wait();
-    if (avail > 0) {
-      size_t space = avail * stream_.frame_size();
-      SignalBufferSpace(space, this);
-    }
-    HaveWork();
-  }
-
-  // Inherited from Worker.
-  virtual void OnStop() {
-    // Nothing to do.
-  }
-
-  const char *GetError(int err) {
-    return stream_.GetError(err);
-  }
-
-  AlsaStream stream_;
-
-  DISALLOW_COPY_AND_ASSIGN(AlsaOutputStream);
-};
-
-AlsaSoundSystem::AlsaSoundSystem() : initialized_(false) {}
-
-AlsaSoundSystem::~AlsaSoundSystem() {
-  // Not really necessary, because Terminate() doesn't really do anything.
-  Terminate();
-}
-
-bool AlsaSoundSystem::Init() {
-  if (IsInitialized()) {
-    return true;
-  }
-
-  // Load libasound.
-  if (!symbol_table_.Load()) {
-    // Very odd for a Linux machine to not have a working libasound ...
-    LOG(LS_ERROR) << "Failed to load symbol table";
-    return false;
-  }
-
-  initialized_ = true;
-
-  return true;
-}
-
-void AlsaSoundSystem::Terminate() {
-  if (!IsInitialized()) {
-    return;
-  }
-
-  initialized_ = false;
-
-  // We do not unload the symbol table because we may need it again soon if
-  // Init() is called again.
-}
-
-bool AlsaSoundSystem::EnumeratePlaybackDevices(
-    SoundDeviceLocatorList *devices) {
-  return EnumerateDevices(devices, false);
-}
-
-bool AlsaSoundSystem::EnumerateCaptureDevices(
-    SoundDeviceLocatorList *devices) {
-  return EnumerateDevices(devices, true);
-}
-
-bool AlsaSoundSystem::GetDefaultPlaybackDevice(SoundDeviceLocator **device) {
-  return GetDefaultDevice(device);
-}
-
-bool AlsaSoundSystem::GetDefaultCaptureDevice(SoundDeviceLocator **device) {
-  return GetDefaultDevice(device);
-}
-
-SoundOutputStreamInterface *AlsaSoundSystem::OpenPlaybackDevice(
-    const SoundDeviceLocator *device,
-    const OpenParams &params) {
-  return OpenDevice<SoundOutputStreamInterface>(
-      device,
-      params,
-      SND_PCM_STREAM_PLAYBACK,
-      &AlsaSoundSystem::StartOutputStream);
-}
-
-SoundInputStreamInterface *AlsaSoundSystem::OpenCaptureDevice(
-    const SoundDeviceLocator *device,
-    const OpenParams &params) {
-  return OpenDevice<SoundInputStreamInterface>(
-      device,
-      params,
-      SND_PCM_STREAM_CAPTURE,
-      &AlsaSoundSystem::StartInputStream);
-}
-
-const char *AlsaSoundSystem::GetName() const {
-  return "ALSA";
-}
-
-bool AlsaSoundSystem::EnumerateDevices(
-    SoundDeviceLocatorList *devices,
-    bool capture_not_playback) {
-  ClearSoundDeviceLocatorList(devices);
-
-  if (!IsInitialized()) {
-    return false;
-  }
-
-  const char *type = capture_not_playback ? "Input" : "Output";
-  // dmix and dsnoop are only for playback and capture, respectively, but ALSA
-  // stupidly includes them in both lists.
-  const char *ignore_prefix = capture_not_playback ? "dmix:" : "dsnoop:";
-  // (ALSA lists many more "devices" of questionable interest, but we show them
-  // just in case the weird devices may actually be desirable for some
-  // users/systems.)
-  const char *ignore_default = "default";
-  const char *ignore_null = "null";
-  const char *ignore_pulse = "pulse";
-  // The 'pulse' entry has a habit of mysteriously disappearing when you query
-  // a second time. Remove it from our list. (GIPS lib did the same thing.)
-  int err;
-
-  void **hints;
-  err = symbol_table_.snd_device_name_hint()(-1,     // All cards
-                                             "pcm",  // Only PCM devices
-                                             &hints);
-  if (err != 0) {
-    LOG(LS_ERROR) << "snd_device_name_hint(): " << GetError(err);
-    return false;
-  }
-
-  for (void **list = hints; *list != NULL; ++list) {
-    char *actual_type = symbol_table_.snd_device_name_get_hint()(*list, "IOID");
-    if (actual_type) {  // NULL means it's both.
-      bool wrong_type = (strcmp(actual_type, type) != 0);
-      free(actual_type);
-      if (wrong_type) {
-        // Wrong type of device (i.e., input vs. output).
-        continue;
-      }
-    }
-
-    char *name = symbol_table_.snd_device_name_get_hint()(*list, "NAME");
-    if (!name) {
-      LOG(LS_ERROR) << "Device has no name???";
-      // Skip it.
-      continue;
-    }
-
-    // Now check if we actually want to show this device.
-    if (strcmp(name, ignore_default) != 0 &&
-        strcmp(name, ignore_null) != 0 &&
-        strcmp(name, ignore_pulse) != 0 &&
-        !rtc::starts_with(name, ignore_prefix)) {
-
-      // Yes, we do.
-      char *desc = symbol_table_.snd_device_name_get_hint()(*list, "DESC");
-      if (!desc) {
-        // Virtual devices don't necessarily have descriptions. Use their names
-        // instead (not pretty!).
-        desc = name;
-      }
-
-      AlsaDeviceLocator *device = new AlsaDeviceLocator(desc, name);
-
-      devices->push_back(device);
-
-      if (desc != name) {
-        free(desc);
-      }
-    }
-
-    free(name);
-  }
-
-  err = symbol_table_.snd_device_name_free_hint()(hints);
-  if (err != 0) {
-    LOG(LS_ERROR) << "snd_device_name_free_hint(): " << GetError(err);
-    // Continue and return true anyways, since we did get the whole list.
-  }
-
-  return true;
-}
-
-bool AlsaSoundSystem::GetDefaultDevice(SoundDeviceLocator **device) {
-  if (!IsInitialized()) {
-    return false;
-  }
-  *device = new AlsaDeviceLocator("Default device", "default");
-  return true;
-}
-
-inline size_t AlsaSoundSystem::FrameSize(const OpenParams &params) {
-  ASSERT(static_cast<int>(params.format) <
-         ARRAY_SIZE(kCricketFormatToSampleSizeTable));
-  return kCricketFormatToSampleSizeTable[params.format] * params.channels;
-}
-
-template <typename StreamInterface>
-StreamInterface *AlsaSoundSystem::OpenDevice(
-    const SoundDeviceLocator *device,
-    const OpenParams &params,
-    snd_pcm_stream_t type,
-    StreamInterface *(AlsaSoundSystem::*start_fn)(
-        snd_pcm_t *handle,
-        size_t frame_size,
-        int wait_timeout_ms,
-        int flags,
-        int freq)) {
-
-  if (!IsInitialized()) {
-    return NULL;
-  }
-
-  StreamInterface *stream;
-  int err;
-
-  const char *dev = static_cast<const AlsaDeviceLocator *>(device)->
-      device_name().c_str();
-
-  snd_pcm_t *handle = NULL;
-  err = symbol_table_.snd_pcm_open()(
-      &handle,
-      dev,
-      type,
-      // No flags.
-      0);
-  if (err != 0) {
-    LOG(LS_ERROR) << "snd_pcm_open(" << dev << "): " << GetError(err);
-    return NULL;
-  }
-  LOG(LS_VERBOSE) << "Opening " << dev;
-  ASSERT(handle);  // If open succeeded, handle ought to be valid
-
-  // Compute requested latency in microseconds.
-  int latency;
-  if (params.latency == kNoLatencyRequirements) {
-    latency = kDefaultLatencyUsecs;
-  } else {
-    // kLowLatency is 0, so we treat it the same as a request for zero latency.
-    // Compute what the user asked for.
-    latency = rtc::kNumMicrosecsPerSec *
-        params.latency /
-        params.freq /
-        FrameSize(params);
-    // And this is what we'll actually use.
-    latency = rtc::_max(latency, kMinimumLatencyUsecs);
-  }
-
-  ASSERT(static_cast<int>(params.format) <
-         ARRAY_SIZE(kCricketFormatToAlsaFormatTable));
-
-  err = symbol_table_.snd_pcm_set_params()(
-      handle,
-      kCricketFormatToAlsaFormatTable[params.format],
-      // SoundSystemInterface only supports interleaved audio.
-      SND_PCM_ACCESS_RW_INTERLEAVED,
-      params.channels,
-      params.freq,
-      1,  // Allow ALSA to resample.
-      latency);
-  if (err != 0) {
-    LOG(LS_ERROR) << "snd_pcm_set_params(): " << GetError(err);
-    goto fail;
-  }
-
-  err = symbol_table_.snd_pcm_prepare()(handle);
-  if (err != 0) {
-    LOG(LS_ERROR) << "snd_pcm_prepare(): " << GetError(err);
-    goto fail;
-  }
-
-  stream = (this->*start_fn)(
-      handle,
-      FrameSize(params),
-      // We set the wait time to twice the requested latency, so that wait
-      // timeouts should be rare.
-      2 * latency / rtc::kNumMicrosecsPerMillisec,
-      params.flags,
-      params.freq);
-  if (stream) {
-    return stream;
-  }
-  // Else fall through.
-
- fail:
-  err = symbol_table_.snd_pcm_close()(handle);
-  if (err != 0) {
-    LOG(LS_ERROR) << "snd_pcm_close(): " << GetError(err);
-  }
-  return NULL;
-}
-
-SoundOutputStreamInterface *AlsaSoundSystem::StartOutputStream(
-    snd_pcm_t *handle,
-    size_t frame_size,
-    int wait_timeout_ms,
-    int flags,
-    int freq) {
-  // Nothing to do here but instantiate the stream.
-  return new AlsaOutputStream(
-      this, handle, frame_size, wait_timeout_ms, flags, freq);
-}
-
-SoundInputStreamInterface *AlsaSoundSystem::StartInputStream(
-    snd_pcm_t *handle,
-    size_t frame_size,
-    int wait_timeout_ms,
-    int flags,
-    int freq) {
-  // Output streams start automatically once enough data has been written, but
-  // input streams must be started manually or else snd_pcm_wait() will never
-  // return true.
-  int err;
-  err = symbol_table_.snd_pcm_start()(handle);
-  if (err != 0) {
-    LOG(LS_ERROR) << "snd_pcm_start(): " << GetError(err);
-    return NULL;
-  }
-  return new AlsaInputStream(
-      this, handle, frame_size, wait_timeout_ms, flags, freq);
-}
-
-inline const char *AlsaSoundSystem::GetError(int err) {
-  return symbol_table_.snd_strerror()(err);
-}
-
-}  // namespace cricket
diff --git a/sound/alsasoundsystem.h b/sound/alsasoundsystem.h
deleted file mode 100644
index b3abfb0..0000000
--- a/sound/alsasoundsystem.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_ALSASOUNDSYSTEM_H_
-#define TALK_SOUND_ALSASOUNDSYSTEM_H_
-
-#include "talk/sound/alsasymboltable.h"
-#include "talk/sound/soundsysteminterface.h"
-#include "webrtc/base/constructormagic.h"
-
-namespace cricket {
-
-class AlsaStream;
-class AlsaInputStream;
-class AlsaOutputStream;
-
-// Sound system implementation for ALSA, the predominant sound device API on
-// Linux (but typically not used directly by applications anymore).
-class AlsaSoundSystem : public SoundSystemInterface {
-  friend class AlsaStream;
-  friend class AlsaInputStream;
-  friend class AlsaOutputStream;
- public:
-  static SoundSystemInterface *Create() {
-    return new AlsaSoundSystem();
-  }
-
-  AlsaSoundSystem();
-
-  virtual ~AlsaSoundSystem();
-
-  virtual bool Init();
-  virtual void Terminate();
-
-  virtual bool EnumeratePlaybackDevices(SoundDeviceLocatorList *devices);
-  virtual bool EnumerateCaptureDevices(SoundDeviceLocatorList *devices);
-
-  virtual bool GetDefaultPlaybackDevice(SoundDeviceLocator **device);
-  virtual bool GetDefaultCaptureDevice(SoundDeviceLocator **device);
-
-  virtual SoundOutputStreamInterface *OpenPlaybackDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params);
-  virtual SoundInputStreamInterface *OpenCaptureDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params);
-
-  virtual const char *GetName() const;
-
- private:
-  bool IsInitialized() { return initialized_; }
-
-  bool EnumerateDevices(SoundDeviceLocatorList *devices,
-                        bool capture_not_playback);
-
-  bool GetDefaultDevice(SoundDeviceLocator **device);
-
-  static size_t FrameSize(const OpenParams &params);
-
-  template <typename StreamInterface>
-  StreamInterface *OpenDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params,
-      snd_pcm_stream_t type,
-      StreamInterface *(AlsaSoundSystem::*start_fn)(
-          snd_pcm_t *handle,
-          size_t frame_size,
-          int wait_timeout_ms,
-          int flags,
-          int freq));
-
-  SoundOutputStreamInterface *StartOutputStream(
-      snd_pcm_t *handle,
-      size_t frame_size,
-      int wait_timeout_ms,
-      int flags,
-      int freq);
-
-  SoundInputStreamInterface *StartInputStream(
-      snd_pcm_t *handle,
-      size_t frame_size,
-      int wait_timeout_ms,
-      int flags,
-      int freq);
-
-  const char *GetError(int err);
-
-  bool initialized_;
-  AlsaSymbolTable symbol_table_;
-
-  DISALLOW_COPY_AND_ASSIGN(AlsaSoundSystem);
-};
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_ALSASOUNDSYSTEM_H_
diff --git a/sound/alsasymboltable.cc b/sound/alsasymboltable.cc
deleted file mode 100644
index 570b4b4..0000000
--- a/sound/alsasymboltable.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/sound/alsasymboltable.h"
-
-namespace cricket {
-
-#define LATE_BINDING_SYMBOL_TABLE_CLASS_NAME ALSA_SYMBOLS_CLASS_NAME
-#define LATE_BINDING_SYMBOL_TABLE_SYMBOLS_LIST ALSA_SYMBOLS_LIST
-#define LATE_BINDING_SYMBOL_TABLE_DLL_NAME "libasound.so.2"
-#include "webrtc/base/latebindingsymboltable.cc.def"
-
-}  // namespace cricket
diff --git a/sound/alsasymboltable.h b/sound/alsasymboltable.h
deleted file mode 100644
index 98f1645..0000000
--- a/sound/alsasymboltable.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_ALSASYMBOLTABLE_H_
-#define TALK_SOUND_ALSASYMBOLTABLE_H_
-
-#include <alsa/asoundlib.h>
-
-#include "webrtc/base/latebindingsymboltable.h"
-
-namespace cricket {
-
-#define ALSA_SYMBOLS_CLASS_NAME AlsaSymbolTable
-// The ALSA symbols we need, as an X-Macro list.
-// This list must contain precisely every libasound function that is used in
-// alsasoundsystem.cc.
-#define ALSA_SYMBOLS_LIST \
-  X(snd_device_name_free_hint) \
-  X(snd_device_name_get_hint) \
-  X(snd_device_name_hint) \
-  X(snd_pcm_avail_update) \
-  X(snd_pcm_close) \
-  X(snd_pcm_delay) \
-  X(snd_pcm_drop) \
-  X(snd_pcm_open) \
-  X(snd_pcm_prepare) \
-  X(snd_pcm_readi) \
-  X(snd_pcm_recover) \
-  X(snd_pcm_set_params) \
-  X(snd_pcm_start) \
-  X(snd_pcm_stream) \
-  X(snd_pcm_wait) \
-  X(snd_pcm_writei) \
-  X(snd_strerror)
-
-#define LATE_BINDING_SYMBOL_TABLE_CLASS_NAME ALSA_SYMBOLS_CLASS_NAME
-#define LATE_BINDING_SYMBOL_TABLE_SYMBOLS_LIST ALSA_SYMBOLS_LIST
-#include "webrtc/base/latebindingsymboltable.h.def"
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_ALSASYMBOLTABLE_H_
diff --git a/sound/automaticallychosensoundsystem.h b/sound/automaticallychosensoundsystem.h
deleted file mode 100644
index 10ea0df..0000000
--- a/sound/automaticallychosensoundsystem.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice, 
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products 
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_AUTOMATICALLYCHOSENSOUNDSYSTEM_H_
-#define TALK_SOUND_AUTOMATICALLYCHOSENSOUNDSYSTEM_H_
-
-#include "talk/sound/soundsysteminterface.h"
-#include "talk/sound/soundsystemproxy.h"
-#include "webrtc/base/common.h"
-#include "webrtc/base/logging.h"
-#include "webrtc/base/scoped_ptr.h"
-
-namespace cricket {
-
-// A function type that creates an instance of a sound system implementation.
-typedef SoundSystemInterface *(*SoundSystemCreator)();
-
-// An AutomaticallyChosenSoundSystem is a sound system proxy that defers to
-// an instance of the first sound system implementation in a list that
-// successfully initializes.
-template <const SoundSystemCreator kSoundSystemCreators[], int kNumSoundSystems>
-class AutomaticallyChosenSoundSystem : public SoundSystemProxy {
- public:
-  // Chooses and initializes the underlying sound system.
-  virtual bool Init();
-  // Terminates the underlying sound system implementation, but caches it for
-  // future re-use.
-  virtual void Terminate();
-
-  virtual const char *GetName() const;
-
- private:
-  rtc::scoped_ptr<SoundSystemInterface> sound_systems_[kNumSoundSystems];
-};
-
-template <const SoundSystemCreator kSoundSystemCreators[], int kNumSoundSystems>
-bool AutomaticallyChosenSoundSystem<kSoundSystemCreators,
-                                    kNumSoundSystems>::Init() {
-  if (wrapped_) {
-    return true;
-  }
-  for (int i = 0; i < kNumSoundSystems; ++i) {
-    if (!sound_systems_[i].get()) {
-      sound_systems_[i].reset((*kSoundSystemCreators[i])());
-    }
-    if (sound_systems_[i]->Init()) {
-      // This is the first sound system in the list to successfully
-      // initialize, so we're done.
-      wrapped_ = sound_systems_[i].get();
-      break;
-    }
-    // Else it failed to initialize, so try the remaining ones.
-  }
-  if (!wrapped_) {
-    LOG(LS_ERROR) << "Failed to find a usable sound system";
-    return false;
-  }
-  LOG(LS_INFO) << "Selected " << wrapped_->GetName() << " sound system";
-  return true;
-}
-
-template <const SoundSystemCreator kSoundSystemCreators[], int kNumSoundSystems>
-void AutomaticallyChosenSoundSystem<kSoundSystemCreators,
-                                    kNumSoundSystems>::Terminate() {
-  if (!wrapped_) {
-    return;
-  }
-  wrapped_->Terminate();
-  wrapped_ = NULL;
-  // We do not free the scoped_ptrs because we may be re-init'ed soon.
-}
-
-template <const SoundSystemCreator kSoundSystemCreators[], int kNumSoundSystems>
-const char *AutomaticallyChosenSoundSystem<kSoundSystemCreators,
-                                           kNumSoundSystems>::GetName() const {
-  return wrapped_ ? wrapped_->GetName() : "automatic";
-}
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_AUTOMATICALLYCHOSENSOUNDSYSTEM_H_
diff --git a/sound/automaticallychosensoundsystem_unittest.cc b/sound/automaticallychosensoundsystem_unittest.cc
deleted file mode 100644
index 813828d..0000000
--- a/sound/automaticallychosensoundsystem_unittest.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice, 
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products 
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/sound/automaticallychosensoundsystem.h"
-#include "talk/sound/nullsoundsystem.h"
-#include "webrtc/base/gunit.h"
-
-namespace cricket {
-
-class NeverFailsToFailSoundSystem : public NullSoundSystem {
- public:
-  // Overrides superclass.
-  virtual bool Init() {
-    return false;
-  }
-
-  static SoundSystemInterface *Create() {
-    return new NeverFailsToFailSoundSystem();
-  }
-};
-
-class InitCheckingSoundSystem1 : public NullSoundSystem {
- public:
-  // Overrides superclass.
-  virtual bool Init() {
-    created_ = true;
-    return true;
-  }
-
-  static SoundSystemInterface *Create() {
-    return new InitCheckingSoundSystem1();
-  }
-
-  static bool created_;
-};
-
-bool InitCheckingSoundSystem1::created_ = false;
-
-class InitCheckingSoundSystem2 : public NullSoundSystem {
- public:
-  // Overrides superclass.
-  virtual bool Init() {
-    created_ = true;
-    return true;
-  }
-
-  static SoundSystemInterface *Create() {
-    return new InitCheckingSoundSystem2();
-  }
-
-  static bool created_;
-};
-
-bool InitCheckingSoundSystem2::created_ = false;
-
-class DeletionCheckingSoundSystem1 : public NeverFailsToFailSoundSystem {
- public:
-  virtual ~DeletionCheckingSoundSystem1() {
-    deleted_ = true;
-  }
-
-  static SoundSystemInterface *Create() {
-    return new DeletionCheckingSoundSystem1();
-  }
-
-  static bool deleted_;
-};
-
-bool DeletionCheckingSoundSystem1::deleted_ = false;
-
-class DeletionCheckingSoundSystem2 : public NeverFailsToFailSoundSystem {
- public:
-  virtual ~DeletionCheckingSoundSystem2() {
-    deleted_ = true;
-  }
-
-  static SoundSystemInterface *Create() {
-    return new DeletionCheckingSoundSystem2();
-  }
-
-  static bool deleted_;
-};
-
-bool DeletionCheckingSoundSystem2::deleted_ = false;
-
-class DeletionCheckingSoundSystem3 : public NullSoundSystem {
- public:
-  virtual ~DeletionCheckingSoundSystem3() {
-    deleted_ = true;
-  }
-
-  static SoundSystemInterface *Create() {
-    return new DeletionCheckingSoundSystem3();
-  }
-
-  static bool deleted_;
-};
-
-bool DeletionCheckingSoundSystem3::deleted_ = false;
-
-extern const SoundSystemCreator kSingleSystemFailingCreators[] = {
-  &NeverFailsToFailSoundSystem::Create,
-};
-
-TEST(AutomaticallyChosenSoundSystem, SingleSystemFailing) {
-  AutomaticallyChosenSoundSystem<
-      kSingleSystemFailingCreators,
-      ARRAY_SIZE(kSingleSystemFailingCreators)> sound_system;
-  EXPECT_FALSE(sound_system.Init());
-}
-
-extern const SoundSystemCreator kSingleSystemSucceedingCreators[] = {
-  &NullSoundSystem::Create,
-};
-
-TEST(AutomaticallyChosenSoundSystem, SingleSystemSucceeding) {
-  AutomaticallyChosenSoundSystem<
-      kSingleSystemSucceedingCreators,
-      ARRAY_SIZE(kSingleSystemSucceedingCreators)> sound_system;
-  EXPECT_TRUE(sound_system.Init());
-}
-
-extern const SoundSystemCreator
-    kFailedFirstSystemResultsInUsingSecondCreators[] = {
-  &NeverFailsToFailSoundSystem::Create,
-  &NullSoundSystem::Create,
-};
-
-TEST(AutomaticallyChosenSoundSystem, FailedFirstSystemResultsInUsingSecond) {
-  AutomaticallyChosenSoundSystem<
-      kFailedFirstSystemResultsInUsingSecondCreators,
-      ARRAY_SIZE(kFailedFirstSystemResultsInUsingSecondCreators)> sound_system;
-  EXPECT_TRUE(sound_system.Init());
-}
-
-extern const SoundSystemCreator kEarlierEntriesHavePriorityCreators[] = {
-  &InitCheckingSoundSystem1::Create,
-  &InitCheckingSoundSystem2::Create,
-};
-
-TEST(AutomaticallyChosenSoundSystem, EarlierEntriesHavePriority) {
-  AutomaticallyChosenSoundSystem<
-      kEarlierEntriesHavePriorityCreators,
-      ARRAY_SIZE(kEarlierEntriesHavePriorityCreators)> sound_system;
-  InitCheckingSoundSystem1::created_ = false;
-  InitCheckingSoundSystem2::created_ = false;
-  EXPECT_TRUE(sound_system.Init());
-  EXPECT_TRUE(InitCheckingSoundSystem1::created_);
-  EXPECT_FALSE(InitCheckingSoundSystem2::created_);
-}
-
-extern const SoundSystemCreator kManySoundSystemsCreators[] = {
-  &NullSoundSystem::Create,
-  &NullSoundSystem::Create,
-  &NullSoundSystem::Create,
-  &NullSoundSystem::Create,
-  &NullSoundSystem::Create,
-  &NullSoundSystem::Create,
-  &NullSoundSystem::Create,
-};
-
-TEST(AutomaticallyChosenSoundSystem, ManySoundSystems) {
-  AutomaticallyChosenSoundSystem<
-      kManySoundSystemsCreators,
-      ARRAY_SIZE(kManySoundSystemsCreators)> sound_system;
-  EXPECT_TRUE(sound_system.Init());
-}
-
-extern const SoundSystemCreator kDeletesAllCreatedSoundSystemsCreators[] = {
-  &DeletionCheckingSoundSystem1::Create,
-  &DeletionCheckingSoundSystem2::Create,
-  &DeletionCheckingSoundSystem3::Create,
-};
-
-TEST(AutomaticallyChosenSoundSystem, DeletesAllCreatedSoundSystems) {
-  typedef AutomaticallyChosenSoundSystem<
-      kDeletesAllCreatedSoundSystemsCreators,
-      ARRAY_SIZE(kDeletesAllCreatedSoundSystemsCreators)> TestSoundSystem;
-  TestSoundSystem *sound_system = new TestSoundSystem();
-  DeletionCheckingSoundSystem1::deleted_ = false;
-  DeletionCheckingSoundSystem2::deleted_ = false;
-  DeletionCheckingSoundSystem3::deleted_ = false;
-  EXPECT_TRUE(sound_system->Init());
-  delete sound_system;
-  EXPECT_TRUE(DeletionCheckingSoundSystem1::deleted_);
-  EXPECT_TRUE(DeletionCheckingSoundSystem2::deleted_);
-  EXPECT_TRUE(DeletionCheckingSoundSystem3::deleted_);
-}
-
-}  // namespace cricket
diff --git a/sound/linuxsoundsystem.cc b/sound/linuxsoundsystem.cc
deleted file mode 100644
index 7980a15..0000000
--- a/sound/linuxsoundsystem.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/sound/linuxsoundsystem.h"
-
-#include "talk/sound/alsasoundsystem.h"
-#include "talk/sound/pulseaudiosoundsystem.h"
-
-namespace cricket {
-
-const SoundSystemCreator kLinuxSoundSystemCreators[] = {
-#ifdef HAVE_LIBPULSE
-  &PulseAudioSoundSystem::Create,
-#endif
-  &AlsaSoundSystem::Create,
-};
-
-}  // namespace cricket
diff --git a/sound/linuxsoundsystem.h b/sound/linuxsoundsystem.h
deleted file mode 100644
index eb48b88..0000000
--- a/sound/linuxsoundsystem.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_LINUXSOUNDSYSTEM_H_
-#define TALK_SOUND_LINUXSOUNDSYSTEM_H_
-
-#include "talk/sound/automaticallychosensoundsystem.h"
-
-namespace cricket {
-
-extern const SoundSystemCreator kLinuxSoundSystemCreators[
-#ifdef HAVE_LIBPULSE
-    2
-#else
-    1
-#endif
-    ];
-
-// The vast majority of Linux systems use ALSA for the device-level sound API,
-// but an increasing number are using PulseAudio for the application API and
-// only using ALSA internally in PulseAudio itself. But like everything on
-// Linux this is user-configurable, so we need to support both and choose the
-// right one at run-time.
-// PulseAudioSoundSystem is designed to only successfully initialize if
-// PulseAudio is installed and running, and if it is running then direct device
-// access using ALSA typically won't work, so if PulseAudioSoundSystem
-// initializes then we choose that. Otherwise we choose ALSA.
-typedef AutomaticallyChosenSoundSystem<
-    kLinuxSoundSystemCreators,
-    ARRAY_SIZE(kLinuxSoundSystemCreators)> LinuxSoundSystem;
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_LINUXSOUNDSYSTEM_H_
diff --git a/sound/nullsoundsystem.cc b/sound/nullsoundsystem.cc
deleted file mode 100644
index 3408d4c..0000000
--- a/sound/nullsoundsystem.cc
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/sound/nullsoundsystem.h"
-
-#include "talk/sound/sounddevicelocator.h"
-#include "talk/sound/soundinputstreaminterface.h"
-#include "talk/sound/soundoutputstreaminterface.h"
-#include "webrtc/base/logging.h"
-
-namespace rtc {
-
-class Thread;
-
-}
-
-namespace cricket {
-
-// Name used for the single device and the sound system itself.
-static const char kNullName[] = "null";
-
-class NullSoundDeviceLocator : public SoundDeviceLocator {
- public:
-  NullSoundDeviceLocator() : SoundDeviceLocator(kNullName, kNullName) {}
-
-  virtual SoundDeviceLocator *Copy() const {
-    return new NullSoundDeviceLocator();
-  }
-};
-
-class NullSoundInputStream : public SoundInputStreamInterface {
- public:
-  virtual bool StartReading() {
-    return true;
-  }
-
-  virtual bool StopReading() {
-    return true;
-  }
-
-  virtual bool GetVolume(int *volume) {
-    *volume = SoundSystemInterface::kMinVolume;
-    return true;
-  }
-
-  virtual bool SetVolume(int volume) {
-    return false;
-  }
-
-  virtual bool Close() {
-    return true;
-  }
-
-  virtual int LatencyUsecs() {
-    return 0;
-  }
-};
-
-class NullSoundOutputStream : public SoundOutputStreamInterface {
- public:
-  virtual bool EnableBufferMonitoring() {
-    return true;
-  }
-
-  virtual bool DisableBufferMonitoring() {
-    return true;
-  }
-
-  virtual bool WriteSamples(const void *sample_data,
-                            size_t size) {
-    LOG(LS_VERBOSE) << "Got " << size << " bytes of playback samples";
-    return true;
-  }
-
-  virtual bool GetVolume(int *volume) {
-    *volume = SoundSystemInterface::kMinVolume;
-    return true;
-  }
-
-  virtual bool SetVolume(int volume) {
-    return false;
-  }
-
-  virtual bool Close() {
-    return true;
-  }
-
-  virtual int LatencyUsecs() {
-    return 0;
-  }
-};
-
-NullSoundSystem::~NullSoundSystem() {
-}
-
-bool NullSoundSystem::Init() {
-  return true;
-}
-
-void NullSoundSystem::Terminate() {
-  // Nothing to do.
-}
-
-bool NullSoundSystem::EnumeratePlaybackDevices(
-      SoundSystemInterface::SoundDeviceLocatorList *devices) {
-  ClearSoundDeviceLocatorList(devices);
-  SoundDeviceLocator *device;
-  GetDefaultPlaybackDevice(&device);
-  devices->push_back(device);
-  return true;
-}
-
-bool NullSoundSystem::EnumerateCaptureDevices(
-      SoundSystemInterface::SoundDeviceLocatorList *devices) {
-  ClearSoundDeviceLocatorList(devices);
-  SoundDeviceLocator *device;
-  GetDefaultCaptureDevice(&device);
-  devices->push_back(device);
-  return true;
-}
-
-bool NullSoundSystem::GetDefaultPlaybackDevice(
-    SoundDeviceLocator **device) {
-  *device = new NullSoundDeviceLocator();
-  return true;
-}
-
-bool NullSoundSystem::GetDefaultCaptureDevice(
-    SoundDeviceLocator **device) {
-  *device = new NullSoundDeviceLocator();
-  return true;
-}
-
-SoundOutputStreamInterface *NullSoundSystem::OpenPlaybackDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params) {
-  return new NullSoundOutputStream();
-}
-
-SoundInputStreamInterface *NullSoundSystem::OpenCaptureDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params) {
-  return new NullSoundInputStream();
-}
-
-const char *NullSoundSystem::GetName() const {
-  return kNullName;
-}
-
-}  // namespace cricket
diff --git a/sound/nullsoundsystem.h b/sound/nullsoundsystem.h
deleted file mode 100644
index 3edb4f9..0000000
--- a/sound/nullsoundsystem.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_NULLSOUNDSYSTEM_H_
-#define TALK_SOUND_NULLSOUNDSYSTEM_H_
-
-#include "talk/sound/soundsysteminterface.h"
-
-namespace cricket {
-
-class SoundDeviceLocator;
-class SoundInputStreamInterface;
-class SoundOutputStreamInterface;
-
-// A simple reference sound system that drops output samples and generates
-// no input samples.
-class NullSoundSystem : public SoundSystemInterface {
- public:
-  static SoundSystemInterface *Create() {
-    return new NullSoundSystem();
-  }
-
-  virtual ~NullSoundSystem();
-
-  virtual bool Init();
-  virtual void Terminate();
-
-  virtual bool EnumeratePlaybackDevices(SoundDeviceLocatorList *devices);
-  virtual bool EnumerateCaptureDevices(SoundDeviceLocatorList *devices);
-
-  virtual SoundOutputStreamInterface *OpenPlaybackDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params);
-  virtual SoundInputStreamInterface *OpenCaptureDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params);
-
-  virtual bool GetDefaultPlaybackDevice(SoundDeviceLocator **device);
-  virtual bool GetDefaultCaptureDevice(SoundDeviceLocator **device);
-
-  virtual const char *GetName() const;
-};
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_NULLSOUNDSYSTEM_H_
diff --git a/sound/nullsoundsystemfactory.cc b/sound/nullsoundsystemfactory.cc
deleted file mode 100644
index 089d51f..0000000
--- a/sound/nullsoundsystemfactory.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice, 
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products 
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/sound/nullsoundsystemfactory.h"
-
-#include "talk/sound/nullsoundsystem.h"
-
-namespace cricket {
-
-NullSoundSystemFactory::NullSoundSystemFactory() {
-}
-
-NullSoundSystemFactory::~NullSoundSystemFactory() {
-}
-
-bool NullSoundSystemFactory::SetupInstance() {
-  instance_.reset(new NullSoundSystem());
-  return true;
-}
-
-void NullSoundSystemFactory::CleanupInstance() {
-  instance_.reset();
-}
-
-}  // namespace cricket
diff --git a/sound/nullsoundsystemfactory.h b/sound/nullsoundsystemfactory.h
deleted file mode 100644
index 71ae980..0000000
--- a/sound/nullsoundsystemfactory.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice, 
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products 
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_NULLSOUNDSYSTEMFACTORY_H_
-#define TALK_SOUND_NULLSOUNDSYSTEMFACTORY_H_
-
-#include "talk/sound/soundsystemfactory.h"
-
-namespace cricket {
-
-// A SoundSystemFactory that always returns a NullSoundSystem. Intended for
-// testing.
-class NullSoundSystemFactory : public SoundSystemFactory {
- public:
-  NullSoundSystemFactory();
-  virtual ~NullSoundSystemFactory();
-
- protected:
-  // Inherited from SoundSystemFactory.
-  virtual bool SetupInstance();
-  virtual void CleanupInstance();
-};
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_NULLSOUNDSYSTEMFACTORY_H_
diff --git a/sound/platformsoundsystem.cc b/sound/platformsoundsystem.cc
deleted file mode 100644
index c39fc83..0000000
--- a/sound/platformsoundsystem.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/sound/platformsoundsystem.h"
-
-#include "webrtc/base/common.h"
-#ifdef LINUX
-#include "talk/sound/linuxsoundsystem.h"
-#else
-#include "talk/sound/nullsoundsystem.h"
-#endif
-
-namespace cricket {
-
-SoundSystemInterface *CreatePlatformSoundSystem() {
-#ifdef LINUX
-  return new LinuxSoundSystem();
-#else
-  ASSERT(false && "Not implemented");
-  return new NullSoundSystem();
-#endif
-}
-
-}  // namespace cricket
diff --git a/sound/platformsoundsystem.h b/sound/platformsoundsystem.h
deleted file mode 100644
index 1a8d214..0000000
--- a/sound/platformsoundsystem.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_PLATFORMSOUNDSYSTEM_H_
-#define TALK_SOUND_PLATFORMSOUNDSYSTEM_H_
-
-namespace cricket {
-
-class SoundSystemInterface;
-
-// Creates the sound system implementation for this platform.
-SoundSystemInterface *CreatePlatformSoundSystem();
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_PLATFORMSOUNDSYSTEM_H_
diff --git a/sound/platformsoundsystemfactory.cc b/sound/platformsoundsystemfactory.cc
deleted file mode 100644
index 6c69954..0000000
--- a/sound/platformsoundsystemfactory.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice, 
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products 
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/sound/platformsoundsystemfactory.h"
-
-#include "talk/sound/platformsoundsystem.h"
-#include "talk/sound/soundsysteminterface.h"
-
-namespace cricket {
-
-PlatformSoundSystemFactory::PlatformSoundSystemFactory() {
-}
-
-PlatformSoundSystemFactory::~PlatformSoundSystemFactory() {
-}
-
-bool PlatformSoundSystemFactory::SetupInstance() {
-  if (!instance_.get()) {
-    instance_.reset(CreatePlatformSoundSystem());
-  }
-  if (!instance_->Init()) {
-    LOG(LS_ERROR) << "Can't initialize platform's sound system";
-    return false;
-  }
-  return true;
-}
-
-void PlatformSoundSystemFactory::CleanupInstance() {
-  instance_->Terminate();
-  // We do not delete the sound system because we might be re-initialized soon.
-}
-
-}  // namespace cricket
diff --git a/sound/platformsoundsystemfactory.h b/sound/platformsoundsystemfactory.h
deleted file mode 100644
index 63ca863..0000000
--- a/sound/platformsoundsystemfactory.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice, 
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products 
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_PLATFORMSOUNDSYSTEMFACTORY_H_
-#define TALK_SOUND_PLATFORMSOUNDSYSTEMFACTORY_H_
-
-#include "talk/sound/soundsystemfactory.h"
-
-namespace cricket {
-
-// A SoundSystemFactory that returns the platform's native sound system
-// implementation.
-class PlatformSoundSystemFactory : public SoundSystemFactory {
- public:
-  PlatformSoundSystemFactory();
-  virtual ~PlatformSoundSystemFactory();
-
- protected:
-  // Inherited from SoundSystemFactory.
-  virtual bool SetupInstance();
-  virtual void CleanupInstance();
-};
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_PLATFORMSOUNDSYSTEMFACTORY_H_
-
-
diff --git a/sound/pulseaudiosoundsystem.cc b/sound/pulseaudiosoundsystem.cc
deleted file mode 100644
index 24eea5c..0000000
--- a/sound/pulseaudiosoundsystem.cc
+++ /dev/null
@@ -1,1559 +0,0 @@
-/*
- * libjingle
- * Copyright 2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/sound/pulseaudiosoundsystem.h"
-
-#ifdef HAVE_LIBPULSE
-
-#include "talk/sound/sounddevicelocator.h"
-#include "talk/sound/soundinputstreaminterface.h"
-#include "talk/sound/soundoutputstreaminterface.h"
-#include "webrtc/base/common.h"
-#include "webrtc/base/fileutils.h"  // for GetApplicationName()
-#include "webrtc/base/logging.h"
-#include "webrtc/base/timeutils.h"
-#include "webrtc/base/worker.h"
-
-namespace cricket {
-
-// First PulseAudio protocol version that supports PA_STREAM_ADJUST_LATENCY.
-static const uint32_t kAdjustLatencyProtocolVersion = 13;
-
-// Lookup table from the cricket format enum in soundsysteminterface.h to
-// Pulse's enums.
-static const pa_sample_format_t kCricketFormatToPulseFormatTable[] = {
-  // The order here must match the order in soundsysteminterface.h
-  PA_SAMPLE_S16LE,
-};
-
-// Some timing constants for optimal operation. See
-// https://tango.0pointer.de/pipermail/pulseaudio-discuss/2008-January/001170.html
-// for a good explanation of some of the factors that go into this.
-
-// Playback.
-
-// For playback, there is a round-trip delay to fill the server-side playback
-// buffer, so setting too low of a latency is a buffer underflow risk. We will
-// automatically increase the latency if a buffer underflow does occur, but we
-// also enforce a sane minimum at start-up time. Anything lower would be
-// virtually guaranteed to underflow at least once, so there's no point in
-// allowing lower latencies.
-static const int kPlaybackLatencyMinimumMsecs = 20;
-// Every time a playback stream underflows, we will reconfigure it with target
-// latency that is greater by this amount.
-static const int kPlaybackLatencyIncrementMsecs = 20;
-// We also need to configure a suitable request size. Too small and we'd burn
-// CPU from the overhead of transfering small amounts of data at once. Too large
-// and the amount of data remaining in the buffer right before refilling it
-// would be a buffer underflow risk. We set it to half of the buffer size.
-static const int kPlaybackRequestFactor = 2;
-
-// Capture.
-
-// For capture, low latency is not a buffer overflow risk, but it makes us burn
-// CPU from the overhead of transfering small amounts of data at once, so we set
-// a recommended value that we use for the kLowLatency constant (but if the user
-// explicitly requests something lower then we will honour it).
-// 1ms takes about 6-7% CPU. 5ms takes about 5%. 10ms takes about 4.x%.
-static const int kLowCaptureLatencyMsecs = 10;
-// There is a round-trip delay to ack the data to the server, so the
-// server-side buffer needs extra space to prevent buffer overflow. 20ms is
-// sufficient, but there is no penalty to making it bigger, so we make it huge.
-// (750ms is libpulse's default value for the _total_ buffer size in the
-// kNoLatencyRequirements case.)
-static const int kCaptureBufferExtraMsecs = 750;
-
-static void FillPlaybackBufferAttr(int latency,
-                                   pa_buffer_attr *attr) {
-  attr->maxlength = latency;
-  attr->tlength = latency;
-  attr->minreq = latency / kPlaybackRequestFactor;
-  attr->prebuf = attr->tlength - attr->minreq;
-  LOG(LS_VERBOSE) << "Configuring latency = " << attr->tlength << ", minreq = "
-                  << attr->minreq << ", minfill = " << attr->prebuf;
-}
-
-static pa_volume_t CricketVolumeToPulseVolume(int volume) {
-  // PA's volume space goes from 0% at PA_VOLUME_MUTED (value 0) to 100% at
-  // PA_VOLUME_NORM (value 0x10000). It can also go beyond 100% up to
-  // PA_VOLUME_MAX (value UINT32_MAX-1), but using that is probably unwise.
-  // We just linearly map the 0-255 scale of SoundSystemInterface onto
-  // PA_VOLUME_MUTED-PA_VOLUME_NORM. If the programmer exceeds kMaxVolume then
-  // they can access the over-100% features of PA.
-  return PA_VOLUME_MUTED + (PA_VOLUME_NORM - PA_VOLUME_MUTED) *
-      volume / SoundSystemInterface::kMaxVolume;
-}
-
-static int PulseVolumeToCricketVolume(pa_volume_t pa_volume) {
-  return SoundSystemInterface::kMinVolume +
-      (SoundSystemInterface::kMaxVolume - SoundSystemInterface::kMinVolume) *
-      pa_volume / PA_VOLUME_NORM;
-}
-
-static pa_volume_t MaxChannelVolume(pa_cvolume *channel_volumes) {
-  pa_volume_t pa_volume = PA_VOLUME_MUTED;  // Minimum possible value.
-  for (int i = 0; i < channel_volumes->channels; ++i) {
-    if (pa_volume < channel_volumes->values[i]) {
-      pa_volume = channel_volumes->values[i];
-    }
-  }
-  return pa_volume;
-}
-
-class PulseAudioDeviceLocator : public SoundDeviceLocator {
- public:
-  PulseAudioDeviceLocator(const std::string &name,
-                          const std::string &device_name)
-      : SoundDeviceLocator(name, device_name) {
-  }
-
-  virtual SoundDeviceLocator *Copy() const {
-    return new PulseAudioDeviceLocator(*this);
-  }
-};
-
-// Functionality that is common to both PulseAudioInputStream and
-// PulseAudioOutputStream.
-class PulseAudioStream {
- public:
-  PulseAudioStream(PulseAudioSoundSystem *pulse, pa_stream *stream, int flags)
-      : pulse_(pulse), stream_(stream), flags_(flags) {
-  }
-
-  ~PulseAudioStream() {
-    // Close() should have been called during the containing class's destructor.
-    ASSERT(stream_ == NULL);
-  }
-
-  // Must be called with the lock held.
-  bool Close() {
-    if (!IsClosed()) {
-      // Unset this here so that we don't get a TERMINATED callback.
-      symbol_table()->pa_stream_set_state_callback()(stream_, NULL, NULL);
-      if (symbol_table()->pa_stream_disconnect()(stream_) != 0) {
-        LOG(LS_ERROR) << "Can't disconnect stream";
-        // Continue and return true anyways.
-      }
-      symbol_table()->pa_stream_unref()(stream_);
-      stream_ = NULL;
-    }
-    return true;
-  }
-
-  // Must be called with the lock held.
-  int LatencyUsecs() {
-    if (!(flags_ & SoundSystemInterface::FLAG_REPORT_LATENCY)) {
-      return 0;
-    }
-
-    pa_usec_t latency;
-    int negative;
-    Lock();
-    int re = symbol_table()->pa_stream_get_latency()(stream_, &latency,
-        &negative);
-    Unlock();
-    if (re != 0) {
-      LOG(LS_ERROR) << "Can't query latency";
-      // We'd rather continue playout/capture with an incorrect delay than stop
-      // it altogether, so return a valid value.
-      return 0;
-    }
-    if (negative) {
-      // The delay can be negative for monitoring streams if the captured
-      // samples haven't been played yet. In such a case, "latency" contains the
-      // magnitude, so we must negate it to get the real value.
-      return -latency;
-    } else {
-      return latency;
-    }
-  }
-
-  PulseAudioSoundSystem *pulse() {
-    return pulse_;
-  }
-
-  PulseAudioSymbolTable *symbol_table() {
-    return &pulse()->symbol_table_;
-  }
-
-  pa_stream *stream() {
-    ASSERT(stream_ != NULL);
-    return stream_;
-  }
-
-  bool IsClosed() {
-    return stream_ == NULL;
-  }
-
-  void Lock() {
-    pulse()->Lock();
-  }
-
-  void Unlock() {
-    pulse()->Unlock();
-  }
-
- private:
-  PulseAudioSoundSystem *pulse_;
-  pa_stream *stream_;
-  int flags_;
-
-  DISALLOW_COPY_AND_ASSIGN(PulseAudioStream);
-};
-
-// Implementation of an input stream. See soundinputstreaminterface.h regarding
-// thread-safety.
-class PulseAudioInputStream :
-    public SoundInputStreamInterface,
-    private rtc::Worker {
-
-  struct GetVolumeCallbackData {
-    PulseAudioInputStream *instance;
-    pa_cvolume *channel_volumes;
-  };
-
-  struct GetSourceChannelCountCallbackData {
-    PulseAudioInputStream *instance;
-    uint8_t *channels;
-  };
-
- public:
-  PulseAudioInputStream(PulseAudioSoundSystem *pulse,
-                        pa_stream *stream,
-                        int flags)
-      : stream_(pulse, stream, flags),
-        temp_sample_data_(NULL),
-        temp_sample_data_size_(0) {
-    // This callback seems to never be issued, but let's set it anyways.
-    symbol_table()->pa_stream_set_overflow_callback()(stream, &OverflowCallback,
-        NULL);
-  }
-
-  virtual ~PulseAudioInputStream() {
-    bool success = Close();
-    // We need that to live.
-    VERIFY(success);
-  }
-
-  virtual bool StartReading() {
-    return StartWork();
-  }
-
-  virtual bool StopReading() {
-    return StopWork();
-  }
-
-  virtual bool GetVolume(int *volume) {
-    bool ret = false;
-
-    Lock();
-
-    // Unlike output streams, input streams have no concept of a stream volume,
-    // only a device volume. So we have to retrieve the volume of the device
-    // itself.
-
-    pa_cvolume channel_volumes;
-
-    GetVolumeCallbackData data;
-    data.instance = this;
-    data.channel_volumes = &channel_volumes;
-
-    pa_operation *op = symbol_table()->pa_context_get_source_info_by_index()(
-            stream_.pulse()->context_,
-            symbol_table()->pa_stream_get_device_index()(stream_.stream()),
-            &GetVolumeCallbackThunk,
-            &data);
-    if (!stream_.pulse()->FinishOperation(op)) {
-      goto done;
-    }
-
-    if (data.channel_volumes) {
-      // This pointer was never unset by the callback, so we must have received
-      // an empty list of infos. This probably never happens, but we code for it
-      // anyway.
-      LOG(LS_ERROR) << "Did not receive GetVolumeCallback";
-      goto done;
-    }
-
-    // We now have the volume for each channel. Each channel could have a
-    // different volume if, e.g., the user went and changed the volumes in the
-    // PA UI. To get a single volume for SoundSystemInterface we just take the
-    // maximum. Ideally we'd do so with pa_cvolume_max, but it doesn't exist in
-    // Hardy, so we do it manually.
-    pa_volume_t pa_volume;
-    pa_volume = MaxChannelVolume(&channel_volumes);
-    // Now map onto the SoundSystemInterface range.
-    *volume = PulseVolumeToCricketVolume(pa_volume);
-
-    ret = true;
-   done:
-    Unlock();
-    return ret;
-  }
-
-  virtual bool SetVolume(int volume) {
-    bool ret = false;
-    pa_volume_t pa_volume = CricketVolumeToPulseVolume(volume);
-
-    Lock();
-
-    // Unlike output streams, input streams have no concept of a stream volume,
-    // only a device volume. So we have to change the volume of the device
-    // itself.
-
-    // The device may have a different number of channels than the stream and
-    // their mapping may be different, so we don't want to use the channel count
-    // from our sample spec. We could use PA_CHANNELS_MAX to cover our bases,
-    // and the server allows that even if the device's channel count is lower,
-    // but some buggy PA clients don't like that (the pavucontrol on Hardy dies
-    // in an assert if the channel count is different). So instead we look up
-    // the actual number of channels that the device has.
-
-    uint8_t channels;
-
-    GetSourceChannelCountCallbackData data;
-    data.instance = this;
-    data.channels = &channels;
-
-    uint32_t device_index = symbol_table()->pa_stream_get_device_index()(
-        stream_.stream());
-
-    pa_operation *op = symbol_table()->pa_context_get_source_info_by_index()(
-        stream_.pulse()->context_,
-        device_index,
-        &GetSourceChannelCountCallbackThunk,
-        &data);
-    if (!stream_.pulse()->FinishOperation(op)) {
-      goto done;
-    }
-
-    if (data.channels) {
-      // This pointer was never unset by the callback, so we must have received
-      // an empty list of infos. This probably never happens, but we code for it
-      // anyway.
-      LOG(LS_ERROR) << "Did not receive GetSourceChannelCountCallback";
-      goto done;
-    }
-
-    pa_cvolume channel_volumes;
-    symbol_table()->pa_cvolume_set()(&channel_volumes, channels, pa_volume);
-
-    op = symbol_table()->pa_context_set_source_volume_by_index()(
-        stream_.pulse()->context_,
-        device_index,
-        &channel_volumes,
-        // This callback merely logs errors.
-        &SetVolumeCallback,
-        NULL);
-    if (!op) {
-      LOG(LS_ERROR) << "pa_context_set_source_volume_by_index()";
-      goto done;
-    }
-    // Don't need to wait for this to complete.
-    symbol_table()->pa_operation_unref()(op);
-
-    ret = true;
-   done:
-    Unlock();
-    return ret;
-  }
-
-  virtual bool Close() {
-    if (!StopReading()) {
-      return false;
-    }
-    bool ret = true;
-    if (!stream_.IsClosed()) {
-      Lock();
-      ret = stream_.Close();
-      Unlock();
-    }
-    return ret;
-  }
-
-  virtual int LatencyUsecs() {
-    return stream_.LatencyUsecs();
-  }
-
- private:
-  void Lock() {
-    stream_.Lock();
-  }
-
-  void Unlock() {
-    stream_.Unlock();
-  }
-
-  PulseAudioSymbolTable *symbol_table() {
-    return stream_.symbol_table();
-  }
-
-  void EnableReadCallback() {
-    symbol_table()->pa_stream_set_read_callback()(
-         stream_.stream(),
-         &ReadCallbackThunk,
-         this);
-  }
-
-  void DisableReadCallback() {
-    symbol_table()->pa_stream_set_read_callback()(
-         stream_.stream(),
-         NULL,
-         NULL);
-  }
-
-  static void ReadCallbackThunk(pa_stream *unused1,
-                                size_t unused2,
-                                void *userdata) {
-    PulseAudioInputStream *instance =
-        static_cast<PulseAudioInputStream *>(userdata);
-    instance->OnReadCallback();
-  }
-
-  void OnReadCallback() {
-    // We get the data pointer and size now in order to save one Lock/Unlock
-    // on OnMessage.
-    if (symbol_table()->pa_stream_peek()(stream_.stream(),
-                                         &temp_sample_data_,
-                                         &temp_sample_data_size_) != 0) {
-      LOG(LS_ERROR) << "Can't read data!";
-      return;
-    }
-    // Since we consume the data asynchronously on a different thread, we have
-    // to temporarily disable the read callback or else Pulse will call it
-    // continuously until we consume the data. We re-enable it below.
-    DisableReadCallback();
-    HaveWork();
-  }
-
-  // Inherited from Worker.
-  virtual void OnStart() {
-    Lock();
-    EnableReadCallback();
-    Unlock();
-  }
-
-  // Inherited from Worker.
-  virtual void OnHaveWork() {
-    ASSERT(temp_sample_data_ && temp_sample_data_size_);
-    SignalSamplesRead(temp_sample_data_,
-                      temp_sample_data_size_,
-                      this);
-    temp_sample_data_ = NULL;
-    temp_sample_data_size_ = 0;
-
-    Lock();
-    for (;;) {
-      // Ack the last thing we read.
-      if (symbol_table()->pa_stream_drop()(stream_.stream()) != 0) {
-        LOG(LS_ERROR) << "Can't ack read data";
-      }
-
-      if (symbol_table()->pa_stream_readable_size()(stream_.stream()) <= 0) {
-        // Then that was all the data.
-        break;
-      }
-
-      // Else more data.
-      const void *sample_data;
-      size_t sample_data_size;
-      if (symbol_table()->pa_stream_peek()(stream_.stream(),
-                                           &sample_data,
-                                           &sample_data_size) != 0) {
-        LOG(LS_ERROR) << "Can't read data!";
-        break;
-      }
-
-      // Drop lock for sigslot dispatch, which could take a while.
-      Unlock();
-      SignalSamplesRead(sample_data, sample_data_size, this);
-      Lock();
-
-      // Return to top of loop for the ack and the check for more data.
-    }
-    EnableReadCallback();
-    Unlock();
-  }
-
-  // Inherited from Worker.
-  virtual void OnStop() {
-    Lock();
-    DisableReadCallback();
-    Unlock();
-  }
-
-  static void OverflowCallback(pa_stream *stream,
-                               void *userdata) {
-    LOG(LS_WARNING) << "Buffer overflow on capture stream " << stream;
-  }
-
-  static void GetVolumeCallbackThunk(pa_context *unused,
-                                     const pa_source_info *info,
-                                     int eol,
-                                     void *userdata) {
-    GetVolumeCallbackData *data =
-        static_cast<GetVolumeCallbackData *>(userdata);
-    data->instance->OnGetVolumeCallback(info, eol, &data->channel_volumes);
-  }
-
-  void OnGetVolumeCallback(const pa_source_info *info,
-                           int eol,
-                           pa_cvolume **channel_volumes) {
-    if (eol) {
-      // List is over. Wake GetVolume().
-      stream_.pulse()->Signal();
-      return;
-    }
-
-    if (*channel_volumes) {
-      **channel_volumes = info->volume;
-      // Unset the pointer so that we know that we have have already copied the
-      // volume.
-      *channel_volumes = NULL;
-    } else {
-      // We have received an additional callback after the first one, which
-      // doesn't make sense for a single source. This probably never happens,
-      // but we code for it anyway.
-      LOG(LS_WARNING) << "Ignoring extra GetVolumeCallback";
-    }
-  }
-
-  static void GetSourceChannelCountCallbackThunk(pa_context *unused,
-                                                 const pa_source_info *info,
-                                                 int eol,
-                                                 void *userdata) {
-    GetSourceChannelCountCallbackData *data =
-        static_cast<GetSourceChannelCountCallbackData *>(userdata);
-    data->instance->OnGetSourceChannelCountCallback(info, eol, &data->channels);
-  }
-
-  void OnGetSourceChannelCountCallback(const pa_source_info *info,
-                                       int eol,
-                                       uint8_t **channels) {
-    if (eol) {
-      // List is over. Wake SetVolume().
-      stream_.pulse()->Signal();
-      return;
-    }
-
-    if (*channels) {
-      **channels = info->channel_map.channels;
-      // Unset the pointer so that we know that we have have already copied the
-      // channel count.
-      *channels = NULL;
-    } else {
-      // We have received an additional callback after the first one, which
-      // doesn't make sense for a single source. This probably never happens,
-      // but we code for it anyway.
-      LOG(LS_WARNING) << "Ignoring extra GetSourceChannelCountCallback";
-    }
-  }
-
-  static void SetVolumeCallback(pa_context *unused1,
-                                int success,
-                                void *unused2) {
-    if (!success) {
-      LOG(LS_ERROR) << "Failed to change capture volume";
-    }
-  }
-
-  PulseAudioStream stream_;
-  // Temporary storage for passing data between threads.
-  const void *temp_sample_data_;
-  size_t temp_sample_data_size_;
-
-  DISALLOW_COPY_AND_ASSIGN(PulseAudioInputStream);
-};
-
-// Implementation of an output stream. See soundoutputstreaminterface.h
-// regarding thread-safety.
-class PulseAudioOutputStream :
-    public SoundOutputStreamInterface,
-    private rtc::Worker {
-
-  struct GetVolumeCallbackData {
-    PulseAudioOutputStream *instance;
-    pa_cvolume *channel_volumes;
-  };
-
- public:
-  PulseAudioOutputStream(PulseAudioSoundSystem *pulse,
-                         pa_stream *stream,
-                         int flags,
-                         int latency)
-      : stream_(pulse, stream, flags),
-        configured_latency_(latency),
-        temp_buffer_space_(0) {
-    symbol_table()->pa_stream_set_underflow_callback()(stream,
-                                                       &UnderflowCallbackThunk,
-                                                       this);
-  }
-
-  virtual ~PulseAudioOutputStream() {
-    bool success = Close();
-    // We need that to live.
-    VERIFY(success);
-  }
-
-  virtual bool EnableBufferMonitoring() {
-    return StartWork();
-  }
-
-  virtual bool DisableBufferMonitoring() {
-    return StopWork();
-  }
-
-  virtual bool WriteSamples(const void *sample_data,
-                            size_t size) {
-    bool ret = true;
-    Lock();
-    if (symbol_table()->pa_stream_write()(stream_.stream(),
-                                          sample_data,
-                                          size,
-                                          NULL,
-                                          0,
-                                          PA_SEEK_RELATIVE) != 0) {
-      LOG(LS_ERROR) << "Unable to write";
-      ret = false;
-    }
-    Unlock();
-    return ret;
-  }
-
-  virtual bool GetVolume(int *volume) {
-    bool ret = false;
-
-    Lock();
-
-    pa_cvolume channel_volumes;
-
-    GetVolumeCallbackData data;
-    data.instance = this;
-    data.channel_volumes = &channel_volumes;
-
-    pa_operation *op = symbol_table()->pa_context_get_sink_input_info()(
-            stream_.pulse()->context_,
-            symbol_table()->pa_stream_get_index()(stream_.stream()),
-            &GetVolumeCallbackThunk,
-            &data);
-    if (!stream_.pulse()->FinishOperation(op)) {
-      goto done;
-    }
-
-    if (data.channel_volumes) {
-      // This pointer was never unset by the callback, so we must have received
-      // an empty list of infos. This probably never happens, but we code for it
-      // anyway.
-      LOG(LS_ERROR) << "Did not receive GetVolumeCallback";
-      goto done;
-    }
-
-    // We now have the volume for each channel. Each channel could have a
-    // different volume if, e.g., the user went and changed the volumes in the
-    // PA UI. To get a single volume for SoundSystemInterface we just take the
-    // maximum. Ideally we'd do so with pa_cvolume_max, but it doesn't exist in
-    // Hardy, so we do it manually.
-    pa_volume_t pa_volume;
-    pa_volume = MaxChannelVolume(&channel_volumes);
-    // Now map onto the SoundSystemInterface range.
-    *volume = PulseVolumeToCricketVolume(pa_volume);
-
-    ret = true;
-   done:
-    Unlock();
-    return ret;
-  }
-
-  virtual bool SetVolume(int volume) {
-    bool ret = false;
-    pa_volume_t pa_volume = CricketVolumeToPulseVolume(volume);
-
-    Lock();
-
-    const pa_sample_spec *spec = symbol_table()->pa_stream_get_sample_spec()(
-        stream_.stream());
-    if (!spec) {
-      LOG(LS_ERROR) << "pa_stream_get_sample_spec()";
-      goto done;
-    }
-
-    pa_cvolume channel_volumes;
-    symbol_table()->pa_cvolume_set()(&channel_volumes, spec->channels,
-        pa_volume);
-
-    pa_operation *op;
-    op = symbol_table()->pa_context_set_sink_input_volume()(
-        stream_.pulse()->context_,
-        symbol_table()->pa_stream_get_index()(stream_.stream()),
-        &channel_volumes,
-        // This callback merely logs errors.
-        &SetVolumeCallback,
-        NULL);
-    if (!op) {
-      LOG(LS_ERROR) << "pa_context_set_sink_input_volume()";
-      goto done;
-    }
-    // Don't need to wait for this to complete.
-    symbol_table()->pa_operation_unref()(op);
-
-    ret = true;
-   done:
-    Unlock();
-    return ret;
-  }
-
-  virtual bool Close() {
-    if (!DisableBufferMonitoring()) {
-      return false;
-    }
-    bool ret = true;
-    if (!stream_.IsClosed()) {
-      Lock();
-      symbol_table()->pa_stream_set_underflow_callback()(stream_.stream(),
-                                                         NULL,
-                                                         NULL);
-      ret = stream_.Close();
-      Unlock();
-    }
-    return ret;
-  }
-
-  virtual int LatencyUsecs() {
-    return stream_.LatencyUsecs();
-  }
-
-#if 0
-  // TODO: Versions 0.9.16 and later of Pulse have a new API for
-  // zero-copy writes, but Hardy is not new enough to have that so we can't
-  // rely on it. Perhaps auto-detect if it's present or not and use it if we
-  // can?
-
-  virtual bool GetWriteBuffer(void **buffer, size_t *size) {
-    bool ret = true;
-    Lock();
-    if (symbol_table()->pa_stream_begin_write()(stream_.stream(), buffer, size)
-            != 0) {
-      LOG(LS_ERROR) << "Can't get write buffer";
-      ret = false;
-    }
-    Unlock();
-    return ret;
-  }
-
-  // Releases the caller's hold on the write buffer. "written" must be the
-  // amount of data that was written.
-  virtual bool ReleaseWriteBuffer(void *buffer, size_t written) {
-    bool ret = true;
-    Lock();
-    if (written == 0) {
-      if (symbol_table()->pa_stream_cancel_write()(stream_.stream()) != 0) {
-        LOG(LS_ERROR) << "Can't cancel write";
-        ret = false;
-      }
-    } else {
-      if (symbol_table()->pa_stream_write()(stream_.stream(),
-                                            buffer,
-                                            written,
-                                            NULL,
-                                            0,
-                                            PA_SEEK_RELATIVE) != 0) {
-        LOG(LS_ERROR) << "Unable to write";
-        ret = false;
-      }
-    }
-    Unlock();
-    return ret;
-  }
-#endif
-
- private:
-  void Lock() {
-    stream_.Lock();
-  }
-
-  void Unlock() {
-    stream_.Unlock();
-  }
-
-  PulseAudioSymbolTable *symbol_table() {
-    return stream_.symbol_table();
-  }
-
-  void EnableWriteCallback() {
-    pa_stream_state_t state = symbol_table()->pa_stream_get_state()(
-        stream_.stream());
-    if (state == PA_STREAM_READY) {
-      // May already have available space. Must check.
-      temp_buffer_space_ = symbol_table()->pa_stream_writable_size()(
-          stream_.stream());
-      if (temp_buffer_space_ > 0) {
-        // Yup, there is already space available, so if we register a write
-        // callback then it will not receive any event. So dispatch one ourself
-        // instead.
-        HaveWork();
-        return;
-      }
-    }
-    symbol_table()->pa_stream_set_write_callback()(
-         stream_.stream(),
-         &WriteCallbackThunk,
-         this);
-  }
-
-  void DisableWriteCallback() {
-    symbol_table()->pa_stream_set_write_callback()(
-         stream_.stream(),
-         NULL,
-         NULL);
-  }
-
-  static void WriteCallbackThunk(pa_stream *unused,
-                                 size_t buffer_space,
-                                 void *userdata) {
-    PulseAudioOutputStream *instance =
-        static_cast<PulseAudioOutputStream *>(userdata);
-    instance->OnWriteCallback(buffer_space);
-  }
-
-  void OnWriteCallback(size_t buffer_space) {
-    temp_buffer_space_ = buffer_space;
-    // Since we write the data asynchronously on a different thread, we have
-    // to temporarily disable the write callback or else Pulse will call it
-    // continuously until we write the data. We re-enable it below.
-    DisableWriteCallback();
-    HaveWork();
-  }
-
-  // Inherited from Worker.
-  virtual void OnStart() {
-    Lock();
-    EnableWriteCallback();
-    Unlock();
-  }
-
-  // Inherited from Worker.
-  virtual void OnHaveWork() {
-    ASSERT(temp_buffer_space_ > 0);
-
-    SignalBufferSpace(temp_buffer_space_, this);
-
-    temp_buffer_space_ = 0;
-    Lock();
-    EnableWriteCallback();
-    Unlock();
-  }
-
-  // Inherited from Worker.
-  virtual void OnStop() {
-    Lock();
-    DisableWriteCallback();
-    Unlock();
-  }
-
-  static void UnderflowCallbackThunk(pa_stream *unused,
-                                     void *userdata) {
-    PulseAudioOutputStream *instance =
-        static_cast<PulseAudioOutputStream *>(userdata);
-    instance->OnUnderflowCallback();
-  }
-
-  void OnUnderflowCallback() {
-    LOG(LS_WARNING) << "Buffer underflow on playback stream "
-                    << stream_.stream();
-
-    if (configured_latency_ == SoundSystemInterface::kNoLatencyRequirements) {
-      // We didn't configure a pa_buffer_attr before, so switching to one now
-      // would be questionable.
-      return;
-    }
-
-    // Otherwise reconfigure the stream with a higher target latency.
-
-    const pa_sample_spec *spec = symbol_table()->pa_stream_get_sample_spec()(
-        stream_.stream());
-    if (!spec) {
-      LOG(LS_ERROR) << "pa_stream_get_sample_spec()";
-      return;
-    }
-
-    size_t bytes_per_sec = symbol_table()->pa_bytes_per_second()(spec);
-
-    int new_latency = configured_latency_ +
-        bytes_per_sec * kPlaybackLatencyIncrementMsecs /
-        rtc::kNumMicrosecsPerSec;
-
-    pa_buffer_attr new_attr = {0};
-    FillPlaybackBufferAttr(new_latency, &new_attr);
-
-    pa_operation *op = symbol_table()->pa_stream_set_buffer_attr()(
-        stream_.stream(),
-        &new_attr,
-        // No callback.
-        NULL,
-        NULL);
-    if (!op) {
-      LOG(LS_ERROR) << "pa_stream_set_buffer_attr()";
-      return;
-    }
-    // Don't need to wait for this to complete.
-    symbol_table()->pa_operation_unref()(op);
-
-    // Save the new latency in case we underflow again.
-    configured_latency_ = new_latency;
-  }
-
-  static void GetVolumeCallbackThunk(pa_context *unused,
-                                     const pa_sink_input_info *info,
-                                     int eol,
-                                     void *userdata) {
-    GetVolumeCallbackData *data =
-        static_cast<GetVolumeCallbackData *>(userdata);
-    data->instance->OnGetVolumeCallback(info, eol, &data->channel_volumes);
-  }
-
-  void OnGetVolumeCallback(const pa_sink_input_info *info,
-                           int eol,
-                           pa_cvolume **channel_volumes) {
-    if (eol) {
-      // List is over. Wake GetVolume().
-      stream_.pulse()->Signal();
-      return;
-    }
-
-    if (*channel_volumes) {
-      **channel_volumes = info->volume;
-      // Unset the pointer so that we know that we have have already copied the
-      // volume.
-      *channel_volumes = NULL;
-    } else {
-      // We have received an additional callback after the first one, which
-      // doesn't make sense for a single sink input. This probably never
-      // happens, but we code for it anyway.
-      LOG(LS_WARNING) << "Ignoring extra GetVolumeCallback";
-    }
-  }
-
-  static void SetVolumeCallback(pa_context *unused1,
-                                int success,
-                                void *unused2) {
-    if (!success) {
-      LOG(LS_ERROR) << "Failed to change playback volume";
-    }
-  }
-
-  PulseAudioStream stream_;
-  int configured_latency_;
-  // Temporary storage for passing data between threads.
-  size_t temp_buffer_space_;
-
-  DISALLOW_COPY_AND_ASSIGN(PulseAudioOutputStream);
-};
-
-PulseAudioSoundSystem::PulseAudioSoundSystem()
-    : mainloop_(NULL), context_(NULL) {
-}
-
-PulseAudioSoundSystem::~PulseAudioSoundSystem() {
-  Terminate();
-}
-
-bool PulseAudioSoundSystem::Init() {
-  if (IsInitialized()) {
-    return true;
-  }
-
-  // Load libpulse.
-  if (!symbol_table_.Load()) {
-    // Most likely the Pulse library and sound server are not installed on
-    // this system.
-    LOG(LS_WARNING) << "Failed to load symbol table";
-    return false;
-  }
-
-  // Now create and start the Pulse event thread.
-  mainloop_ = symbol_table_.pa_threaded_mainloop_new()();
-  if (!mainloop_) {
-    LOG(LS_ERROR) << "Can't create mainloop";
-    goto fail0;
-  }
-
-  if (symbol_table_.pa_threaded_mainloop_start()(mainloop_) != 0) {
-    LOG(LS_ERROR) << "Can't start mainloop";
-    goto fail1;
-  }
-
-  Lock();
-  context_ = CreateNewConnection();
-  Unlock();
-
-  if (!context_) {
-    goto fail2;
-  }
-
-  // Otherwise we're now ready!
-  return true;
-
- fail2:
-  symbol_table_.pa_threaded_mainloop_stop()(mainloop_);
- fail1:
-  symbol_table_.pa_threaded_mainloop_free()(mainloop_);
-  mainloop_ = NULL;
- fail0:
-  return false;
-}
-
-void PulseAudioSoundSystem::Terminate() {
-  if (!IsInitialized()) {
-    return;
-  }
-
-  Lock();
-  symbol_table_.pa_context_disconnect()(context_);
-  symbol_table_.pa_context_unref()(context_);
-  Unlock();
-  context_ = NULL;
-  symbol_table_.pa_threaded_mainloop_stop()(mainloop_);
-  symbol_table_.pa_threaded_mainloop_free()(mainloop_);
-  mainloop_ = NULL;
-
-  // We do not unload the symbol table because we may need it again soon if
-  // Init() is called again.
-}
-
-bool PulseAudioSoundSystem::EnumeratePlaybackDevices(
-    SoundDeviceLocatorList *devices) {
-  return EnumerateDevices<pa_sink_info>(
-      devices,
-      symbol_table_.pa_context_get_sink_info_list(),
-      &EnumeratePlaybackDevicesCallbackThunk);
-}
-
-bool PulseAudioSoundSystem::EnumerateCaptureDevices(
-    SoundDeviceLocatorList *devices) {
-  return EnumerateDevices<pa_source_info>(
-      devices,
-      symbol_table_.pa_context_get_source_info_list(),
-      &EnumerateCaptureDevicesCallbackThunk);
-}
-
-bool PulseAudioSoundSystem::GetDefaultPlaybackDevice(
-    SoundDeviceLocator **device) {
-  return GetDefaultDevice<&pa_server_info::default_sink_name>(device);
-}
-
-bool PulseAudioSoundSystem::GetDefaultCaptureDevice(
-    SoundDeviceLocator **device) {
-  return GetDefaultDevice<&pa_server_info::default_source_name>(device);
-}
-
-SoundOutputStreamInterface *PulseAudioSoundSystem::OpenPlaybackDevice(
-    const SoundDeviceLocator *device,
-    const OpenParams &params) {
-  return OpenDevice<SoundOutputStreamInterface>(
-      device,
-      params,
-      "Playback",
-      &PulseAudioSoundSystem::ConnectOutputStream);
-}
-
-SoundInputStreamInterface *PulseAudioSoundSystem::OpenCaptureDevice(
-    const SoundDeviceLocator *device,
-    const OpenParams &params) {
-  return OpenDevice<SoundInputStreamInterface>(
-      device,
-      params,
-      "Capture",
-      &PulseAudioSoundSystem::ConnectInputStream);
-}
-
-const char *PulseAudioSoundSystem::GetName() const {
-  return "PulseAudio";
-}
-
-inline bool PulseAudioSoundSystem::IsInitialized() {
-  return mainloop_ != NULL;
-}
-
-struct ConnectToPulseCallbackData {
-  PulseAudioSoundSystem *instance;
-  bool connect_done;
-};
-
-void PulseAudioSoundSystem::ConnectToPulseCallbackThunk(
-    pa_context *context, void *userdata) {
-  ConnectToPulseCallbackData *data =
-      static_cast<ConnectToPulseCallbackData *>(userdata);
-  data->instance->OnConnectToPulseCallback(context, &data->connect_done);
-}
-
-void PulseAudioSoundSystem::OnConnectToPulseCallback(
-    pa_context *context, bool *connect_done) {
-  pa_context_state_t state = symbol_table_.pa_context_get_state()(context);
-  if (state == PA_CONTEXT_READY ||
-      state == PA_CONTEXT_FAILED ||
-      state == PA_CONTEXT_TERMINATED) {
-    // Connection process has reached a terminal state. Wake ConnectToPulse().
-    *connect_done = true;
-    Signal();
-  }
-}
-
-// Must be called with the lock held.
-bool PulseAudioSoundSystem::ConnectToPulse(pa_context *context) {
-  bool ret = true;
-  ConnectToPulseCallbackData data;
-  // Have to put this up here to satisfy the compiler.
-  pa_context_state_t state;
-
-  data.instance = this;
-  data.connect_done = false;
-
-  symbol_table_.pa_context_set_state_callback()(context,
-                                                &ConnectToPulseCallbackThunk,
-                                                &data);
-
-  // Connect to PulseAudio sound server.
-  if (symbol_table_.pa_context_connect()(
-          context,
-          NULL,          // Default server
-          PA_CONTEXT_NOAUTOSPAWN,
-          NULL) != 0) {  // No special fork handling needed
-    LOG(LS_ERROR) << "Can't start connection to PulseAudio sound server";
-    ret = false;
-    goto done;
-  }
-
-  // Wait for the connection state machine to reach a terminal state.
-  do {
-    Wait();
-  } while (!data.connect_done);
-
-  // Now check to see what final state we reached.
-  state = symbol_table_.pa_context_get_state()(context);
-
-  if (state != PA_CONTEXT_READY) {
-    if (state == PA_CONTEXT_FAILED) {
-      LOG(LS_ERROR) << "Failed to connect to PulseAudio sound server";
-    } else if (state == PA_CONTEXT_TERMINATED) {
-      LOG(LS_ERROR) << "PulseAudio connection terminated early";
-    } else {
-      // Shouldn't happen, because we only signal on one of those three states.
-      LOG(LS_ERROR) << "Unknown problem connecting to PulseAudio";
-    }
-    ret = false;
-  }
-
- done:
-  // We unset our callback for safety just in case the state might somehow
-  // change later, because the pointer to "data" will be invalid after return
-  // from this function.
-  symbol_table_.pa_context_set_state_callback()(context, NULL, NULL);
-  return ret;
-}
-
-// Must be called with the lock held.
-pa_context *PulseAudioSoundSystem::CreateNewConnection() {
-  // Create connection context.
-  std::string app_name;
-  // TODO: Pulse etiquette says this name should be localized. Do
-  // we care?
-  rtc::Filesystem::GetApplicationName(&app_name);
-  pa_context *context = symbol_table_.pa_context_new()(
-      symbol_table_.pa_threaded_mainloop_get_api()(mainloop_),
-      app_name.c_str());
-  if (!context) {
-    LOG(LS_ERROR) << "Can't create context";
-    goto fail0;
-  }
-
-  // Now connect.
-  if (!ConnectToPulse(context)) {
-    goto fail1;
-  }
-
-  // Otherwise the connection succeeded and is ready.
-  return context;
-
- fail1:
-  symbol_table_.pa_context_unref()(context);
- fail0:
-  return NULL;
-}
-
-struct EnumerateDevicesCallbackData {
-  PulseAudioSoundSystem *instance;
-  SoundSystemInterface::SoundDeviceLocatorList *devices;
-};
-
-void PulseAudioSoundSystem::EnumeratePlaybackDevicesCallbackThunk(
-    pa_context *unused,
-    const pa_sink_info *info,
-    int eol,
-    void *userdata) {
-  EnumerateDevicesCallbackData *data =
-      static_cast<EnumerateDevicesCallbackData *>(userdata);
-  data->instance->OnEnumeratePlaybackDevicesCallback(data->devices, info, eol);
-}
-
-void PulseAudioSoundSystem::EnumerateCaptureDevicesCallbackThunk(
-    pa_context *unused,
-    const pa_source_info *info,
-    int eol,
-    void *userdata) {
-  EnumerateDevicesCallbackData *data =
-      static_cast<EnumerateDevicesCallbackData *>(userdata);
-  data->instance->OnEnumerateCaptureDevicesCallback(data->devices, info, eol);
-}
-
-void PulseAudioSoundSystem::OnEnumeratePlaybackDevicesCallback(
-    SoundDeviceLocatorList *devices,
-    const pa_sink_info *info,
-    int eol) {
-  if (eol) {
-    // List is over. Wake EnumerateDevices().
-    Signal();
-    return;
-  }
-
-  // Else this is the next device.
-  devices->push_back(
-      new PulseAudioDeviceLocator(info->description, info->name));
-}
-
-void PulseAudioSoundSystem::OnEnumerateCaptureDevicesCallback(
-    SoundDeviceLocatorList *devices,
-    const pa_source_info *info,
-    int eol) {
-  if (eol) {
-    // List is over. Wake EnumerateDevices().
-    Signal();
-    return;
-  }
-
-  if (info->monitor_of_sink != PA_INVALID_INDEX) {
-    // We don't want to list monitor sources, since they are almost certainly
-    // not what the user wants for voice conferencing.
-    return;
-  }
-
-  // Else this is the next device.
-  devices->push_back(
-      new PulseAudioDeviceLocator(info->description, info->name));
-}
-
-template <typename InfoStruct>
-bool PulseAudioSoundSystem::EnumerateDevices(
-    SoundDeviceLocatorList *devices,
-    pa_operation *(*enumerate_fn)(
-        pa_context *c,
-        void (*callback_fn)(
-            pa_context *c,
-            const InfoStruct *i,
-            int eol,
-            void *userdata),
-        void *userdata),
-    void (*callback_fn)(
-        pa_context *c,
-        const InfoStruct *i,
-        int eol,
-        void *userdata)) {
-  ClearSoundDeviceLocatorList(devices);
-  if (!IsInitialized()) {
-    return false;
-  }
-
-  EnumerateDevicesCallbackData data;
-  data.instance = this;
-  data.devices = devices;
-
-  Lock();
-  pa_operation *op = (*enumerate_fn)(
-      context_,
-      callback_fn,
-      &data);
-  bool ret = FinishOperation(op);
-  Unlock();
-  return ret;
-}
-
-struct GetDefaultDeviceCallbackData {
-  PulseAudioSoundSystem *instance;
-  SoundDeviceLocator **device;
-};
-
-template <const char *(pa_server_info::*field)>
-void PulseAudioSoundSystem::GetDefaultDeviceCallbackThunk(
-    pa_context *unused,
-    const pa_server_info *info,
-    void *userdata) {
-  GetDefaultDeviceCallbackData *data =
-      static_cast<GetDefaultDeviceCallbackData *>(userdata);
-  data->instance->OnGetDefaultDeviceCallback<field>(info, data->device);
-}
-
-template <const char *(pa_server_info::*field)>
-void PulseAudioSoundSystem::OnGetDefaultDeviceCallback(
-    const pa_server_info *info,
-    SoundDeviceLocator **device) {
-  if (info) {
-    const char *dev = info->*field;
-    if (dev) {
-      *device = new PulseAudioDeviceLocator("Default device", dev);
-    }
-  }
-  Signal();
-}
-
-template <const char *(pa_server_info::*field)>
-bool PulseAudioSoundSystem::GetDefaultDevice(SoundDeviceLocator **device) {
-  if (!IsInitialized()) {
-    return false;
-  }
-  bool ret;
-  *device = NULL;
-  GetDefaultDeviceCallbackData data;
-  data.instance = this;
-  data.device = device;
-  Lock();
-  pa_operation *op = symbol_table_.pa_context_get_server_info()(
-      context_,
-      &GetDefaultDeviceCallbackThunk<field>,
-      &data);
-  ret = FinishOperation(op);
-  Unlock();
-  return ret && (*device != NULL);
-}
-
-void PulseAudioSoundSystem::StreamStateChangedCallbackThunk(
-    pa_stream *stream,
-    void *userdata) {
-  PulseAudioSoundSystem *instance =
-      static_cast<PulseAudioSoundSystem *>(userdata);
-  instance->OnStreamStateChangedCallback(stream);
-}
-
-void PulseAudioSoundSystem::OnStreamStateChangedCallback(pa_stream *stream) {
-  pa_stream_state_t state = symbol_table_.pa_stream_get_state()(stream);
-  if (state == PA_STREAM_READY) {
-    LOG(LS_INFO) << "Pulse stream " << stream << " ready";
-  } else if (state == PA_STREAM_FAILED ||
-             state == PA_STREAM_TERMINATED ||
-             state == PA_STREAM_UNCONNECTED) {
-    LOG(LS_ERROR) << "Pulse stream " << stream << " failed to connect: "
-                  << LastError();
-  }
-}
-
-template <typename StreamInterface>
-StreamInterface *PulseAudioSoundSystem::OpenDevice(
-    const SoundDeviceLocator *device,
-    const OpenParams &params,
-    const char *stream_name,
-    StreamInterface *(PulseAudioSoundSystem::*connect_fn)(
-        pa_stream *stream,
-        const char *dev,
-        int flags,
-        pa_stream_flags_t pa_flags,
-        int latency,
-        const pa_sample_spec &spec)) {
-  if (!IsInitialized()) {
-    return NULL;
-  }
-
-  const char *dev = static_cast<const PulseAudioDeviceLocator *>(device)->
-      device_name().c_str();
-
-  StreamInterface *stream_interface = NULL;
-
-  ASSERT(params.format < ARRAY_SIZE(kCricketFormatToPulseFormatTable));
-
-  pa_sample_spec spec;
-  spec.format = kCricketFormatToPulseFormatTable[params.format];
-  spec.rate = params.freq;
-  spec.channels = params.channels;
-
-  int pa_flags = 0;
-  if (params.flags & FLAG_REPORT_LATENCY) {
-    pa_flags |= PA_STREAM_INTERPOLATE_TIMING |
-                PA_STREAM_AUTO_TIMING_UPDATE;
-  }
-
-  if (params.latency != kNoLatencyRequirements) {
-    // If configuring a specific latency then we want to specify
-    // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
-    // automatically to reach that target latency. However, that flag doesn't
-    // exist in Ubuntu 8.04 and many people still use that, so we have to check
-    // the protocol version of libpulse.
-    if (symbol_table_.pa_context_get_protocol_version()(context_) >=
-        kAdjustLatencyProtocolVersion) {
-      pa_flags |= PA_STREAM_ADJUST_LATENCY;
-    }
-  }
-
-  Lock();
-
-  pa_stream *stream = symbol_table_.pa_stream_new()(context_, stream_name,
-      &spec, NULL);
-  if (!stream) {
-    LOG(LS_ERROR) << "Can't create pa_stream";
-    goto done;
-  }
-
-  // Set a state callback to log errors.
-  symbol_table_.pa_stream_set_state_callback()(stream,
-                                               &StreamStateChangedCallbackThunk,
-                                               this);
-
-  stream_interface = (this->*connect_fn)(
-      stream,
-      dev,
-      params.flags,
-      static_cast<pa_stream_flags_t>(pa_flags),
-      params.latency,
-      spec);
-  if (!stream_interface) {
-    LOG(LS_ERROR) << "Can't connect stream to " << dev;
-    symbol_table_.pa_stream_unref()(stream);
-  }
-
- done:
-  Unlock();
-  return stream_interface;
-}
-
-// Must be called with the lock held.
-SoundOutputStreamInterface *PulseAudioSoundSystem::ConnectOutputStream(
-    pa_stream *stream,
-    const char *dev,
-    int flags,
-    pa_stream_flags_t pa_flags,
-    int latency,
-    const pa_sample_spec &spec) {
-  pa_buffer_attr attr = {0};
-  pa_buffer_attr *pattr = NULL;
-  if (latency != kNoLatencyRequirements) {
-    // kLowLatency is 0, so we treat it the same as a request for zero latency.
-    ssize_t bytes_per_sec = symbol_table_.pa_bytes_per_second()(&spec);
-    latency = rtc::_max(
-        latency,
-        static_cast<int>(
-            bytes_per_sec * kPlaybackLatencyMinimumMsecs /
-            rtc::kNumMicrosecsPerSec));
-    FillPlaybackBufferAttr(latency, &attr);
-    pattr = &attr;
-  }
-  if (symbol_table_.pa_stream_connect_playback()(
-          stream,
-          dev,
-          pattr,
-          pa_flags,
-          // Let server choose volume
-          NULL,
-          // Not synchronized to any other playout
-          NULL) != 0) {
-    return NULL;
-  }
-  return new PulseAudioOutputStream(this, stream, flags, latency);
-}
-
-// Must be called with the lock held.
-SoundInputStreamInterface *PulseAudioSoundSystem::ConnectInputStream(
-    pa_stream *stream,
-    const char *dev,
-    int flags,
-    pa_stream_flags_t pa_flags,
-    int latency,
-    const pa_sample_spec &spec) {
-  pa_buffer_attr attr = {0};
-  pa_buffer_attr *pattr = NULL;
-  if (latency != kNoLatencyRequirements) {
-    size_t bytes_per_sec = symbol_table_.pa_bytes_per_second()(&spec);
-    if (latency == kLowLatency) {
-      latency = bytes_per_sec * kLowCaptureLatencyMsecs /
-          rtc::kNumMicrosecsPerSec;
-    }
-    // Note: fragsize specifies a maximum transfer size, not a minimum, so it is
-    // not possible to force a high latency setting, only a low one.
-    attr.fragsize = latency;
-    attr.maxlength = latency + bytes_per_sec * kCaptureBufferExtraMsecs /
-        rtc::kNumMicrosecsPerSec;
-    LOG(LS_VERBOSE) << "Configuring latency = " << attr.fragsize
-                    << ", maxlength = " << attr.maxlength;
-    pattr = &attr;
-  }
-  if (symbol_table_.pa_stream_connect_record()(stream,
-                                               dev,
-                                               pattr,
-                                               pa_flags) != 0) {
-    return NULL;
-  }
-  return new PulseAudioInputStream(this, stream, flags);
-}
-
-// Must be called with the lock held.
-bool PulseAudioSoundSystem::FinishOperation(pa_operation *op) {
-  if (!op) {
-    LOG(LS_ERROR) << "Failed to start operation";
-    return false;
-  }
-
-  do {
-    Wait();
-  } while (symbol_table_.pa_operation_get_state()(op) == PA_OPERATION_RUNNING);
-
-  symbol_table_.pa_operation_unref()(op);
-
-  return true;
-}
-
-inline void PulseAudioSoundSystem::Lock() {
-  symbol_table_.pa_threaded_mainloop_lock()(mainloop_);
-}
-
-inline void PulseAudioSoundSystem::Unlock() {
-  symbol_table_.pa_threaded_mainloop_unlock()(mainloop_);
-}
-
-// Must be called with the lock held.
-inline void PulseAudioSoundSystem::Wait() {
-  symbol_table_.pa_threaded_mainloop_wait()(mainloop_);
-}
-
-// Must be called with the lock held.
-inline void PulseAudioSoundSystem::Signal() {
-  symbol_table_.pa_threaded_mainloop_signal()(mainloop_, 0);
-}
-
-// Must be called with the lock held.
-const char *PulseAudioSoundSystem::LastError() {
-  return symbol_table_.pa_strerror()(symbol_table_.pa_context_errno()(
-      context_));
-}
-
-}  // namespace cricket
-
-#endif  // HAVE_LIBPULSE
diff --git a/sound/pulseaudiosoundsystem.h b/sound/pulseaudiosoundsystem.h
deleted file mode 100644
index 16938d6..0000000
--- a/sound/pulseaudiosoundsystem.h
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_PULSEAUDIOSOUNDSYSTEM_H_
-#define TALK_SOUND_PULSEAUDIOSOUNDSYSTEM_H_
-
-#ifdef HAVE_LIBPULSE
-
-#include "talk/sound/pulseaudiosymboltable.h"
-#include "talk/sound/soundsysteminterface.h"
-#include "webrtc/base/constructormagic.h"
-
-namespace cricket {
-
-class PulseAudioInputStream;
-class PulseAudioOutputStream;
-class PulseAudioStream;
-
-// Sound system implementation for PulseAudio, a cross-platform sound server
-// (but commonly used only on Linux, which is the only platform we support
-// it on).
-// Init(), Terminate(), and the destructor should never be invoked concurrently,
-// but all other methods are thread-safe.
-class PulseAudioSoundSystem : public SoundSystemInterface {
-  friend class PulseAudioInputStream;
-  friend class PulseAudioOutputStream;
-  friend class PulseAudioStream;
- public:
-  static SoundSystemInterface *Create() {
-    return new PulseAudioSoundSystem();
-  }
-
-  PulseAudioSoundSystem();
-
-  virtual ~PulseAudioSoundSystem();
-
-  virtual bool Init();
-  virtual void Terminate();
-
-  virtual bool EnumeratePlaybackDevices(SoundDeviceLocatorList *devices);
-  virtual bool EnumerateCaptureDevices(SoundDeviceLocatorList *devices);
-
-  virtual bool GetDefaultPlaybackDevice(SoundDeviceLocator **device);
-  virtual bool GetDefaultCaptureDevice(SoundDeviceLocator **device);
-
-  virtual SoundOutputStreamInterface *OpenPlaybackDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params);
-  virtual SoundInputStreamInterface *OpenCaptureDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params);
-
-  virtual const char *GetName() const;
-
- private:
-  bool IsInitialized();
-
-  static void ConnectToPulseCallbackThunk(pa_context *context, void *userdata);
-
-  void OnConnectToPulseCallback(pa_context *context, bool *connect_done);
-
-  bool ConnectToPulse(pa_context *context);
-
-  pa_context *CreateNewConnection();
-
-  template <typename InfoStruct>
-  bool EnumerateDevices(SoundDeviceLocatorList *devices,
-                        pa_operation *(*enumerate_fn)(
-                            pa_context *c,
-                            void (*callback_fn)(
-                                pa_context *c,
-                                const InfoStruct *i,
-                                int eol,
-                                void *userdata),
-                            void *userdata),
-                        void (*callback_fn)(
-                            pa_context *c,
-                            const InfoStruct *i,
-                            int eol,
-                            void *userdata));
-
-  static void EnumeratePlaybackDevicesCallbackThunk(pa_context *unused,
-                                                    const pa_sink_info *info,
-                                                    int eol,
-                                                    void *userdata);
-
-  static void EnumerateCaptureDevicesCallbackThunk(pa_context *unused,
-                                                   const pa_source_info *info,
-                                                   int eol,
-                                                   void *userdata);
-
-  void OnEnumeratePlaybackDevicesCallback(
-      SoundDeviceLocatorList *devices,
-      const pa_sink_info *info,
-      int eol);
-
-  void OnEnumerateCaptureDevicesCallback(
-      SoundDeviceLocatorList *devices,
-      const pa_source_info *info,
-      int eol);
-
-  template <const char *(pa_server_info::*field)>
-  static void GetDefaultDeviceCallbackThunk(
-      pa_context *unused,
-      const pa_server_info *info,
-      void *userdata);
-
-  template <const char *(pa_server_info::*field)>
-  void OnGetDefaultDeviceCallback(
-      const pa_server_info *info,
-      SoundDeviceLocator **device);
-
-  template <const char *(pa_server_info::*field)>
-  bool GetDefaultDevice(SoundDeviceLocator **device);
-
-  static void StreamStateChangedCallbackThunk(pa_stream *stream,
-                                              void *userdata);
-
-  void OnStreamStateChangedCallback(pa_stream *stream);
-
-  template <typename StreamInterface>
-  StreamInterface *OpenDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params,
-      const char *stream_name,
-      StreamInterface *(PulseAudioSoundSystem::*connect_fn)(
-          pa_stream *stream,
-          const char *dev,
-          int flags,
-          pa_stream_flags_t pa_flags,
-          int latency,
-          const pa_sample_spec &spec));
-
-  SoundOutputStreamInterface *ConnectOutputStream(
-      pa_stream *stream,
-      const char *dev,
-      int flags,
-      pa_stream_flags_t pa_flags,
-      int latency,
-      const pa_sample_spec &spec);
-
-  SoundInputStreamInterface *ConnectInputStream(
-      pa_stream *stream,
-      const char *dev,
-      int flags,
-      pa_stream_flags_t pa_flags,
-      int latency,
-      const pa_sample_spec &spec);
-
-  bool FinishOperation(pa_operation *op);
-
-  void Lock();
-  void Unlock();
-  void Wait();
-  void Signal();
-
-  const char *LastError();
-
-  pa_threaded_mainloop *mainloop_;
-  pa_context *context_;
-  PulseAudioSymbolTable symbol_table_;
-
-  DISALLOW_COPY_AND_ASSIGN(PulseAudioSoundSystem);
-};
-
-}  // namespace cricket
-
-#endif  // HAVE_LIBPULSE
-
-#endif  // TALK_SOUND_PULSEAUDIOSOUNDSYSTEM_H_
diff --git a/sound/pulseaudiosymboltable.cc b/sound/pulseaudiosymboltable.cc
deleted file mode 100644
index 344f354..0000000
--- a/sound/pulseaudiosymboltable.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifdef HAVE_LIBPULSE
-
-#include "talk/sound/pulseaudiosymboltable.h"
-
-namespace cricket {
-
-#define LATE_BINDING_SYMBOL_TABLE_CLASS_NAME PULSE_AUDIO_SYMBOLS_CLASS_NAME
-#define LATE_BINDING_SYMBOL_TABLE_SYMBOLS_LIST PULSE_AUDIO_SYMBOLS_LIST
-#define LATE_BINDING_SYMBOL_TABLE_DLL_NAME "libpulse.so.0"
-#include "webrtc/base/latebindingsymboltable.cc.def"
-
-}  // namespace cricket
-
-#endif  // HAVE_LIBPULSE
diff --git a/sound/pulseaudiosymboltable.h b/sound/pulseaudiosymboltable.h
deleted file mode 100644
index 46bddea..0000000
--- a/sound/pulseaudiosymboltable.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_PULSEAUDIOSYMBOLTABLE_H_
-#define TALK_SOUND_PULSEAUDIOSYMBOLTABLE_H_
-
-#include <pulse/context.h>
-#include <pulse/def.h>
-#include <pulse/error.h>
-#include <pulse/introspect.h>
-#include <pulse/stream.h>
-#include <pulse/thread-mainloop.h>
-
-#include "webrtc/base/latebindingsymboltable.h"
-
-namespace cricket {
-
-#define PULSE_AUDIO_SYMBOLS_CLASS_NAME PulseAudioSymbolTable
-// The PulseAudio symbols we need, as an X-Macro list.
-// This list must contain precisely every libpulse function that is used in
-// pulseaudiosoundsystem.cc.
-#define PULSE_AUDIO_SYMBOLS_LIST \
-  X(pa_bytes_per_second) \
-  X(pa_context_connect) \
-  X(pa_context_disconnect) \
-  X(pa_context_errno) \
-  X(pa_context_get_protocol_version) \
-  X(pa_context_get_server_info) \
-  X(pa_context_get_sink_info_list) \
-  X(pa_context_get_sink_input_info) \
-  X(pa_context_get_source_info_by_index) \
-  X(pa_context_get_source_info_list) \
-  X(pa_context_get_state) \
-  X(pa_context_new) \
-  X(pa_context_set_sink_input_volume) \
-  X(pa_context_set_source_volume_by_index) \
-  X(pa_context_set_state_callback) \
-  X(pa_context_unref) \
-  X(pa_cvolume_set) \
-  X(pa_operation_get_state) \
-  X(pa_operation_unref) \
-  X(pa_stream_connect_playback) \
-  X(pa_stream_connect_record) \
-  X(pa_stream_disconnect) \
-  X(pa_stream_drop) \
-  X(pa_stream_get_device_index) \
-  X(pa_stream_get_index) \
-  X(pa_stream_get_latency) \
-  X(pa_stream_get_sample_spec) \
-  X(pa_stream_get_state) \
-  X(pa_stream_new) \
-  X(pa_stream_peek) \
-  X(pa_stream_readable_size) \
-  X(pa_stream_set_buffer_attr) \
-  X(pa_stream_set_overflow_callback) \
-  X(pa_stream_set_read_callback) \
-  X(pa_stream_set_state_callback) \
-  X(pa_stream_set_underflow_callback) \
-  X(pa_stream_set_write_callback) \
-  X(pa_stream_unref) \
-  X(pa_stream_writable_size) \
-  X(pa_stream_write) \
-  X(pa_strerror) \
-  X(pa_threaded_mainloop_free) \
-  X(pa_threaded_mainloop_get_api) \
-  X(pa_threaded_mainloop_lock) \
-  X(pa_threaded_mainloop_new) \
-  X(pa_threaded_mainloop_signal) \
-  X(pa_threaded_mainloop_start) \
-  X(pa_threaded_mainloop_stop) \
-  X(pa_threaded_mainloop_unlock) \
-  X(pa_threaded_mainloop_wait)
-
-#define LATE_BINDING_SYMBOL_TABLE_CLASS_NAME PULSE_AUDIO_SYMBOLS_CLASS_NAME
-#define LATE_BINDING_SYMBOL_TABLE_SYMBOLS_LIST PULSE_AUDIO_SYMBOLS_LIST
-#include "webrtc/base/latebindingsymboltable.h.def"
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_PULSEAUDIOSYMBOLTABLE_H_
diff --git a/sound/sounddevicelocator.h b/sound/sounddevicelocator.h
deleted file mode 100644
index 420226f..0000000
--- a/sound/sounddevicelocator.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_SOUNDDEVICELOCATOR_H_
-#define TALK_SOUND_SOUNDDEVICELOCATOR_H_
-
-#include <string>
-
-#include "webrtc/base/constructormagic.h"
-
-namespace cricket {
-
-// A simple container for holding the name of a device and any additional id
-// information needed to locate and open it. Implementations of
-// SoundSystemInterface must subclass this to add any id information that they
-// need.
-class SoundDeviceLocator {
- public:
-  virtual ~SoundDeviceLocator() {}
-
-  // Human-readable name for the device.
-  const std::string &name() const { return name_; }
-
-  // Name sound system uses to locate this device.
-  const std::string &device_name() const { return device_name_; }
-
-  // Makes a duplicate of this locator.
-  virtual SoundDeviceLocator *Copy() const = 0;
-
- protected:
-  SoundDeviceLocator(const std::string &name,
-                     const std::string &device_name)
-      : name_(name), device_name_(device_name) {}
-
-  explicit SoundDeviceLocator(const SoundDeviceLocator &that)
-      : name_(that.name_), device_name_(that.device_name_) {}
-
-  std::string name_;
-  std::string device_name_;
-
- private:
-  DISALLOW_ASSIGN(SoundDeviceLocator);
-};
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_SOUNDDEVICELOCATOR_H_
diff --git a/sound/soundinputstreaminterface.h b/sound/soundinputstreaminterface.h
deleted file mode 100644
index e557392..0000000
--- a/sound/soundinputstreaminterface.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_SOUNDINPUTSTREAMINTERFACE_H_
-#define TALK_SOUND_SOUNDINPUTSTREAMINTERFACE_H_
-
-#include "webrtc/base/constructormagic.h"
-#include "webrtc/base/sigslot.h"
-
-namespace cricket {
-
-// Interface for consuming an input stream from a recording device.
-// Semantics and thread-safety of StartReading()/StopReading() are the same as
-// for rtc::Worker.
-class SoundInputStreamInterface {
- public:
-  virtual ~SoundInputStreamInterface() {}
-
-  // Starts the reading of samples on the current thread.
-  virtual bool StartReading() = 0;
-  // Stops the reading of samples.
-  virtual bool StopReading() = 0;
-
-  // Retrieves the current input volume for this stream. Nominal range is
-  // defined by SoundSystemInterface::k(Max|Min)Volume, but values exceeding the
-  // max may be possible in some implementations. This call retrieves the actual
-  // volume currently in use by the OS, not a cached value from a previous
-  // (Get|Set)Volume() call.
-  virtual bool GetVolume(int *volume) = 0;
-
-  // Changes the input volume for this stream. Nominal range is defined by
-  // SoundSystemInterface::k(Max|Min)Volume. The effect of exceeding kMaxVolume
-  // is implementation-defined.
-  virtual bool SetVolume(int volume) = 0;
-
-  // Closes this stream object. If currently reading then this may only be
-  // called from the reading thread.
-  virtual bool Close() = 0;
-
-  // Get the latency of the stream.
-  virtual int LatencyUsecs() = 0;
-
-  // Notifies the consumer of new data read from the device.
-  // The first parameter is a pointer to the data read, and is only valid for
-  // the duration of the call.
-  // The second parameter is the amount of data read in bytes (i.e., the valid
-  // length of the memory pointed to).
-  // The 3rd parameter is the stream that is issuing the callback.
-  sigslot::signal3<const void *, size_t,
-      SoundInputStreamInterface *> SignalSamplesRead;
-
- protected:
-  SoundInputStreamInterface() {}
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(SoundInputStreamInterface);
-};
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_SOUNDOUTPUTSTREAMINTERFACE_H_
diff --git a/sound/soundoutputstreaminterface.h b/sound/soundoutputstreaminterface.h
deleted file mode 100644
index 294906d..0000000
--- a/sound/soundoutputstreaminterface.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_SOUNDOUTPUTSTREAMINTERFACE_H_
-#define TALK_SOUND_SOUNDOUTPUTSTREAMINTERFACE_H_
-
-#include "webrtc/base/constructormagic.h"
-#include "webrtc/base/sigslot.h"
-
-namespace cricket {
-
-// Interface for outputting a stream to a playback device.
-// Semantics and thread-safety of EnableBufferMonitoring()/
-// DisableBufferMonitoring() are the same as for rtc::Worker.
-class SoundOutputStreamInterface {
- public:
-  virtual ~SoundOutputStreamInterface() {}
-
-  // Enables monitoring the available buffer space on the current thread.
-  virtual bool EnableBufferMonitoring() = 0;
-  // Disables the monitoring.
-  virtual bool DisableBufferMonitoring() = 0;
-
-  // Write the given samples to the devices. If currently monitoring then this
-  // may only be called from the monitoring thread.
-  virtual bool WriteSamples(const void *sample_data,
-                            size_t size) = 0;
-
-  // Retrieves the current output volume for this stream. Nominal range is
-  // defined by SoundSystemInterface::k(Max|Min)Volume, but values exceeding the
-  // max may be possible in some implementations. This call retrieves the actual
-  // volume currently in use by the OS, not a cached value from a previous
-  // (Get|Set)Volume() call.
-  virtual bool GetVolume(int *volume) = 0;
-
-  // Changes the output volume for this stream. Nominal range is defined by
-  // SoundSystemInterface::k(Max|Min)Volume. The effect of exceeding kMaxVolume
-  // is implementation-defined.
-  virtual bool SetVolume(int volume) = 0;
-
-  // Closes this stream object. If currently monitoring then this may only be
-  // called from the monitoring thread.
-  virtual bool Close() = 0;
-
-  // Get the latency of the stream.
-  virtual int LatencyUsecs() = 0;
-
-  // Notifies the producer of the available buffer space for writes.
-  // It fires continuously as long as the space is greater than zero.
-  // The first parameter is the amount of buffer space available for data to
-  // be written (i.e., the maximum amount of data that can be written right now
-  // with WriteSamples() without blocking).
-  // The 2nd parameter is the stream that is issuing the callback.
-  sigslot::signal2<size_t, SoundOutputStreamInterface *> SignalBufferSpace;
-
- protected:
-  SoundOutputStreamInterface() {}
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(SoundOutputStreamInterface);
-};
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_SOUNDOUTPUTSTREAMINTERFACE_H_
diff --git a/sound/soundsystemfactory.h b/sound/soundsystemfactory.h
deleted file mode 100644
index 06a1c3f..0000000
--- a/sound/soundsystemfactory.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice, 
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products 
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_SOUNDSYSTEMFACTORY_H_
-#define TALK_SOUND_SOUNDSYSTEMFACTORY_H_
-
-#include "webrtc/base/referencecountedsingletonfactory.h"
-
-namespace cricket {
-
-class SoundSystemInterface;
-
-typedef rtc::ReferenceCountedSingletonFactory<SoundSystemInterface>
-    SoundSystemFactory;
-
-typedef rtc::rcsf_ptr<SoundSystemInterface> SoundSystemHandle;
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_SOUNDSYSTEMFACTORY_H_
diff --git a/sound/soundsysteminterface.cc b/sound/soundsysteminterface.cc
deleted file mode 100644
index b432262..0000000
--- a/sound/soundsysteminterface.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/sound/soundsysteminterface.h"
-
-#include "talk/sound/sounddevicelocator.h"
-
-namespace cricket {
-
-void SoundSystemInterface::ClearSoundDeviceLocatorList(
-    SoundSystemInterface::SoundDeviceLocatorList *devices) {
-  for (SoundDeviceLocatorList::iterator i = devices->begin();
-       i != devices->end();
-       ++i) {
-    if (*i) {
-      delete *i;
-    }
-  }
-  devices->clear();
-}
-
-}  // namespace cricket
diff --git a/sound/soundsysteminterface.h b/sound/soundsysteminterface.h
deleted file mode 100644
index 5d3e84b..0000000
--- a/sound/soundsysteminterface.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice,
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_SOUNDSYSTEMINTERFACE_H_
-#define TALK_SOUND_SOUNDSYSTEMINTERFACE_H_
-
-#include <vector>
-
-#include "webrtc/base/constructormagic.h"
-
-namespace cricket {
-
-class SoundDeviceLocator;
-class SoundInputStreamInterface;
-class SoundOutputStreamInterface;
-
-// Interface for a platform's sound system.
-// Implementations must guarantee thread-safety for at least the following use
-// cases:
-// 1) Concurrent enumeration and opening of devices from different threads.
-// 2) Concurrent use of different Sound(Input|Output)StreamInterface
-// instances from different threads (but concurrent use of the _same_ one from
-// different threads need not be supported).
-class SoundSystemInterface {
- public:
-  typedef std::vector<SoundDeviceLocator *> SoundDeviceLocatorList;
-
-  enum SampleFormat {
-    // Only one supported sample format at this time.
-    // The values here may be used in lookup tables, so they shouldn't change.
-    FORMAT_S16LE = 0,
-  };
-
-  enum Flags {
-    // Enable reporting the current stream latency in
-    // Sound(Input|Output)StreamInterface. See those classes for more details.
-    FLAG_REPORT_LATENCY = (1 << 0),
-  };
-
-  struct OpenParams {
-    // Format for the sound stream.
-    SampleFormat format;
-    // Sampling frequency in hertz.
-    unsigned int freq;
-    // Number of channels in the PCM stream.
-    unsigned int channels;
-    // Misc flags. Should be taken from the Flags enum above.
-    int flags;
-    // Desired latency, measured as number of bytes of sample data
-    int latency;
-  };
-
-  // Special values for the "latency" field of OpenParams.
-  // Use this one to say you don't care what the latency is. The sound system
-  // will optimize for other things instead.
-  static const int kNoLatencyRequirements = -1;
-  // Use this one to say that you want the sound system to pick an appropriate
-  // small latency value. The sound system may pick the minimum allowed one, or
-  // a slightly higher one in the event that the true minimum requires an
-  // undesirable trade-off.
-  static const int kLowLatency = 0;
- 
-  // Max value for the volume parameters for Sound(Input|Output)StreamInterface.
-  static const int kMaxVolume = 255;
-  // Min value for the volume parameters for Sound(Input|Output)StreamInterface.
-  static const int kMinVolume = 0;
-
-  // Helper for clearing a locator list and deleting the entries.
-  static void ClearSoundDeviceLocatorList(SoundDeviceLocatorList *devices);
-
-  virtual ~SoundSystemInterface() {}
-
-  virtual bool Init() = 0;
-  virtual void Terminate() = 0;
-
-  // Enumerates the available devices. (Any pre-existing locators in the lists
-  // are deleted.)
-  virtual bool EnumeratePlaybackDevices(SoundDeviceLocatorList *devices) = 0;
-  virtual bool EnumerateCaptureDevices(SoundDeviceLocatorList *devices) = 0;
-
-  // Gets a special locator for the default device.
-  virtual bool GetDefaultPlaybackDevice(SoundDeviceLocator **device) = 0;
-  virtual bool GetDefaultCaptureDevice(SoundDeviceLocator **device) = 0;
-
-  // Opens the given device, or returns NULL on error.
-  virtual SoundOutputStreamInterface *OpenPlaybackDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params) = 0;
-  virtual SoundInputStreamInterface *OpenCaptureDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params) = 0;
-
-  // A human-readable name for this sound system.
-  virtual const char *GetName() const = 0;
-
- protected:
-  SoundSystemInterface() {}
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(SoundSystemInterface);
-};
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_SOUNDSYSTEMINTERFACE_H_
diff --git a/sound/soundsystemproxy.cc b/sound/soundsystemproxy.cc
deleted file mode 100644
index 737a6bb..0000000
--- a/sound/soundsystemproxy.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice, 
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products 
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "talk/sound/soundsystemproxy.h"
-
-namespace cricket {
-
-bool SoundSystemProxy::EnumeratePlaybackDevices(
-    SoundDeviceLocatorList *devices) {
-  return wrapped_ ? wrapped_->EnumeratePlaybackDevices(devices) : false;
-}
-
-bool SoundSystemProxy::EnumerateCaptureDevices(
-    SoundDeviceLocatorList *devices) {
-  return wrapped_ ? wrapped_->EnumerateCaptureDevices(devices) : false;
-}
-
-bool SoundSystemProxy::GetDefaultPlaybackDevice(
-    SoundDeviceLocator **device) {
-  return wrapped_ ? wrapped_->GetDefaultPlaybackDevice(device) : false;
-}
-
-bool SoundSystemProxy::GetDefaultCaptureDevice(
-    SoundDeviceLocator **device) {
-  return wrapped_ ? wrapped_->GetDefaultCaptureDevice(device) : false;
-}
-
-SoundOutputStreamInterface *SoundSystemProxy::OpenPlaybackDevice(
-    const SoundDeviceLocator *device,
-    const OpenParams &params) {
-  return wrapped_ ? wrapped_->OpenPlaybackDevice(device, params) : NULL;
-}
-
-SoundInputStreamInterface *SoundSystemProxy::OpenCaptureDevice(
-    const SoundDeviceLocator *device,
-    const OpenParams &params) {
-  return wrapped_ ? wrapped_->OpenCaptureDevice(device, params) : NULL;
-}
-
-}  // namespace cricket
diff --git a/sound/soundsystemproxy.h b/sound/soundsystemproxy.h
deleted file mode 100644
index 0b8a3da..0000000
--- a/sound/soundsystemproxy.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * libjingle
- * Copyright 2004--2010, Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions are met:
- *
- *  1. Redistributions of source code must retain the above copyright notice, 
- *     this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright notice,
- *     this list of conditions and the following disclaimer in the documentation
- *     and/or other materials provided with the distribution.
- *  3. The name of the author may not be used to endorse or promote products 
- *     derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_SOUND_SOUNDSYSTEMPROXY_H_
-#define TALK_SOUND_SOUNDSYSTEMPROXY_H_
-
-#include "talk/sound/soundsysteminterface.h"
-#include "webrtc/base/basictypes.h"  // for NULL
-
-namespace cricket {
-
-// A SoundSystemProxy is a sound system that defers to another one.
-// Init(), Terminate(), and GetName() are left as pure virtual, so a sub-class
-// must define them.
-class SoundSystemProxy : public SoundSystemInterface {
- public:
-  SoundSystemProxy() : wrapped_(NULL) {}
-
-  // Each of these methods simply defers to wrapped_ if non-NULL, else fails.
-
-  virtual bool EnumeratePlaybackDevices(SoundDeviceLocatorList *devices);
-  virtual bool EnumerateCaptureDevices(SoundDeviceLocatorList *devices);
-
-  virtual bool GetDefaultPlaybackDevice(SoundDeviceLocator **device);
-  virtual bool GetDefaultCaptureDevice(SoundDeviceLocator **device);
-
-  virtual SoundOutputStreamInterface *OpenPlaybackDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params);
-  virtual SoundInputStreamInterface *OpenCaptureDevice(
-      const SoundDeviceLocator *device,
-      const OpenParams &params);
-
- protected:
-  SoundSystemInterface *wrapped_;
-};
-
-}  // namespace cricket
-
-#endif  // TALK_SOUND_SOUNDSYSTEMPROXY_H_
diff --git a/xmpp/asyncsocket.h b/xmpp/asyncsocket.h
index 5e83b63..2514742 100644
--- a/xmpp/asyncsocket.h
+++ b/xmpp/asyncsocket.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _ASYNCSOCKET_H_
-#define _ASYNCSOCKET_H_
+#ifndef TALK_XMPP_ASYNCSOCKET_H_
+#define TALK_XMPP_ASYNCSOCKET_H_
 
 #include <string>
 
@@ -86,4 +86,4 @@
 
 }
 
-#endif
+#endif  // TALK_XMPP_ASYNCSOCKET_H_
diff --git a/xmpp/chatroommodule.h b/xmpp/chatroommodule.h
index 47a7106..8358fc1 100644
--- a/xmpp/chatroommodule.h
+++ b/xmpp/chatroommodule.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _multiuserchatmodule_h_
-#define _multiuserchatmodule_h_
+#ifndef TALK_XMPP_CHATROOMMODULE_H_
+#define TALK_XMPP_CHATROOMMODULE_H_
 
 #include "talk/xmpp/module.h"
 #include "talk/xmpp/rostermodule.h"
@@ -267,4 +267,4 @@
 
 }
 
-#endif
+#endif  // TALK_XMPP_CHATROOMMODULE_H_
diff --git a/xmpp/constants.cc b/xmpp/constants.cc
index a2179ca..297eafd 100644
--- a/xmpp/constants.cc
+++ b/xmpp/constants.cc
@@ -29,9 +29,9 @@
 
 #include <string>
 
-#include "talk/xmllite/qname.h"
-#include "talk/xmllite/xmlconstants.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/xmlconstants.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/jid.h"
 #include "webrtc/base/basicdefs.h"
 
diff --git a/xmpp/constants.h b/xmpp/constants.h
index 6d94095..6aa1a54 100644
--- a/xmpp/constants.h
+++ b/xmpp/constants.h
@@ -29,7 +29,7 @@
 #define TALK_XMPP_CONSTANTS_H_
 
 #include <string>
-#include "talk/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/qname.h"
 #include "talk/xmpp/jid.h"
 
 namespace buzz {
diff --git a/xmpp/hangoutpubsubclient.cc b/xmpp/hangoutpubsubclient.cc
index 834f844..63f5bcf 100644
--- a/xmpp/hangoutpubsubclient.cc
+++ b/xmpp/hangoutpubsubclient.cc
@@ -27,8 +27,8 @@
 
 #include "talk/xmpp/hangoutpubsubclient.h"
 
-#include "talk/xmllite/qname.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/jid.h"
 #include "webrtc/base/logging.h"
diff --git a/xmpp/hangoutpubsubclient_unittest.cc b/xmpp/hangoutpubsubclient_unittest.cc
index c12b791..555ee5c 100644
--- a/xmpp/hangoutpubsubclient_unittest.cc
+++ b/xmpp/hangoutpubsubclient_unittest.cc
@@ -3,8 +3,8 @@
 
 #include <string>
 
-#include "talk/xmllite/qname.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/fakexmppclient.h"
 #include "talk/xmpp/hangoutpubsubclient.h"
diff --git a/xmpp/jid.h b/xmpp/jid.h
index 7a6370c..2bfe0d9 100644
--- a/xmpp/jid.h
+++ b/xmpp/jid.h
@@ -29,7 +29,7 @@
 #define TALK_XMPP_JID_H_
 
 #include <string>
-#include "talk/xmllite/xmlconstants.h"
+#include "webrtc/libjingle/xmllite/xmlconstants.h"
 #include "webrtc/base/basictypes.h"
 
 namespace buzz {
diff --git a/xmpp/jingleinfotask.h b/xmpp/jingleinfotask.h
index 5865a77..18cb5a9 100644
--- a/xmpp/jingleinfotask.h
+++ b/xmpp/jingleinfotask.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef TALK_EXAMPLES_LOGIN_JINGLEINFOTASK_H_
-#define TALK_EXAMPLES_LOGIN_JINGLEINFOTASK_H_
+#ifndef TALK_XMPP_JINGLEINFOTASK_H_
+#define TALK_XMPP_JINGLEINFOTASK_H_
 
 #include <vector>
 
@@ -58,4 +58,4 @@
 };
 }
 
-#endif  // TALK_EXAMPLES_LOGIN_JINGLEINFOTASK_H_
+#endif  // TALK_XMPP_JINGLEINFOTASK_H_
diff --git a/xmpp/module.h b/xmpp/module.h
index 37396ef..a5d0687 100644
--- a/xmpp/module.h
+++ b/xmpp/module.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _module_h_
-#define _module_h_
+#ifndef TALK_XMPP_MODULE_H_
+#define TALK_XMPP_MODULE_H_
 
 #include "talk/xmpp/xmppengine.h"
 
@@ -49,4 +49,4 @@
 };
 
 }
-#endif
+#endif  // TALK_XMPP_MODULE_H_
diff --git a/xmpp/moduleimpl.h b/xmpp/moduleimpl.h
index 32a182b..897bfce 100644
--- a/xmpp/moduleimpl.h
+++ b/xmpp/moduleimpl.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _moduleimpl_h_
-#define _moduleimpl_h_
+#ifndef TALK_XMPP_MODULEIMPL_H_
+#define TALK_XMPP_MODULEIMPL_H_
 
 #include "talk/xmpp/module.h"
 #include "talk/xmpp/xmppengine.h"
@@ -90,4 +90,4 @@
 
 }
 
-#endif
+#endif  // TALK_XMPP_MODULEIMPL_H_
diff --git a/xmpp/mucroomconfigtask_unittest.cc b/xmpp/mucroomconfigtask_unittest.cc
index 00a9c6e..bf5e7b4 100644
--- a/xmpp/mucroomconfigtask_unittest.cc
+++ b/xmpp/mucroomconfigtask_unittest.cc
@@ -28,7 +28,7 @@
 #include <string>
 #include <vector>
 
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/fakexmppclient.h"
 #include "talk/xmpp/mucroomconfigtask.h"
diff --git a/xmpp/mucroomdiscoverytask_unittest.cc b/xmpp/mucroomdiscoverytask_unittest.cc
index ef7e477..e1a633e 100644
--- a/xmpp/mucroomdiscoverytask_unittest.cc
+++ b/xmpp/mucroomdiscoverytask_unittest.cc
@@ -28,7 +28,7 @@
 #include <string>
 #include <vector>
 
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/fakexmppclient.h"
 #include "talk/xmpp/mucroomdiscoverytask.h"
diff --git a/xmpp/mucroomlookuptask_unittest.cc b/xmpp/mucroomlookuptask_unittest.cc
index 341fd3f..03be292 100644
--- a/xmpp/mucroomlookuptask_unittest.cc
+++ b/xmpp/mucroomlookuptask_unittest.cc
@@ -28,7 +28,7 @@
 #include <string>
 #include <vector>
 
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/fakexmppclient.h"
 #include "talk/xmpp/mucroomlookuptask.h"
diff --git a/xmpp/mucroomuniquehangoutidtask_unittest.cc b/xmpp/mucroomuniquehangoutidtask_unittest.cc
index 5cabcbe..42bed13 100644
--- a/xmpp/mucroomuniquehangoutidtask_unittest.cc
+++ b/xmpp/mucroomuniquehangoutidtask_unittest.cc
@@ -28,7 +28,7 @@
 #include <string>
 #include <vector>
 
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/fakexmppclient.h"
 #include "talk/xmpp/mucroomuniquehangoutidtask.h"
diff --git a/xmpp/pingtask_unittest.cc b/xmpp/pingtask_unittest.cc
index ce595a8..fe88a5c 100644
--- a/xmpp/pingtask_unittest.cc
+++ b/xmpp/pingtask_unittest.cc
@@ -28,7 +28,7 @@
 #include <string>
 #include <vector>
 
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/fakexmppclient.h"
 #include "talk/xmpp/pingtask.h"
diff --git a/xmpp/plainsaslhandler.h b/xmpp/plainsaslhandler.h
index 2ca364c..31032e4 100644
--- a/xmpp/plainsaslhandler.h
+++ b/xmpp/plainsaslhandler.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _PLAINSASLHANDLER_H_
-#define _PLAINSASLHANDLER_H_
+#ifndef TALK_XMPP_PLAINSASLHANDLER_H_
+#define TALK_XMPP_PLAINSASLHANDLER_H_
 
 #include <algorithm>
 #include "talk/xmpp/saslhandler.h"
@@ -78,4 +78,4 @@
 
 }
 
-#endif
+#endif  // TALK_XMPP_PLAINSASLHANDLER_H_
diff --git a/xmpp/presenceouttask.h b/xmpp/presenceouttask.h
index a72457e..53bbae5 100644
--- a/xmpp/presenceouttask.h
+++ b/xmpp/presenceouttask.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _PRESENCEOUTTASK_H_
-#define _PRESENCEOUTTASK_H_
+#ifndef TALK_XMPP_PRESENCEOUTTASK_H_
+#define TALK_XMPP_PRESENCEOUTTASK_H_
 
 #include "talk/xmpp/presencestatus.h"
 #include "talk/xmpp/xmppengine.h"
@@ -51,4 +51,4 @@
 
 }
 
-#endif
+#endif  // TALK_XMPP_PRESENCEOUTTASK_H_
diff --git a/xmpp/pubsub_task.h b/xmpp/pubsub_task.h
index 45a7462..2787cbc 100644
--- a/xmpp/pubsub_task.h
+++ b/xmpp/pubsub_task.h
@@ -30,7 +30,7 @@
 
 #include <map>
 #include <string>
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/jid.h"
 #include "talk/xmpp/xmpptask.h"
 
diff --git a/xmpp/pubsubclient_unittest.cc b/xmpp/pubsubclient_unittest.cc
index 135dc37..f191a18 100644
--- a/xmpp/pubsubclient_unittest.cc
+++ b/xmpp/pubsubclient_unittest.cc
@@ -3,8 +3,8 @@
 
 #include <string>
 
-#include "talk/xmllite/qname.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/fakexmppclient.h"
 #include "talk/xmpp/jid.h"
diff --git a/xmpp/pubsubstateclient.h b/xmpp/pubsubstateclient.h
index 2acbdaa..09ef0f4 100644
--- a/xmpp/pubsubstateclient.h
+++ b/xmpp/pubsubstateclient.h
@@ -32,8 +32,8 @@
 #include <string>
 #include <vector>
 
-#include "talk/xmllite/qname.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/jid.h"
 #include "talk/xmpp/pubsubclient.h"
diff --git a/xmpp/pubsubtasks_unittest.cc b/xmpp/pubsubtasks_unittest.cc
index 98cb7c1..48cd04a 100644
--- a/xmpp/pubsubtasks_unittest.cc
+++ b/xmpp/pubsubtasks_unittest.cc
@@ -3,8 +3,8 @@
 
 #include <string>
 
-#include "talk/xmllite/qname.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/fakexmppclient.h"
 #include "talk/xmpp/iqtask.h"
diff --git a/xmpp/rostermodule.h b/xmpp/rostermodule.h
index 7e14dc1..dfb647d 100644
--- a/xmpp/rostermodule.h
+++ b/xmpp/rostermodule.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _rostermodule_h_
-#define _rostermodule_h_
+#ifndef TALK_XMPP_ROSTERMODULE_H_
+#define TALK_XMPP_ROSTERMODULE_H_
 
 #include "talk/xmpp/module.h"
 
@@ -345,4 +345,4 @@
 
 }
 
-#endif
+#endif  // TALK_XMPP_ROSTERMODULE_H_
diff --git a/xmpp/rostermodule_unittest.cc b/xmpp/rostermodule_unittest.cc
index 0e2b268..cb7f773 100644
--- a/xmpp/rostermodule_unittest.cc
+++ b/xmpp/rostermodule_unittest.cc
@@ -29,7 +29,7 @@
 #include <sstream>
 #include <string>
 
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/rostermodule.h"
 #include "talk/xmpp/util_unittest.h"
diff --git a/xmpp/rostermoduleimpl.h b/xmpp/rostermoduleimpl.h
index a6b15cf..37d1117 100644
--- a/xmpp/rostermoduleimpl.h
+++ b/xmpp/rostermoduleimpl.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _rostermoduleimpl_h_
-#define _rostermoduleimpl_h_
+#ifndef TALK_XMPP_ROSTERMODULEIMPL_H_
+#define TALK_XMPP_ROSTERMODULEIMPL_H_
 
 #include "talk/xmpp/moduleimpl.h"
 #include "talk/xmpp/rostermodule.h"
@@ -299,4 +299,4 @@
 
 }
 
-#endif
+#endif  // TALK_XMPP_ROSTERMODULEIMPL_H_
diff --git a/xmpp/saslcookiemechanism.h b/xmpp/saslcookiemechanism.h
index 7a19d3e..eda142c 100644
--- a/xmpp/saslcookiemechanism.h
+++ b/xmpp/saslcookiemechanism.h
@@ -28,8 +28,8 @@
 #ifndef TALK_XMPP_SASLCOOKIEMECHANISM_H_
 #define TALK_XMPP_SASLCOOKIEMECHANISM_H_
 
-#include "talk/xmllite/qname.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/saslmechanism.h"
 
diff --git a/xmpp/saslhandler.h b/xmpp/saslhandler.h
index bead8aa..a4a73e4 100644
--- a/xmpp/saslhandler.h
+++ b/xmpp/saslhandler.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _SASLHANDLER_H_
-#define _SASLHANDLER_H_
+#ifndef TALK_XMPP_SASLHANDLER_H_
+#define TALK_XMPP_SASLHANDLER_H_
 
 #include <string>
 #include <vector>
@@ -56,4 +56,4 @@
 
 }
 
-#endif
+#endif  // TALK_XMPP_SASLHANDLER_H_
diff --git a/xmpp/saslmechanism.cc b/xmpp/saslmechanism.cc
index 488007c..8877084 100644
--- a/xmpp/saslmechanism.cc
+++ b/xmpp/saslmechanism.cc
@@ -25,7 +25,7 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/saslmechanism.h"
 #include "webrtc/base/base64.h"
diff --git a/xmpp/saslmechanism.h b/xmpp/saslmechanism.h
index f2e5adc..ee419d1 100644
--- a/xmpp/saslmechanism.h
+++ b/xmpp/saslmechanism.h
@@ -25,8 +25,8 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _SASLMECHANISM_H_
-#define _SASLMECHANISM_H_
+#ifndef TALK_XMPP_SASLMECHANISM_H_
+#define TALK_XMPP_SASLMECHANISM_H_
 
 #include <string>
 
@@ -71,4 +71,4 @@
 
 }
 
-#endif
+#endif  // TALK_XMPP_SASLMECHANISM_H_
diff --git a/xmpp/util_unittest.cc b/xmpp/util_unittest.cc
index 66fb5ef..3e47d3f 100644
--- a/xmpp/util_unittest.cc
+++ b/xmpp/util_unittest.cc
@@ -4,7 +4,7 @@
 #include <iostream>
 #include <sstream>
 #include <string>
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/util_unittest.h"
 #include "talk/xmpp/xmppengine.h"
 #include "webrtc/base/gunit.h"
diff --git a/xmpp/xmppengine.h b/xmpp/xmppengine.h
index 68f10fe..461e90f 100644
--- a/xmpp/xmppengine.h
+++ b/xmpp/xmppengine.h
@@ -25,12 +25,12 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _xmppengine_h_
-#define _xmppengine_h_
+#ifndef TALK_XMPP_XMPPENGINE_H_
+#define TALK_XMPP_XMPPENGINE_H_
 
 // also part of the API
-#include "talk/xmllite/qname.h"
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/qname.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/jid.h"
 
 
@@ -346,4 +346,4 @@
   } while (false)                     \
 
 
-#endif
+#endif  // TALK_XMPP_XMPPENGINE_H_
diff --git a/xmpp/xmppengine_unittest.cc b/xmpp/xmppengine_unittest.cc
index 8d6c858..b519a65 100644
--- a/xmpp/xmppengine_unittest.cc
+++ b/xmpp/xmppengine_unittest.cc
@@ -4,7 +4,7 @@
 #include <iostream>
 #include <sstream>
 #include <string>
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/plainsaslhandler.h"
 #include "talk/xmpp/saslplainmechanism.h"
diff --git a/xmpp/xmppengineimpl.cc b/xmpp/xmppengineimpl.cc
index bba96a4..5de9de7 100644
--- a/xmpp/xmppengineimpl.cc
+++ b/xmpp/xmppengineimpl.cc
@@ -31,8 +31,8 @@
 #include <sstream>
 #include <vector>
 
-#include "talk/xmllite/xmlelement.h"
-#include "talk/xmllite/xmlprinter.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlprinter.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/saslhandler.h"
 #include "talk/xmpp/xmpplogintask.h"
diff --git a/xmpp/xmpplogintask.cc b/xmpp/xmpplogintask.cc
index 2183c40..a48a94c 100644
--- a/xmpp/xmpplogintask.cc
+++ b/xmpp/xmpplogintask.cc
@@ -30,7 +30,7 @@
 #include <string>
 #include <vector>
 
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/jid.h"
 #include "talk/xmpp/saslmechanism.h"
diff --git a/xmpp/xmpplogintask_unittest.cc b/xmpp/xmpplogintask_unittest.cc
index 1cc16cc..ae9a554 100644
--- a/xmpp/xmpplogintask_unittest.cc
+++ b/xmpp/xmpplogintask_unittest.cc
@@ -4,7 +4,7 @@
 #include <iostream>
 #include <sstream>
 #include <string>
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "talk/xmpp/plainsaslhandler.h"
 #include "talk/xmpp/saslplainmechanism.h"
diff --git a/xmpp/xmppstanzaparser.cc b/xmpp/xmppstanzaparser.cc
index 3e6ad47..4795839 100644
--- a/xmpp/xmppstanzaparser.cc
+++ b/xmpp/xmppstanzaparser.cc
@@ -27,7 +27,7 @@
 
 #include "talk/xmpp/xmppstanzaparser.h"
 
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/constants.h"
 #include "webrtc/base/common.h"
 #ifdef EXPAT_RELATIVE_PATH
diff --git a/xmpp/xmppstanzaparser.h b/xmpp/xmppstanzaparser.h
index d9a8933..ffb3275 100644
--- a/xmpp/xmppstanzaparser.h
+++ b/xmpp/xmppstanzaparser.h
@@ -25,11 +25,11 @@
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _xmppstanzaparser_h_
-#define _xmppstanzaparser_h_
+#ifndef TALK_XMPP_XMPPSTANZAPARSER_H_
+#define TALK_XMPP_XMPPSTANZAPARSER_H_
 
-#include "talk/xmllite/xmlbuilder.h"
-#include "talk/xmllite/xmlparser.h"
+#include "webrtc/libjingle/xmllite/xmlbuilder.h"
+#include "webrtc/libjingle/xmllite/xmlparser.h"
 
 
 namespace buzz {
@@ -94,4 +94,4 @@
 
 }
 
-#endif
+#endif  // TALK_XMPP_XMPPSTANZAPARSER_H_
diff --git a/xmpp/xmppstanzaparser_unittest.cc b/xmpp/xmppstanzaparser_unittest.cc
index 4e60d81..0b114c0 100644
--- a/xmpp/xmppstanzaparser_unittest.cc
+++ b/xmpp/xmppstanzaparser_unittest.cc
@@ -4,7 +4,7 @@
 #include <iostream>
 #include <sstream>
 #include <string>
-#include "talk/xmllite/xmlelement.h"
+#include "webrtc/libjingle/xmllite/xmlelement.h"
 #include "talk/xmpp/xmppstanzaparser.h"
 #include "webrtc/base/common.h"
 #include "webrtc/base/gunit.h"