VoE: apply new style guide on VoE interfaces and their implementations

Changes:
1. Ran clang-format on VoE interfaces and their implementations.
2. Replaced virtual with override in derived classes.

R=henrika@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/49239004

Cr-Commit-Position: refs/heads/master@{#9130}
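
A minimal, hypothetical sketch of change 2 above (replacing virtual with
override on overriding methods), written against VoERxVadCallback from
voe_audio_processing.h below; the class name is illustrative and not part of
this CL:

    #include "webrtc/voice_engine/include/voe_audio_processing.h"

    // Hypothetical observer, shown only to illustrate the style change.
    class MyRxVadObserver : public webrtc::VoERxVadCallback {
     public:
      // Old style: virtual void OnRxVad(int channel, int vadDecision);
      // New style: mark the overriding method with override, not virtual.
      void OnRxVad(int channel, int vadDecision) override {
        // React to the VAD decision for |channel| here.
      }
    };
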
diff --git a/webrtc/voice_engine/include/voe_audio_processing.h b/webrtc/voice_engine/include/voe_audio_processing.h
index 162848c..fd70f95 100644
--- a/webrtc/voice_engine/include/voe_audio_processing.h
+++ b/webrtc/voice_engine/include/voe_audio_processing.h
@@ -44,200 +44,195 @@
 class VoiceEngine;
 
 // VoERxVadCallback
-class WEBRTC_DLLEXPORT VoERxVadCallback
-{
-public:
-    virtual void OnRxVad(int channel, int vadDecision) = 0;
+class WEBRTC_DLLEXPORT VoERxVadCallback {
+ public:
+  virtual void OnRxVad(int channel, int vadDecision) = 0;
 
-protected:
-    virtual ~VoERxVadCallback() {}
+ protected:
+  virtual ~VoERxVadCallback() {}
 };
 
 // VoEAudioProcessing
-class WEBRTC_DLLEXPORT VoEAudioProcessing
-{
-public:
-    // Factory for the VoEAudioProcessing sub-API. Increases an internal
-    // reference counter if successful. Returns NULL if the API is not
-    // supported or if construction fails.
-    static VoEAudioProcessing* GetInterface(VoiceEngine* voiceEngine);
+class WEBRTC_DLLEXPORT VoEAudioProcessing {
+ public:
+  // Factory for the VoEAudioProcessing sub-API. Increases an internal
+  // reference counter if successful. Returns NULL if the API is not
+  // supported or if construction fails.
+  static VoEAudioProcessing* GetInterface(VoiceEngine* voiceEngine);
 
-    // Releases the VoEAudioProcessing sub-API and decreases an internal
-    // reference counter. Returns the new reference count. This value should
-    // be zero for all sub-APIs before the VoiceEngine object can be safely
-    // deleted.
-    virtual int Release() = 0;
+  // Releases the VoEAudioProcessing sub-API and decreases an internal
+  // reference counter. Returns the new reference count. This value should
+  // be zero for all sub-APIs before the VoiceEngine object can be safely
+  // deleted.
+  virtual int Release() = 0;
 
-    // Sets Noise Suppression (NS) status and mode.
-    // The NS reduces noise in the microphone signal.
-    virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged) = 0;
+  // Sets Noise Suppression (NS) status and mode.
+  // The NS reduces noise in the microphone signal.
+  virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged) = 0;
 
-    // Gets the NS status and mode.
-    virtual int GetNsStatus(bool& enabled, NsModes& mode) = 0;
+  // Gets the NS status and mode.
+  virtual int GetNsStatus(bool& enabled, NsModes& mode) = 0;
 
-    // Sets the Automatic Gain Control (AGC) status and mode.
-    // The AGC adjusts the microphone signal to an appropriate level.
-    virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged) = 0;
+  // Sets the Automatic Gain Control (AGC) status and mode.
+  // The AGC adjusts the microphone signal to an appropriate level.
+  virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged) = 0;
 
-    // Gets the AGC status and mode.
-    virtual int GetAgcStatus(bool& enabled, AgcModes& mode) = 0;
+  // Gets the AGC status and mode.
+  virtual int GetAgcStatus(bool& enabled, AgcModes& mode) = 0;
 
-    // Sets the AGC configuration.
-    // Should only be used in situations where the working environment
-    // is well known.
-    virtual int SetAgcConfig(AgcConfig config) = 0;
+  // Sets the AGC configuration.
+  // Should only be used in situations where the working environment
+  // is well known.
+  virtual int SetAgcConfig(AgcConfig config) = 0;
 
-    // Gets the AGC configuration.
-    virtual int GetAgcConfig(AgcConfig& config) = 0;
+  // Gets the AGC configuration.
+  virtual int GetAgcConfig(AgcConfig& config) = 0;
 
-    // Sets the Echo Control (EC) status and mode.
-    // The EC mitigates acoustic echo where a user can hear their own
-    // speech repeated back due to an acoustic coupling between the
-    // speaker and the microphone at the remote end.
-    virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged) = 0;
+  // Sets the Echo Control (EC) status and mode.
+  // The EC mitigates acoustic echo where a user can hear their own
+  // speech repeated back due to an acoustic coupling between the
+  // speaker and the microphone at the remote end.
+  virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged) = 0;
 
-    // Gets the EC status and mode.
-    virtual int GetEcStatus(bool& enabled, EcModes& mode) = 0;
+  // Gets the EC status and mode.
+  virtual int GetEcStatus(bool& enabled, EcModes& mode) = 0;
 
-    // Enables the compensation of clock drift between the capture and render
-    // streams by the echo canceller (i.e. only using EcMode==kEcAec). It will
-    // only be enabled if supported on the current platform; otherwise an error
-    // will be returned. Check if the platform is supported by calling
-    // |DriftCompensationSupported()|.
-    virtual int EnableDriftCompensation(bool enable) = 0;
-    virtual bool DriftCompensationEnabled() = 0;
-    static bool DriftCompensationSupported();
+  // Enables the compensation of clock drift between the capture and render
+  // streams by the echo canceller (i.e. only using EcMode==kEcAec). It will
+  // only be enabled if supported on the current platform; otherwise an error
+  // will be returned. Check if the platform is supported by calling
+  // |DriftCompensationSupported()|.
+  virtual int EnableDriftCompensation(bool enable) = 0;
+  virtual bool DriftCompensationEnabled() = 0;
+  static bool DriftCompensationSupported();
 
-    // Sets a delay |offset| in ms to add to the system delay reported by the
-    // OS, which is used by the AEC to synchronize far- and near-end streams.
-    // In some cases a system may introduce a delay which goes unreported by the
-    // OS, but which is known to the user. This method can be used to compensate
-    // for the unreported delay.
-    virtual void SetDelayOffsetMs(int offset) = 0;
-    virtual int DelayOffsetMs() = 0;
+  // Sets a delay |offset| in ms to add to the system delay reported by the
+  // OS, which is used by the AEC to synchronize far- and near-end streams.
+  // In some cases a system may introduce a delay which goes unreported by the
+  // OS, but which is known to the user. This method can be used to compensate
+  // for the unreported delay.
+  virtual void SetDelayOffsetMs(int offset) = 0;
+  virtual int DelayOffsetMs() = 0;
 
-    // Modifies settings for the AEC designed for mobile devices (AECM).
-    virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
-                            bool enableCNG = true) = 0;
+  // Modifies settings for the AEC designed for mobile devices (AECM).
+  virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
+                          bool enableCNG = true) = 0;
 
-    // Gets settings for the AECM.
-    virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG) = 0;
+  // Gets settings for the AECM.
+  virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG) = 0;
 
-    // Enables a high pass filter on the capture signal. This removes DC bias
-    // and low-frequency noise. Recommended to be enabled.
-    virtual int EnableHighPassFilter(bool enable) = 0;
-    virtual bool IsHighPassFilterEnabled() = 0;
+  // Enables a high pass filter on the capture signal. This removes DC bias
+  // and low-frequency noise. Recommended to be enabled.
+  virtual int EnableHighPassFilter(bool enable) = 0;
+  virtual bool IsHighPassFilterEnabled() = 0;
 
-    // Sets status and mode of the receiving-side (Rx) NS.
-    // The Rx NS reduces noise in the received signal for the specified
-    // |channel|. Intended for advanced usage only.
-    virtual int SetRxNsStatus(int channel,
-                              bool enable,
-                              NsModes mode = kNsUnchanged) = 0;
+  // Sets status and mode of the receiving-side (Rx) NS.
+  // The Rx NS reduces noise in the received signal for the specified
+  // |channel|. Intended for advanced usage only.
+  virtual int SetRxNsStatus(int channel,
+                            bool enable,
+                            NsModes mode = kNsUnchanged) = 0;
 
-    // Gets status and mode of the receiving-side NS.
-    virtual int GetRxNsStatus(int channel,
-                              bool& enabled,
-                              NsModes& mode) = 0;
+  // Gets status and mode of the receiving-side NS.
+  virtual int GetRxNsStatus(int channel, bool& enabled, NsModes& mode) = 0;
 
-    // Sets status and mode of the receiving-side (Rx) AGC.
-    // The Rx AGC adjusts the received signal to an appropriate level
-    // for the specified |channel|. Intended for advanced usage only.
-    virtual int SetRxAgcStatus(int channel,
-                               bool enable,
-                               AgcModes mode = kAgcUnchanged) = 0;
+  // Sets status and mode of the receiving-side (Rx) AGC.
+  // The Rx AGC adjusts the received signal to an appropriate level
+  // for the specified |channel|. Intended for advanced usage only.
+  virtual int SetRxAgcStatus(int channel,
+                             bool enable,
+                             AgcModes mode = kAgcUnchanged) = 0;
 
-    // Gets status and mode of the receiving-side AGC.
-    virtual int GetRxAgcStatus(int channel,
-                               bool& enabled,
-                               AgcModes& mode) = 0;
+  // Gets status and mode of the receiving-side AGC.
+  virtual int GetRxAgcStatus(int channel, bool& enabled, AgcModes& mode) = 0;
 
-    // Modifies the AGC configuration on the receiving side for the
-    // specified |channel|.
-    virtual int SetRxAgcConfig(int channel, AgcConfig config) = 0;
+  // Modifies the AGC configuration on the receiving side for the
+  // specified |channel|.
+  virtual int SetRxAgcConfig(int channel, AgcConfig config) = 0;
 
-    // Gets the AGC configuration on the receiving side.
-    virtual int GetRxAgcConfig(int channel, AgcConfig& config) = 0;
+  // Gets the AGC configuration on the receiving side.
+  virtual int GetRxAgcConfig(int channel, AgcConfig& config) = 0;
 
-    // Registers a VoERxVadCallback |observer| instance and enables Rx VAD
-    // notifications for the specified |channel|.
-    virtual int RegisterRxVadObserver(int channel,
-                                      VoERxVadCallback &observer) = 0;
+  // Registers a VoERxVadCallback |observer| instance and enables Rx VAD
+  // notifications for the specified |channel|.
+  virtual int RegisterRxVadObserver(int channel,
+                                    VoERxVadCallback& observer) = 0;
 
-    // Deregisters the VoERxVadCallback |observer| and disables Rx VAD
-    // notifications for the specified |channel|.
-    virtual int DeRegisterRxVadObserver(int channel) = 0;
+  // Deregisters the VoERxVadCallback |observer| and disables Rx VAD
+  // notifications for the specified |channel|.
+  virtual int DeRegisterRxVadObserver(int channel) = 0;
 
-    // Gets the VAD/DTX activity for the specified |channel|.
-    // The returned value is 1 if frames of audio contain speech
-    // and 0 if silent. The output is always 1 if VAD is disabled.
-    virtual int VoiceActivityIndicator(int channel) = 0;
+  // Gets the VAD/DTX activity for the specified |channel|.
+  // The returned value is 1 if frames of audio contain speech
+  // and 0 if silent. The output is always 1 if VAD is disabled.
+  virtual int VoiceActivityIndicator(int channel) = 0;
 
-    // Enables or disables the possibility to retrieve echo metrics and delay
-    // logging values during an active call. The metrics are only supported in
-    // AEC.
-    virtual int SetEcMetricsStatus(bool enable) = 0;
+  // Enables or disables the possibility to retrieve echo metrics and delay
+  // logging values during an active call. The metrics are only supported in
+  // AEC.
+  virtual int SetEcMetricsStatus(bool enable) = 0;
 
-    // Gets the current EC metric status.
-    virtual int GetEcMetricsStatus(bool& enabled) = 0;
+  // Gets the current EC metric status.
+  virtual int GetEcMetricsStatus(bool& enabled) = 0;
 
-    // Gets the instantaneous echo level metrics.
-    virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP) = 0;
+  // Gets the instantaneous echo level metrics.
+  virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP) = 0;
 
-    // Gets the EC internal |delay_median| and |delay_std| in ms between
-    // near-end and far-end. The metric |fraction_poor_delays| is the fraction of
-    // delay values that can potentially break the EC. The values are aggregated
-    // over one second and the last updated metrics are returned.
-    virtual int GetEcDelayMetrics(int& delay_median, int& delay_std,
-                                  float& fraction_poor_delays) = 0;
+  // Gets the EC internal |delay_median| and |delay_std| in ms between
+  // near-end and far-end. The metric |fraction_poor_delays| is the fraction of
+  // delay values that can potentially break the EC. The values are aggregated
+  // over one second and the last updated metrics are returned.
+  virtual int GetEcDelayMetrics(int& delay_median,
+                                int& delay_std,
+                                float& fraction_poor_delays) = 0;
 
-    // Enables recording of Audio Processing (AP) debugging information.
-    // The file can later be used for off-line analysis of the AP performance.
-    virtual int StartDebugRecording(const char* fileNameUTF8) = 0;
+  // Enables recording of Audio Processing (AP) debugging information.
+  // The file can later be used for off-line analysis of the AP performance.
+  virtual int StartDebugRecording(const char* fileNameUTF8) = 0;
 
-    // Same as above but sets and uses an existing file handle. Takes ownership
-    // of |file_handle| and passes it on to the audio processing module.
-    virtual int StartDebugRecording(FILE* file_handle) = 0;
+  // Same as above but sets and uses an existing file handle. Takes ownership
+  // of |file_handle| and passes it on to the audio processing module.
+  virtual int StartDebugRecording(FILE* file_handle) = 0;
 
-    // Disables recording of AP debugging information.
-    virtual int StopDebugRecording() = 0;
+  // Disables recording of AP debugging information.
+  virtual int StopDebugRecording() = 0;
 
-    // Enables or disables detection of disturbing keyboard typing.
-    // An error notification will be given as a callback upon detection.
-    virtual int SetTypingDetectionStatus(bool enable) = 0;
+  // Enables or disables detection of disturbing keyboard typing.
+  // An error notification will be given as a callback upon detection.
+  virtual int SetTypingDetectionStatus(bool enable) = 0;
 
-    // Gets the current typing detection status.
-    virtual int GetTypingDetectionStatus(bool& enabled) = 0;
+  // Gets the current typing detection status.
+  virtual int GetTypingDetectionStatus(bool& enabled) = 0;
 
-    // Reports the lower of:
-    // * Time in seconds since the last typing event.
-    // * Time in seconds since the typing detection was enabled.
-    // Returns error if typing detection is disabled.
-    virtual int TimeSinceLastTyping(int &seconds) = 0;
+  // Reports the lower of:
+  // * Time in seconds since the last typing event.
+  // * Time in seconds since the typing detection was enabled.
+  // Returns error if typing detection is disabled.
+  virtual int TimeSinceLastTyping(int& seconds) = 0;
 
-    // Optional setting of typing detection parameters.
-    // A parameter with value == 0 will be ignored
-    // and left at its default.
-    // TODO(niklase) Remove default argument as soon as libJingle is updated!
-    virtual int SetTypingDetectionParameters(int timeWindow,
-                                             int costPerTyping,
-                                             int reportingThreshold,
-                                             int penaltyDecay,
-                                             int typeEventDelay = 0) = 0;
+  // Optional setting of typing detection parameters.
+  // A parameter with value == 0 will be ignored
+  // and left at its default.
+  // TODO(niklase) Remove default argument as soon as libJingle is updated!
+  virtual int SetTypingDetectionParameters(int timeWindow,
+                                           int costPerTyping,
+                                           int reportingThreshold,
+                                           int penaltyDecay,
+                                           int typeEventDelay = 0) = 0;
 
-    // Swaps the capture-side left and right audio channels when enabled. It
-    // only has an effect when using a stereo send codec. The setting is
-    // persistent; it will be applied whenever a stereo send codec is enabled.
-    //
-    // The swap is applied only to the captured audio, and not mixed files. The
-    // swap will appear in file recordings and when accessing audio through the
-    // external media interface.
-    virtual void EnableStereoChannelSwapping(bool enable) = 0;
-    virtual bool IsStereoChannelSwappingEnabled() = 0;
+  // Swaps the capture-side left and right audio channels when enabled. It
+  // only has an effect when using a stereo send codec. The setting is
+  // persistent; it will be applied whenever a stereo send codec is enabled.
+  //
+  // The swap is applied only to the captured audio, and not mixed files. The
+  // swap will appear in file recordings and when accessing audio through the
+  // external media interface.
+  virtual void EnableStereoChannelSwapping(bool enable) = 0;
+  virtual bool IsStereoChannelSwappingEnabled() = 0;
 
-protected:
-    VoEAudioProcessing() {}
-    virtual ~VoEAudioProcessing() {}
+ protected:
+  VoEAudioProcessing() {}
+  virtual ~VoEAudioProcessing() {}
 };
 
 }  // namespace webrtc
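
A hedged usage sketch of the acquire/configure/release contract documented in
VoEAudioProcessing above (assumes |voe| is an already created VoiceEngine*;
error handling omitted):

    #include "webrtc/voice_engine/include/voe_audio_processing.h"

    void ConfigureCaptureProcessing(webrtc::VoiceEngine* voe) {
      // GetInterface() bumps the internal reference count.
      webrtc::VoEAudioProcessing* proc =
          webrtc::VoEAudioProcessing::GetInterface(voe);
      if (proc == NULL)
        return;  // Sub-API not supported or construction failed.
      proc->SetNsStatus(true);   // Noise suppression, mode left unchanged.
      proc->SetAgcStatus(true);  // Automatic gain control, mode left unchanged.
      proc->SetEcStatus(true);   // Echo control, mode left unchanged.
      // The count must reach zero for all sub-APIs before the VoiceEngine
      // object can be deleted.
      proc->Release();
    }
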
diff --git a/webrtc/voice_engine/include/voe_base.h b/webrtc/voice_engine/include/voe_base.h
index 4881939..7666a57 100644
--- a/webrtc/voice_engine/include/voe_base.h
+++ b/webrtc/voice_engine/include/voe_base.h
@@ -46,149 +46,150 @@
 const int kVoEDefault = -1;
 
 // VoiceEngineObserver
-class WEBRTC_DLLEXPORT VoiceEngineObserver
-{
-public:
-    // This method will be called after the occurrence of any runtime error
-    // code, or warning notification, when the observer interface has been
-    // installed using VoEBase::RegisterVoiceEngineObserver().
-    virtual void CallbackOnError(int channel, int errCode) = 0;
+class WEBRTC_DLLEXPORT VoiceEngineObserver {
+ public:
+  // This method will be called after the occurrence of any runtime error
+  // code, or warning notification, when the observer interface has been
+  // installed using VoEBase::RegisterVoiceEngineObserver().
+  virtual void CallbackOnError(int channel, int errCode) = 0;
 
-protected:
-    virtual ~VoiceEngineObserver() {}
+ protected:
+  virtual ~VoiceEngineObserver() {}
 };
 
 // VoiceEngine
-class WEBRTC_DLLEXPORT VoiceEngine
-{
-public:
-    // Creates a VoiceEngine object, which can then be used to acquire
-    // sub-APIs. Returns NULL on failure.
-    static VoiceEngine* Create();
-    static VoiceEngine* Create(const Config& config);
+class WEBRTC_DLLEXPORT VoiceEngine {
+ public:
+  // Creates a VoiceEngine object, which can then be used to acquire
+  // sub-APIs. Returns NULL on failure.
+  static VoiceEngine* Create();
+  static VoiceEngine* Create(const Config& config);
 
-    // Deletes a created VoiceEngine object and releases the utilized resources.
-    // Note that if there are outstanding references held via other interfaces,
-    // the voice engine instance will not actually be deleted until those
-    // references have been released.
-    static bool Delete(VoiceEngine*& voiceEngine);
+  // Deletes a created VoiceEngine object and releases the utilized resources.
+  // Note that if there are outstanding references held via other interfaces,
+  // the voice engine instance will not actually be deleted until those
+  // references have been released.
+  static bool Delete(VoiceEngine*& voiceEngine);
 
-    // Specifies the amount and type of trace information which will be
-    // created by the VoiceEngine.
-    static int SetTraceFilter(unsigned int filter);
+  // Specifies the amount and type of trace information which will be
+  // created by the VoiceEngine.
+  static int SetTraceFilter(unsigned int filter);
 
-    // Sets the name of the trace file and enables non-encrypted trace messages.
-    static int SetTraceFile(const char* fileNameUTF8,
-                            bool addFileCounter = false);
+  // Sets the name of the trace file and enables non-encrypted trace messages.
+  static int SetTraceFile(const char* fileNameUTF8,
+                          bool addFileCounter = false);
 
-    // Installs the TraceCallback implementation to ensure that the user
-    // receives callbacks for generated trace messages.
-    static int SetTraceCallback(TraceCallback* callback);
+  // Installs the TraceCallback implementation to ensure that the user
+  // receives callbacks for generated trace messages.
+  static int SetTraceCallback(TraceCallback* callback);
 
 #if !defined(WEBRTC_CHROMIUM_BUILD)
-    static int SetAndroidObjects(void* javaVM, void* context);
+  static int SetAndroidObjects(void* javaVM, void* context);
 #endif
 
-protected:
-    VoiceEngine() {}
-    ~VoiceEngine() {}
+ protected:
+  VoiceEngine() {}
+  ~VoiceEngine() {}
 };
 
 // VoEBase
-class WEBRTC_DLLEXPORT VoEBase
-{
-public:
-    // Factory for the VoEBase sub-API. Increases an internal reference
-    // counter if successful. Returns NULL if the API is not supported or if
-    // construction fails.
-    static VoEBase* GetInterface(VoiceEngine* voiceEngine);
+class WEBRTC_DLLEXPORT VoEBase {
+ public:
+  // Factory for the VoEBase sub-API. Increases an internal reference
+  // counter if successful. Returns NULL if the API is not supported or if
+  // construction fails.
+  static VoEBase* GetInterface(VoiceEngine* voiceEngine);
 
-    // Releases the VoEBase sub-API and decreases an internal reference
-    // counter. Returns the new reference count. This value should be zero
-    // for all sub-APIs before the VoiceEngine object can be safely deleted.
-    virtual int Release() = 0;
+  // Releases the VoEBase sub-API and decreases an internal reference
+  // counter. Returns the new reference count. This value should be zero
+  // for all sub-APIs before the VoiceEngine object can be safely deleted.
+  virtual int Release() = 0;
 
-    // Installs the observer class to enable runtime error control and
-    // warning notifications. Returns -1 in case of an error, 0 otherwise.
-    virtual int RegisterVoiceEngineObserver(VoiceEngineObserver& observer) = 0;
+  // Installs the observer class to enable runtime error control and
+  // warning notifications. Returns -1 in case of an error, 0 otherwise.
+  virtual int RegisterVoiceEngineObserver(VoiceEngineObserver& observer) = 0;
 
-    // Removes and disables the observer class for runtime error control
-    // and warning notifications. Returns 0.
-    virtual int DeRegisterVoiceEngineObserver() = 0;
+  // Removes and disables the observer class for runtime error control
+  // and warning notifications. Returns 0.
+  virtual int DeRegisterVoiceEngineObserver() = 0;
 
-    // Initializes all common parts of the VoiceEngine; e.g. all
-    // encoders/decoders, the sound card and core receiving components.
-    // This method also makes it possible to install some user-defined external
-    // modules:
-    // - The Audio Device Module (ADM) which implements all the audio layer
-    // functionality in a separate (reference counted) module.
-    // - The AudioProcessing module handles capture-side processing. VoiceEngine
-    // takes ownership of this object.
-    // If NULL is passed for any of these, VoiceEngine will create its own.
-    // Returns -1 in case of an error, 0 otherwise.
-    // TODO(ajm): Remove default NULLs.
-    virtual int Init(AudioDeviceModule* external_adm = NULL,
-                     AudioProcessing* audioproc = NULL) = 0;
+  // Initializes all common parts of the VoiceEngine; e.g. all
+  // encoders/decoders, the sound card and core receiving components.
+  // This method also makes it possible to install some user-defined external
+  // modules:
+  // - The Audio Device Module (ADM) which implements all the audio layer
+  // functionality in a separate (reference counted) module.
+  // - The AudioProcessing module handles capture-side processing. VoiceEngine
+  // takes ownership of this object.
+  // If NULL is passed for any of these, VoiceEngine will create its own.
+  // Returns -1 in case of an error, 0 otherwise.
+  // TODO(ajm): Remove default NULLs.
+  virtual int Init(AudioDeviceModule* external_adm = NULL,
+                   AudioProcessing* audioproc = NULL) = 0;
 
-    // Returns NULL before Init() is called.
-    virtual AudioProcessing* audio_processing() = 0;
+  // Returns NULL before Init() is called.
+  virtual AudioProcessing* audio_processing() = 0;
 
-    // Terminates all VoiceEngine functions and releases allocated resources.
-    // Returns 0.
-    virtual int Terminate() = 0;
+  // Terminates all VoiceEngine functions and releases allocated resources.
+  // Returns 0.
+  virtual int Terminate() = 0;
 
-    // Creates a new channel and allocates the required resources for it.
-    // One can use |config| to configure the channel. Currently that is used for
-    // choosing between ACM1 and ACM2, when creating Audio Coding Module.
-    // Returns channel ID or -1 in case of an error.
-    virtual int CreateChannel() = 0;
-    virtual int CreateChannel(const Config& config) = 0;
+  // Creates a new channel and allocates the required resources for it.
+  // One can use |config| to configure the channel. Currently that is used for
+  // choosing between ACM1 and ACM2, when creating Audio Coding Module.
+  // Returns channel ID or -1 in case of an error.
+  virtual int CreateChannel() = 0;
+  virtual int CreateChannel(const Config& config) = 0;
 
-    // Deletes an existing channel and releases the utilized resources.
-    // Returns -1 in case of an error, 0 otherwise.
-    virtual int DeleteChannel(int channel) = 0;
+  // Deletes an existing channel and releases the utilized resources.
+  // Returns -1 in case of an error, 0 otherwise.
+  virtual int DeleteChannel(int channel) = 0;
 
-    // Prepares and initiates the VoiceEngine for reception of
-    // incoming RTP/RTCP packets on the specified |channel|.
-    virtual int StartReceive(int channel) = 0;
+  // Prepares and initiates the VoiceEngine for reception of
+  // incoming RTP/RTCP packets on the specified |channel|.
+  virtual int StartReceive(int channel) = 0;
 
-    // Stops receiving incoming RTP/RTCP packets on the specified |channel|.
-    virtual int StopReceive(int channel) = 0;
+  // Stops receiving incoming RTP/RTCP packets on the specified |channel|.
+  virtual int StopReceive(int channel) = 0;
 
-    // Starts forwarding the packets to the mixer/soundcard for a
-    // specified |channel|.
-    virtual int StartPlayout(int channel) = 0;
+  // Starts forwarding the packets to the mixer/soundcard for a
+  // specified |channel|.
+  virtual int StartPlayout(int channel) = 0;
 
-    // Stops forwarding the packets to the mixer/soundcard for a
-    // specified |channel|.
-    virtual int StopPlayout(int channel) = 0;
+  // Stops forwarding the packets to the mixer/soundcard for a
+  // specified |channel|.
+  virtual int StopPlayout(int channel) = 0;
 
-    // Starts sending packets to an already specified IP address and
-    // port number for a specified |channel|.
-    virtual int StartSend(int channel) = 0;
+  // Starts sending packets to an already specified IP address and
+  // port number for a specified |channel|.
+  virtual int StartSend(int channel) = 0;
 
-    // Stops sending packets from a specified |channel|.
-    virtual int StopSend(int channel) = 0;
+  // Stops sending packets from a specified |channel|.
+  virtual int StopSend(int channel) = 0;
 
-    // Gets the version information for VoiceEngine and its components.
-    virtual int GetVersion(char version[1024]) = 0;
+  // Gets the version information for VoiceEngine and its components.
+  virtual int GetVersion(char version[1024]) = 0;
 
-    // Gets the last VoiceEngine error code.
-    virtual int LastError() = 0;
+  // Gets the last VoiceEngine error code.
+  virtual int LastError() = 0;
 
-    // TODO(xians): Make the interface pure virtual after libjingle
-    // implements the interface in its FakeWebRtcVoiceEngine.
-    virtual AudioTransport* audio_transport() { return NULL; }
+  // TODO(xians): Make the interface pure virtual after libjingle
+  // implements the interface in its FakeWebRtcVoiceEngine.
+  virtual AudioTransport* audio_transport() { return NULL; }
 
-    // To be removed. Don't use.
-    virtual int SetOnHoldStatus(int channel, bool enable,
-        OnHoldModes mode = kHoldSendAndPlay) { return -1; }
-    virtual int GetOnHoldStatus(int channel, bool& enabled,
-        OnHoldModes& mode) { return -1; }
+  // To be removed. Don't use.
+  virtual int SetOnHoldStatus(int channel,
+                              bool enable,
+                              OnHoldModes mode = kHoldSendAndPlay) {
+    return -1;
+  }
+  virtual int GetOnHoldStatus(int channel, bool& enabled, OnHoldModes& mode) {
+    return -1;
+  }
 
-protected:
-    VoEBase() {}
-    virtual ~VoEBase() {}
+ protected:
+  VoEBase() {}
+  virtual ~VoEBase() {}
 };
 
 }  // namespace webrtc
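
The VoEBase comments above describe the engine lifecycle; a minimal sketch of
that flow (defaults everywhere, no error checking, and channel transport setup
omitted for brevity):

    #include "webrtc/voice_engine/include/voe_base.h"

    void RunVoiceSession() {
      webrtc::VoiceEngine* voe = webrtc::VoiceEngine::Create();
      webrtc::VoEBase* base = webrtc::VoEBase::GetInterface(voe);
      base->Init();  // NULL ADM/AudioProcessing: the engine creates its own.
      int channel = base->CreateChannel();  // Returns -1 on error.
      base->StartReceive(channel);
      base->StartPlayout(channel);
      base->StartSend(channel);
      // ... call in progress ...
      base->StopSend(channel);
      base->StopPlayout(channel);
      base->StopReceive(channel);
      base->DeleteChannel(channel);
      base->Terminate();
      base->Release();  // Must return 0 before Delete() can succeed.
      webrtc::VoiceEngine::Delete(voe);
    }
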
diff --git a/webrtc/voice_engine/include/voe_codec.h b/webrtc/voice_engine/include/voe_codec.h
index 1e7b97f..dc3b88c 100644
--- a/webrtc/voice_engine/include/voe_codec.h
+++ b/webrtc/voice_engine/include/voe_codec.h
@@ -37,109 +37,116 @@
 
 class VoiceEngine;
 
-class WEBRTC_DLLEXPORT VoECodec
-{
-public:
-    // Factory for the VoECodec sub-API. Increases an internal
-    // reference counter if successful. Returns NULL if the API is not
-    // supported or if construction fails.
-    static VoECodec* GetInterface(VoiceEngine* voiceEngine);
+class WEBRTC_DLLEXPORT VoECodec {
+ public:
+  // Factory for the VoECodec sub-API. Increases an internal
+  // reference counter if successful. Returns NULL if the API is not
+  // supported or if construction fails.
+  static VoECodec* GetInterface(VoiceEngine* voiceEngine);
 
-    // Releases the VoECodec sub-API and decreases an internal
-    // reference counter. Returns the new reference count. This value should
-    // be zero for all sub-APIs before the VoiceEngine object can be safely
-    // deleted.
-    virtual int Release() = 0;
+  // Releases the VoECodec sub-API and decreases an internal
+  // reference counter. Returns the new reference count. This value should
+  // be zero for all sub-APIs before the VoiceEngine object can be safely
+  // deleted.
+  virtual int Release() = 0;
 
-    // Gets the number of supported codecs.
-    virtual int NumOfCodecs() = 0;
+  // Gets the number of supported codecs.
+  virtual int NumOfCodecs() = 0;
 
-    // Get the |codec| information for a specified list |index|.
-    virtual int GetCodec(int index, CodecInst& codec) = 0;
+  // Get the |codec| information for a specified list |index|.
+  virtual int GetCodec(int index, CodecInst& codec) = 0;
 
-    // Sets the |codec| for the |channel| to be used for sending.
-    virtual int SetSendCodec(int channel, const CodecInst& codec) = 0;
+  // Sets the |codec| for the |channel| to be used for sending.
+  virtual int SetSendCodec(int channel, const CodecInst& codec) = 0;
 
-    // Gets the |codec| parameters for the sending codec on a specified
-    // |channel|.
-    virtual int GetSendCodec(int channel, CodecInst& codec) = 0;
+  // Gets the |codec| parameters for the sending codec on a specified
+  // |channel|.
+  virtual int GetSendCodec(int channel, CodecInst& codec) = 0;
 
-    // Sets the bitrate on a specified |channel| to the specified value
-    // (in bits/sec). If the value is not supported by the codec, the codec will
-    // choose an appropriate value.
-    // Returns -1 on failure and 0 on success.
-    virtual int SetBitRate(int channel, int bitrate_bps) = 0;
+  // Sets the bitrate on a specified |channel| to the specified value
+  // (in bits/sec). If the value is not supported by the codec, the codec will
+  // choose an appropriate value.
+  // Returns -1 on failure and 0 on success.
+  virtual int SetBitRate(int channel, int bitrate_bps) = 0;
 
-    // Gets the currently received |codec| for a specific |channel|.
-    virtual int GetRecCodec(int channel, CodecInst& codec) = 0;
+  // Gets the currently received |codec| for a specific |channel|.
+  virtual int GetRecCodec(int channel, CodecInst& codec) = 0;
 
-    // Sets the dynamic payload type number for a particular |codec| or
-    // disables (ignores) a codec for receiving. For instance, when receiving
-    // an invite from a SIP-based client, this function can be used to change
-    // the dynamic payload type number to match that in the INVITE SDP-
-    // message. The utilized parameters in the |codec| structure are:
-    // plname, plfreq, pltype and channels.
-    virtual int SetRecPayloadType(int channel, const CodecInst& codec) = 0;
+  // Sets the dynamic payload type number for a particular |codec| or
+  // disables (ignores) a codec for receiving. For instance, when receiving
+  // an invite from a SIP-based client, this function can be used to change
+  // the dynamic payload type number to match that in the INVITE SDP-
+  // message. The utilized parameters in the |codec| structure are:
+  // plname, plfreq, pltype and channels.
+  virtual int SetRecPayloadType(int channel, const CodecInst& codec) = 0;
 
-    // Gets the actual payload type that is set for receiving a |codec| on a
-    // |channel|. The value it retrieves will either be the default payload
-    // type, or a value earlier set with SetRecPayloadType().
-    virtual int GetRecPayloadType(int channel, CodecInst& codec) = 0;
+  // Gets the actual payload type that is set for receiving a |codec| on a
+  // |channel|. The value it retrieves will either be the default payload
+  // type, or a value earlier set with SetRecPayloadType().
+  virtual int GetRecPayloadType(int channel, CodecInst& codec) = 0;
 
-    // Sets the payload |type| for the sending of SID-frames with background
-    // noise estimation during silence periods detected by the VAD.
-    virtual int SetSendCNPayloadType(
-        int channel, int type, PayloadFrequencies frequency = kFreq16000Hz) = 0;
+  // Sets the payload |type| for the sending of SID-frames with background
+  // noise estimation during silence periods detected by the VAD.
+  virtual int SetSendCNPayloadType(
+      int channel,
+      int type,
+      PayloadFrequencies frequency = kFreq16000Hz) = 0;
 
-    // Sets the codec internal FEC (forward error correction) status for a
-    // specified |channel|. Returns 0 on success and -1 on failure.
-    // TODO(minyue): Make SetFECStatus() pure virtual when fakewebrtcvoiceengine
-    // in talk is ready.
-    virtual int SetFECStatus(int channel, bool enable) { return -1; }
+  // Sets the codec internal FEC (forward error correction) status for a
+  // specified |channel|. Returns 0 on success and -1 on failure.
+  // TODO(minyue): Make SetFECStatus() pure virtual when fakewebrtcvoiceengine
+  // in talk is ready.
+  virtual int SetFECStatus(int channel, bool enable) { return -1; }
 
-    // Gets the codec internal FEC status for a specified |channel|. Returns 0
-    // with the status stored in |enabled| on success, and -1 if an error was
-    // encountered.
-    // TODO(minyue): Make GetFECStatus() pure virtual when fakewebrtcvoiceengine
-    // in talk is ready.
-    virtual int GetFECStatus(int channel, bool& enabled) { return -1; }
+  // Gets the codec internal FEC status for a specified |channel|. Returns 0
+  // with the status stored in |enabled| on success, and -1 if an error was
+  // encountered.
+  // TODO(minyue): Make GetFECStatus() pure virtual when fakewebrtcvoiceengine
+  // in talk is ready.
+  virtual int GetFECStatus(int channel, bool& enabled) { return -1; }
 
-    // Sets the VAD/DTX (silence suppression) status and |mode| for a
-    // specified |channel|. Disabling VAD (through |enable|) will also disable
-    // DTX; it is not necessary to explicitly set |disableDTX| in this case.
-    virtual int SetVADStatus(int channel, bool enable,
-                             VadModes mode = kVadConventional,
-                             bool disableDTX = false) = 0;
+  // Sets the VAD/DTX (silence suppression) status and |mode| for a
+  // specified |channel|. Disabling VAD (through |enable|) will also disable
+  // DTX; it is not necessary to explicitly set |disableDTX| in this case.
+  virtual int SetVADStatus(int channel,
+                           bool enable,
+                           VadModes mode = kVadConventional,
+                           bool disableDTX = false) = 0;
 
-    // Gets the VAD/DTX status and |mode| for a specified |channel|.
-    virtual int GetVADStatus(int channel, bool& enabled, VadModes& mode,
-                             bool& disabledDTX) = 0;
+  // Gets the VAD/DTX status and |mode| for a specified |channel|.
+  virtual int GetVADStatus(int channel,
+                           bool& enabled,
+                           VadModes& mode,
+                           bool& disabledDTX) = 0;
 
-    // If the send codec is Opus on a specified |channel|, sets the maximum
-    // playback rate the receiver will render: |frequency_hz| (in Hz).
-    // TODO(minyue): Make SetOpusMaxPlaybackRate() pure virtual when
-    // fakewebrtcvoiceengine in talk is ready.
-    virtual int SetOpusMaxPlaybackRate(int channel, int frequency_hz) {
-      return -1;
-    }
+  // If the send codec is Opus on a specified |channel|, sets the maximum
+  // playback rate the receiver will render: |frequency_hz| (in Hz).
+  // TODO(minyue): Make SetOpusMaxPlaybackRate() pure virtual when
+  // fakewebrtcvoiceengine in talk is ready.
+  virtual int SetOpusMaxPlaybackRate(int channel, int frequency_hz) {
+    return -1;
+  }
 
-    // If the send codec is Opus on a specified |channel|, sets its DTX. Returns
-    // 0 on success and -1 on failure.
-    virtual int SetOpusDtx(int channel, bool enable_dtx) = 0;
+  // If the send codec is Opus on a specified |channel|, sets its DTX. Returns
+  // 0 on success and -1 on failure.
+  virtual int SetOpusDtx(int channel, bool enable_dtx) = 0;
 
-    // Don't use. To be removed.
-    virtual int SetAMREncFormat(int channel, AmrMode mode) { return -1; }
-    virtual int SetAMRDecFormat(int channel, AmrMode mode) { return -1; }
-    virtual int SetAMRWbEncFormat(int channel, AmrMode mode) { return -1; }
-    virtual int SetAMRWbDecFormat(int channel, AmrMode mode) { return -1; }
-    virtual int SetISACInitTargetRate(int channel, int rateBps,
-            bool useFixedFrameSize = false) { return -1; }
-    virtual int SetISACMaxRate(int channel, int rateBps) { return -1; }
-    virtual int SetISACMaxPayloadSize(int channel, int sizeBytes) { return -1; }
+  // Don't use. To be removed.
+  virtual int SetAMREncFormat(int channel, AmrMode mode) { return -1; }
+  virtual int SetAMRDecFormat(int channel, AmrMode mode) { return -1; }
+  virtual int SetAMRWbEncFormat(int channel, AmrMode mode) { return -1; }
+  virtual int SetAMRWbDecFormat(int channel, AmrMode mode) { return -1; }
+  virtual int SetISACInitTargetRate(int channel,
+                                    int rateBps,
+                                    bool useFixedFrameSize = false) {
+    return -1;
+  }
+  virtual int SetISACMaxRate(int channel, int rateBps) { return -1; }
+  virtual int SetISACMaxPayloadSize(int channel, int sizeBytes) { return -1; }
 
-protected:
-    VoECodec() {}
-    virtual ~VoECodec() {}
+ protected:
+  VoECodec() {}
+  virtual ~VoECodec() {}
 };
 
 }  // namespace webrtc
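
A sketch of codec enumeration and selection as described in the VoECodec
comments above (|voe| and |channel| as in the VoEBase sketch; picking the
first stereo codec is purely illustrative):

    #include "webrtc/voice_engine/include/voe_codec.h"

    void PickFirstStereoCodec(webrtc::VoiceEngine* voe, int channel) {
      webrtc::VoECodec* codec = webrtc::VoECodec::GetInterface(voe);
      webrtc::CodecInst inst;
      for (int i = 0; i < codec->NumOfCodecs(); ++i) {
        codec->GetCodec(i, inst);  // Fills plname, plfreq, pltype, channels, ...
        if (inst.channels == 2) {
          codec->SetSendCodec(channel, inst);
          break;
        }
      }
      codec->Release();
    }
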
diff --git a/webrtc/voice_engine/include/voe_dtmf.h b/webrtc/voice_engine/include/voe_dtmf.h
index 4fd4496..64d758d 100644
--- a/webrtc/voice_engine/include/voe_dtmf.h
+++ b/webrtc/voice_engine/include/voe_dtmf.h
@@ -40,57 +40,57 @@
 class VoiceEngine;
 
 // VoEDtmf
-class WEBRTC_DLLEXPORT VoEDtmf
-{
-public:
+class WEBRTC_DLLEXPORT VoEDtmf {
+ public:
+  // Factory for the VoEDtmf sub-API. Increases an internal
+  // reference counter if successful. Returns NULL if the API is not
+  // supported or if construction fails.
+  static VoEDtmf* GetInterface(VoiceEngine* voiceEngine);
 
-    // Factory for the VoEDtmf sub-API. Increases an internal
-    // reference counter if successful. Returns NULL if the API is not
-    // supported or if construction fails.
-    static VoEDtmf* GetInterface(VoiceEngine* voiceEngine);
+  // Releases the VoEDtmf sub-API and decreases an internal
+  // reference counter. Returns the new reference count. This value should
+  // be zero for all sub-APIs before the VoiceEngine object can be safely
+  // deleted.
+  virtual int Release() = 0;
 
-    // Releases the VoEDtmf sub-API and decreases an internal
-    // reference counter. Returns the new reference count. This value should
-    // be zero for all sub-APIs before the VoiceEngine object can be safely
-    // deleted.
-    virtual int Release() = 0;
+  // Sends telephone events either in-band or out-of-band.
+  virtual int SendTelephoneEvent(int channel,
+                                 int eventCode,
+                                 bool outOfBand = true,
+                                 int lengthMs = 160,
+                                 int attenuationDb = 10) = 0;
 
-    // Sends telephone events either in-band or out-of-band.
-    virtual int SendTelephoneEvent(int channel, int eventCode,
-                                   bool outOfBand = true, int lengthMs = 160,
-                                   int attenuationDb = 10) = 0;
+  // Sets the dynamic payload |type| that should be used for telephone
+  // events.
+  virtual int SetSendTelephoneEventPayloadType(int channel,
+                                               unsigned char type) = 0;
 
+  // Gets the currently set dynamic payload |type| for telephone events.
+  virtual int GetSendTelephoneEventPayloadType(int channel,
+                                               unsigned char& type) = 0;
 
-    // Sets the dynamic payload |type| that should be used for telephone
-    // events.
-    virtual int SetSendTelephoneEventPayloadType(int channel,
-                                                 unsigned char type) = 0;
+  // Toggles DTMF feedback state: when a DTMF tone is sent, the same tone
+  // is played out on the speaker.
+  virtual int SetDtmfFeedbackStatus(bool enable,
+                                    bool directFeedback = false) = 0;
 
+  // Gets the DTMF feedback status.
+  virtual int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) = 0;
 
-    // Gets the currently set dynamic payload |type| for telephone events.
-    virtual int GetSendTelephoneEventPayloadType(int channel,
-                                                 unsigned char& type) = 0;
+  // Plays a DTMF feedback tone (only locally).
+  virtual int PlayDtmfTone(int eventCode,
+                           int lengthMs = 200,
+                           int attenuationDb = 10) = 0;
 
-    // Toggles DTMF feedback state: when a DTMF tone is sent, the same tone
-    // is played out on the speaker.
-    virtual int SetDtmfFeedbackStatus(bool enable,
-                                      bool directFeedback = false) = 0;
+  // To be removed. Don't use.
+  virtual int StartPlayingDtmfTone(int eventCode, int attenuationDb = 10) {
+    return -1;
+  }
+  virtual int StopPlayingDtmfTone() { return -1; }
 
-    // Gets the DTMF feedback status.
-    virtual int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) = 0;
-
-    // Plays a DTMF feedback tone (only locally).
-    virtual int PlayDtmfTone(int eventCode, int lengthMs = 200,
-                             int attenuationDb = 10) = 0;
-
-    // To be removed. Don't use.
-    virtual int StartPlayingDtmfTone(int eventCode,
-        int attenuationDb = 10) { return -1; }
-    virtual int StopPlayingDtmfTone() { return -1; }
-
-protected:
-    VoEDtmf() {}
-    virtual ~VoEDtmf() {}
+ protected:
+  VoEDtmf() {}
+  virtual ~VoEDtmf() {}
 };
 
 }  // namespace webrtc
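
A sketch of sending a DTMF digit with the defaults documented in VoEDtmf above
(the payload type 106 is an arbitrary dynamic value chosen for illustration):

    #include "webrtc/voice_engine/include/voe_dtmf.h"

    void SendDigitFive(webrtc::VoiceEngine* voe, int channel) {
      webrtc::VoEDtmf* dtmf = webrtc::VoEDtmf::GetInterface(voe);
      dtmf->SetSendTelephoneEventPayloadType(channel, 106);  // Illustrative PT.
      // Defaults: out-of-band, 160 ms duration, 10 dB attenuation.
      dtmf->SendTelephoneEvent(channel, 5);
      dtmf->Release();
    }
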
diff --git a/webrtc/voice_engine/include/voe_external_media.h b/webrtc/voice_engine/include/voe_external_media.h
index bcfd81b..48c564b 100644
--- a/webrtc/voice_engine/include/voe_external_media.h
+++ b/webrtc/voice_engine/include/voe_external_media.h
@@ -17,73 +17,83 @@
 class VoiceEngine;
 class AudioFrame;
 
-class WEBRTC_DLLEXPORT VoEMediaProcess
-{
-public:
-    // The VoiceEngine user should override the Process() method in a
-    // derived class. Process() will be called when audio is ready to
-    // be processed. The audio can be accessed in several different modes
-    // given by the |type| parameter. The function should modify the
-    // original data and ensure that it is copied back to the |audio10ms|
-    // array. The number of samples in the frame cannot be changed.
-    // The sampling frequency will depend upon the codec used.
-    // If |isStereo| is true, audio10ms will contain 16-bit PCM data
-    // samples in interleaved stereo format (L0,R0,L1,R1,...).
-    virtual void Process(int channel, ProcessingTypes type,
-                         int16_t audio10ms[], int length,
-                         int samplingFreq, bool isStereo) = 0;
+class WEBRTC_DLLEXPORT VoEMediaProcess {
+ public:
+  // The VoiceEngine user should override the Process() method in a
+  // derived class. Process() will be called when audio is ready to
+  // be processed. The audio can be accessed in several different modes
+  // given by the |type| parameter. The function should modify the
+  // original data and ensure that it is copied back to the |audio10ms|
+  // array. The number of samples in the frame cannot be changed.
+  // The sampling frequency will depend upon the codec used.
+  // If |isStereo| is true, audio10ms will contain 16-bit PCM data
+  // samples in interleaved stereo format (L0,R0,L1,R1,...).
+  virtual void Process(int channel,
+                       ProcessingTypes type,
+                       int16_t audio10ms[],
+                       int length,
+                       int samplingFreq,
+                       bool isStereo) = 0;
 
-protected:
-    virtual ~VoEMediaProcess() {}
+ protected:
+  virtual ~VoEMediaProcess() {}
 };
 
-class WEBRTC_DLLEXPORT VoEExternalMedia
-{
-public:
-    // Factory for the VoEExternalMedia sub-API. Increases an internal
-    // reference counter if successful. Returns NULL if the API is not
-    // supported or if construction fails.
-    static VoEExternalMedia* GetInterface(VoiceEngine* voiceEngine);
+class WEBRTC_DLLEXPORT VoEExternalMedia {
+ public:
+  // Factory for the VoEExternalMedia sub-API. Increases an internal
+  // reference counter if successful. Returns NULL if the API is not
+  // supported or if construction fails.
+  static VoEExternalMedia* GetInterface(VoiceEngine* voiceEngine);
 
-    // Releases the VoEExternalMedia sub-API and decreases an internal
-    // reference counter. Returns the new reference count. This value should
-    // be zero for all sub-APIs before the VoiceEngine object can be safely
-    // deleted.
-    virtual int Release() = 0;
+  // Releases the VoEExternalMedia sub-API and decreases an internal
+  // reference counter. Returns the new reference count. This value should
+  // be zero for all sub-APIs before the VoiceEngine object can be safely
+  // deleted.
+  virtual int Release() = 0;
 
-    // Installs a VoEMediaProcess derived instance and activates external
-    // media for the specified |channel| and |type|.
-    virtual int RegisterExternalMediaProcessing(
-        int channel, ProcessingTypes type, VoEMediaProcess& processObject) = 0;
+  // Installs a VoEMediaProcess derived instance and activates external
+  // media for the specified |channel| and |type|.
+  virtual int RegisterExternalMediaProcessing(
+      int channel,
+      ProcessingTypes type,
+      VoEMediaProcess& processObject) = 0;
 
-    // Removes the VoEMediaProcess derived instance and deactivates external
-    // media for the specified |channel| and |type|.
-    virtual int DeRegisterExternalMediaProcessing(
-        int channel, ProcessingTypes type) = 0;
+  // Removes the VoEMediaProcess derived instance and deactivates external
+  // media for the specified |channel| and |type|.
+  virtual int DeRegisterExternalMediaProcessing(int channel,
+                                                ProcessingTypes type) = 0;
 
-    // Pulls an audio frame from the specified |channel| for external mixing.
-    // If the |desired_sample_rate_hz| is 0, the signal will be returned with
-    // its native frequency, otherwise it will be resampled. Valid frequencies
-    // are 16, 22, 32, 44 or 48 kHz.
-    virtual int GetAudioFrame(int channel, int desired_sample_rate_hz,
-                              AudioFrame* frame) = 0;
+  // Pulls an audio frame from the specified |channel| for external mixing.
+  // If the |desired_sample_rate_hz| is 0, the signal will be returned with
+  // its native frequency, otherwise it will be resampled. Valid frequencies
+  // are 16, 22, 32, 44 or 48 kHz.
+  virtual int GetAudioFrame(int channel,
+                            int desired_sample_rate_hz,
+                            AudioFrame* frame) = 0;
 
-    // Sets the state of external mixing. Cannot be changed during playback.
-    virtual int SetExternalMixing(int channel, bool enable) = 0;
+  // Sets the state of external mixing. Cannot be changed during playback.
+  virtual int SetExternalMixing(int channel, bool enable) = 0;
 
-    // Don't use. To be removed.
-    virtual int SetExternalRecordingStatus(bool enable) { return -1; }
-    virtual int SetExternalPlayoutStatus(bool enable) { return -1; }
-    virtual int ExternalRecordingInsertData(
-        const int16_t speechData10ms[], int lengthSamples,
-        int samplingFreqHz, int current_delay_ms) { return -1; }
-    virtual int ExternalPlayoutGetData(
-        int16_t speechData10ms[], int samplingFreqHz,
-        int current_delay_ms, int& lengthSamples) { return -1; }
+  // Don't use. To be removed.
+  virtual int SetExternalRecordingStatus(bool enable) { return -1; }
+  virtual int SetExternalPlayoutStatus(bool enable) { return -1; }
+  virtual int ExternalRecordingInsertData(const int16_t speechData10ms[],
+                                          int lengthSamples,
+                                          int samplingFreqHz,
+                                          int current_delay_ms) {
+    return -1;
+  }
+  virtual int ExternalPlayoutGetData(int16_t speechData10ms[],
+                                     int samplingFreqHz,
+                                     int current_delay_ms,
+                                     int& lengthSamples) {
+    return -1;
+  }
 
-protected:
-    VoEExternalMedia() {}
-    virtual ~VoEExternalMedia() {}
+ protected:
+  VoEExternalMedia() {}
+  virtual ~VoEExternalMedia() {}
 };
 
 }  // namespace webrtc
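
The VoEMediaProcess comments above describe the Process() override point; a
hypothetical processor, also showing the override-instead-of-virtual style from
this CL (the per-channel meaning of |length| is an assumption, noted in the
code):

    #include "webrtc/voice_engine/include/voe_external_media.h"

    class HalfGainProcessor : public webrtc::VoEMediaProcess {
     public:
      void Process(int channel, webrtc::ProcessingTypes type,
                   int16_t audio10ms[], int length,
                   int samplingFreq, bool isStereo) override {
        // Modify the samples in place; the frame length must not change.
        // Assumption: |length| is samples per channel, interleaved if stereo.
        const int total = isStereo ? 2 * length : length;
        for (int i = 0; i < total; ++i)
          audio10ms[i] = static_cast<int16_t>(audio10ms[i] / 2);
      }
    };
    // Activate via VoEExternalMedia::RegisterExternalMediaProcessing(
    //     channel, type, processor).
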
diff --git a/webrtc/voice_engine/include/voe_file.h b/webrtc/voice_engine/include/voe_file.h
index bd14284..e48a535 100644
--- a/webrtc/voice_engine/include/voe_file.h
+++ b/webrtc/voice_engine/include/voe_file.h
@@ -44,124 +44,142 @@
 
 class VoiceEngine;
 
-class WEBRTC_DLLEXPORT VoEFile
-{
-public:
-    // Factory for the VoEFile sub-API. Increases an internal
-    // reference counter if successful. Returns NULL if the API is not
-    // supported or if construction fails.
-    static VoEFile* GetInterface(VoiceEngine* voiceEngine);
+class WEBRTC_DLLEXPORT VoEFile {
+ public:
+  // Factory for the VoEFile sub-API. Increases an internal
+  // reference counter if successful. Returns NULL if the API is not
+  // supported or if construction fails.
+  static VoEFile* GetInterface(VoiceEngine* voiceEngine);
 
-    // Releases the VoEFile sub-API and decreases an internal
-    // reference counter. Returns the new reference count. This value should
-    // be zero for all sub-APIs before the VoiceEngine object can be safely
-    // deleted.
-    virtual int Release() = 0;
+  // Releases the VoEFile sub-API and decreases an internal
+  // reference counter. Returns the new reference count. This value should
+  // be zero for all sub-APIs before the VoiceEngine object can be safely
+  // deleted.
+  virtual int Release() = 0;
 
-    // Starts playing and mixing files with the local speaker signal for
-    // playout.
-    virtual int StartPlayingFileLocally(
-        int channel,
-        const char fileNameUTF8[1024],
-        bool loop = false,
-        FileFormats format = kFileFormatPcm16kHzFile,
-        float volumeScaling = 1.0,
-        int startPointMs = 0,
-        int stopPointMs = 0) = 0;
+  // Starts playing and mixing files with the local speaker signal for
+  // playout.
+  virtual int StartPlayingFileLocally(
+      int channel,
+      const char fileNameUTF8[1024],
+      bool loop = false,
+      FileFormats format = kFileFormatPcm16kHzFile,
+      float volumeScaling = 1.0,
+      int startPointMs = 0,
+      int stopPointMs = 0) = 0;
 
-    // Starts playing and mixing streams with the local speaker signal for
-    // playout.
-    virtual int StartPlayingFileLocally(
-        int channel,
-        InStream* stream,
-        FileFormats format = kFileFormatPcm16kHzFile,
-        float volumeScaling = 1.0,
-        int startPointMs = 0, int stopPointMs = 0) = 0;
+  // Starts playing and mixing streams with the local speaker signal for
+  // playout.
+  virtual int StartPlayingFileLocally(
+      int channel,
+      InStream* stream,
+      FileFormats format = kFileFormatPcm16kHzFile,
+      float volumeScaling = 1.0,
+      int startPointMs = 0,
+      int stopPointMs = 0) = 0;
 
-    // Stops playback of a file on a specific |channel|.
-    virtual int StopPlayingFileLocally(int channel) = 0;
+  // Stops playback of a file on a specific |channel|.
+  virtual int StopPlayingFileLocally(int channel) = 0;
 
-    // Returns the current file playing state for a specific |channel|.
-    virtual int IsPlayingFileLocally(int channel) = 0;
+  // Returns the current file playing state for a specific |channel|.
+  virtual int IsPlayingFileLocally(int channel) = 0;
 
-    // Starts reading data from a file and transmits the data either
-    // mixed with or instead of the microphone signal.
-    virtual int StartPlayingFileAsMicrophone(
-        int channel,
-        const char fileNameUTF8[1024],
-        bool loop = false ,
-        bool mixWithMicrophone = false,
-        FileFormats format = kFileFormatPcm16kHzFile,
-        float volumeScaling = 1.0) = 0;
+  // Starts reading data from a file and transmits the data either
+  // mixed with or instead of the microphone signal.
+  virtual int StartPlayingFileAsMicrophone(
+      int channel,
+      const char fileNameUTF8[1024],
+      bool loop = false,
+      bool mixWithMicrophone = false,
+      FileFormats format = kFileFormatPcm16kHzFile,
+      float volumeScaling = 1.0) = 0;
 
-    // Starts reading data from a stream and transmits the data either
-    // mixed with or instead of the microphone signal.
-    virtual int StartPlayingFileAsMicrophone(
-        int channel,
-        InStream* stream,
-        bool mixWithMicrophone = false,
-        FileFormats format = kFileFormatPcm16kHzFile,
-        float volumeScaling = 1.0) = 0;
+  // Starts reading data from a stream and transmits the data either
+  // mixed with or instead of the microphone signal.
+  virtual int StartPlayingFileAsMicrophone(
+      int channel,
+      InStream* stream,
+      bool mixWithMicrophone = false,
+      FileFormats format = kFileFormatPcm16kHzFile,
+      float volumeScaling = 1.0) = 0;
 
-    // Stops playing a file as the microphone signal for a specific |channel|.
-    virtual int StopPlayingFileAsMicrophone(int channel) = 0;
+  // Stops playing a file as the microphone signal for a specific |channel|.
+  virtual int StopPlayingFileAsMicrophone(int channel) = 0;
 
-    // Returns whether the |channel| is currently playing a file as microphone.
-    virtual int IsPlayingFileAsMicrophone(int channel) = 0;
+  // Returns whether the |channel| is currently playing a file as microphone.
+  virtual int IsPlayingFileAsMicrophone(int channel) = 0;
 
-    // Starts recording the mixed playout audio.
-    virtual int StartRecordingPlayout(int channel,
-                                      const char* fileNameUTF8,
-                                      CodecInst* compression = NULL,
-                                      int maxSizeBytes = -1) = 0;
+  // Starts recording the mixed playout audio.
+  virtual int StartRecordingPlayout(int channel,
+                                    const char* fileNameUTF8,
+                                    CodecInst* compression = NULL,
+                                    int maxSizeBytes = -1) = 0;
 
-    // Stops recording the mixed playout audio.
-    virtual int StopRecordingPlayout(int channel) = 0;
+  // Stops recording the mixed playout audio.
+  virtual int StopRecordingPlayout(int channel) = 0;
 
-    virtual int StartRecordingPlayout(int channel,
-                                      OutStream* stream,
-                                      CodecInst* compression = NULL) = 0;
+  virtual int StartRecordingPlayout(int channel,
+                                    OutStream* stream,
+                                    CodecInst* compression = NULL) = 0;
 
-    // Starts recording the microphone signal to a file.
-    virtual int StartRecordingMicrophone(const char* fileNameUTF8,
-                                         CodecInst* compression = NULL,
-                                         int maxSizeBytes = -1) = 0;
+  // Starts recording the microphone signal to a file.
+  virtual int StartRecordingMicrophone(const char* fileNameUTF8,
+                                       CodecInst* compression = NULL,
+                                       int maxSizeBytes = -1) = 0;
 
-    // Starts recording the microphone signal to a stream.
-    virtual int StartRecordingMicrophone(OutStream* stream,
-                                         CodecInst* compression = NULL) = 0;
+  // Starts recording the microphone signal to a stream.
+  virtual int StartRecordingMicrophone(OutStream* stream,
+                                       CodecInst* compression = NULL) = 0;
 
-    // Stops recording the microphone signal.
-    virtual int StopRecordingMicrophone() = 0;
+  // Stops recording the microphone signal.
+  virtual int StopRecordingMicrophone() = 0;
 
-    // Don't use. To be removed.
-    virtual int ScaleLocalFilePlayout(int channel, float scale) { return -1; }
-    virtual int ScaleFileAsMicrophonePlayout(
-            int channel, float scale) { return -1; }
-    virtual int GetFileDuration(const char* fileNameUTF8, int& durationMs,
-            FileFormats format = kFileFormatPcm16kHzFile) { return -1; }
-    virtual int GetPlaybackPosition(int channel, int& positionMs) { return -1; }
-    virtual int ConvertPCMToWAV(const char* fileNameInUTF8,
-                                const char* fileNameOutUTF8) { return -1; }
-    virtual int ConvertPCMToWAV(InStream* streamIn,
-                                OutStream* streamOut) { return -1; }
-    virtual int ConvertWAVToPCM(const char* fileNameInUTF8,
-                                const char* fileNameOutUTF8) { return -1; }
-    virtual int ConvertWAVToPCM(InStream* streamIn,
-                                OutStream* streamOut) { return -1; }
-    virtual int ConvertPCMToCompressed(const char* fileNameInUTF8,
-                                       const char* fileNameOutUTF8,
-                                       CodecInst* compression) { return -1; }
-    virtual int ConvertPCMToCompressed(InStream* streamIn,
-                                       OutStream* streamOut,
-                                       CodecInst* compression) { return -1; }
-    virtual int ConvertCompressedToPCM(const char* fileNameInUTF8,
-            const char* fileNameOutUTF8) { return -1; }
-    virtual int ConvertCompressedToPCM(InStream* streamIn,
-                                       OutStream* streamOut) { return -1; }
-protected:
-    VoEFile() {}
-    virtual ~VoEFile() {}
+  // Don't use. To be removed.
+  virtual int ScaleLocalFilePlayout(int channel, float scale) { return -1; }
+  virtual int ScaleFileAsMicrophonePlayout(int channel, float scale) {
+    return -1;
+  }
+  virtual int GetFileDuration(const char* fileNameUTF8,
+                              int& durationMs,
+                              FileFormats format = kFileFormatPcm16kHzFile) {
+    return -1;
+  }
+  virtual int GetPlaybackPosition(int channel, int& positionMs) { return -1; }
+  virtual int ConvertPCMToWAV(const char* fileNameInUTF8,
+                              const char* fileNameOutUTF8) {
+    return -1;
+  }
+  virtual int ConvertPCMToWAV(InStream* streamIn, OutStream* streamOut) {
+    return -1;
+  }
+  virtual int ConvertWAVToPCM(const char* fileNameInUTF8,
+                              const char* fileNameOutUTF8) {
+    return -1;
+  }
+  virtual int ConvertWAVToPCM(InStream* streamIn, OutStream* streamOut) {
+    return -1;
+  }
+  virtual int ConvertPCMToCompressed(const char* fileNameInUTF8,
+                                     const char* fileNameOutUTF8,
+                                     CodecInst* compression) {
+    return -1;
+  }
+  virtual int ConvertPCMToCompressed(InStream* streamIn,
+                                     OutStream* streamOut,
+                                     CodecInst* compression) {
+    return -1;
+  }
+  virtual int ConvertCompressedToPCM(const char* fileNameInUTF8,
+                                     const char* fileNameOutUTF8) {
+    return -1;
+  }
+  virtual int ConvertCompressedToPCM(InStream* streamIn, OutStream* streamOut) {
+    return -1;
+  }
+
+ protected:
+  VoEFile() {}
+  virtual ~VoEFile() {}
 };
 
 }  // namespace webrtc
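
Illustrative usage of the reformatted VoEFile interface above — a minimal sketch, not part of the patch. It assumes a VoiceEngine instance and a valid channel already created via VoEBase (neither is touched by this diff); the file names are placeholders.

  // Sketch only: minimal error handling; engine/channel setup assumed.
  #include "webrtc/voice_engine/include/voe_file.h"

  int PlayPromptAndRecordPlayout(webrtc::VoiceEngine* voe, int channel) {
    webrtc::VoEFile* file = webrtc::VoEFile::GetInterface(voe);
    if (file == NULL)
      return -1;

    // Loop a 16 kHz PCM prompt into the local speaker signal of |channel|.
    if (file->StartPlayingFileLocally(channel, "prompt.pcm", true /* loop */,
                                      webrtc::kFileFormatPcm16kHzFile) != 0) {
      file->Release();
      return -1;
    }

    // Record the mixed playout audio of the same channel to a file
    // (default compression, no size limit).
    file->StartRecordingPlayout(channel, "playout_mix_out");

    // ... later, when done ...
    file->StopRecordingPlayout(channel);
    file->StopPlayingFileLocally(channel);
    file->Release();
    return 0;
  }
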
diff --git a/webrtc/voice_engine/include/voe_hardware.h b/webrtc/voice_engine/include/voe_hardware.h
index db9bc56..1c6b7fc 100644
--- a/webrtc/voice_engine/include/voe_hardware.h
+++ b/webrtc/voice_engine/include/voe_hardware.h
@@ -38,75 +38,79 @@
 
 class VoiceEngine;
 
-class WEBRTC_DLLEXPORT VoEHardware
-{
-public:
-    // Factory for the VoEHardware sub-API. Increases an internal
-    // reference counter if successful. Returns NULL if the API is not
-    // supported or if construction fails.
-    static VoEHardware* GetInterface(VoiceEngine* voiceEngine);
+class WEBRTC_DLLEXPORT VoEHardware {
+ public:
+  // Factory for the VoEHardware sub-API. Increases an internal
+  // reference counter if successful. Returns NULL if the API is not
+  // supported or if construction fails.
+  static VoEHardware* GetInterface(VoiceEngine* voiceEngine);
 
-    // Releases the VoEHardware sub-API and decreases an internal
-    // reference counter. Returns the new reference count. This value should
-    // be zero for all sub-API:s before the VoiceEngine object can be safely
-    // deleted.
-    virtual int Release() = 0;
+  // Releases the VoEHardware sub-API and decreases an internal
+  // reference counter. Returns the new reference count. This value should
+  // be zero for all sub-API:s before the VoiceEngine object can be safely
+  // deleted.
+  virtual int Release() = 0;
 
-    // Gets the number of audio devices available for recording.
-    virtual int GetNumOfRecordingDevices(int& devices) = 0;
+  // Gets the number of audio devices available for recording.
+  virtual int GetNumOfRecordingDevices(int& devices) = 0;
 
-    // Gets the number of audio devices available for playout.
-    virtual int GetNumOfPlayoutDevices(int& devices) = 0;
+  // Gets the number of audio devices available for playout.
+  virtual int GetNumOfPlayoutDevices(int& devices) = 0;
 
-    // Gets the name of a specific recording device given by an |index|.
-    // On Windows Vista/7, it also retrieves an additional unique ID
-    // (GUID) for the recording device.
-    virtual int GetRecordingDeviceName(int index, char strNameUTF8[128],
-                                       char strGuidUTF8[128]) = 0;
-
-    // Gets the name of a specific playout device given by an |index|.
-    // On Windows Vista/7, it also retrieves an additional unique ID
-    // (GUID) for the playout device.
-    virtual int GetPlayoutDeviceName(int index, char strNameUTF8[128],
+  // Gets the name of a specific recording device given by an |index|.
+  // On Windows Vista/7, it also retrieves an additional unique ID
+  // (GUID) for the recording device.
+  virtual int GetRecordingDeviceName(int index,
+                                     char strNameUTF8[128],
                                      char strGuidUTF8[128]) = 0;
 
-    // Sets the audio device used for recording.
-    virtual int SetRecordingDevice(
-        int index, StereoChannel recordingChannel = kStereoBoth) = 0;
+  // Gets the name of a specific playout device given by an |index|.
+  // On Windows Vista/7, it also retrieves an additional unique ID
+  // (GUID) for the playout device.
+  virtual int GetPlayoutDeviceName(int index,
+                                   char strNameUTF8[128],
+                                   char strGuidUTF8[128]) = 0;
 
-    // Sets the audio device used for playout.
-    virtual int SetPlayoutDevice(int index) = 0;
+  // Sets the audio device used for recording.
+  virtual int SetRecordingDevice(
+      int index,
+      StereoChannel recordingChannel = kStereoBoth) = 0;
 
-    // Sets the type of audio device layer to use.
-    virtual int SetAudioDeviceLayer(AudioLayers audioLayer) = 0;
+  // Sets the audio device used for playout.
+  virtual int SetPlayoutDevice(int index) = 0;
 
-    // Gets the currently used (active) audio device layer.
-    virtual int GetAudioDeviceLayer(AudioLayers& audioLayer) = 0;
+  // Sets the type of audio device layer to use.
+  virtual int SetAudioDeviceLayer(AudioLayers audioLayer) = 0;
 
-    // Native sample rate controls (samples/sec)
-    virtual int SetRecordingSampleRate(unsigned int samples_per_sec) = 0;
-    virtual int RecordingSampleRate(unsigned int* samples_per_sec) const = 0;
-    virtual int SetPlayoutSampleRate(unsigned int samples_per_sec) = 0;
-    virtual int PlayoutSampleRate(unsigned int* samples_per_sec) const = 0;
+  // Gets the currently used (active) audio device layer.
+  virtual int GetAudioDeviceLayer(AudioLayers& audioLayer) = 0;
 
-    virtual bool BuiltInAECIsAvailable() const = 0;
-    virtual int EnableBuiltInAEC(bool enable) = 0;
+  // Native sample rate controls (samples/sec)
+  virtual int SetRecordingSampleRate(unsigned int samples_per_sec) = 0;
+  virtual int RecordingSampleRate(unsigned int* samples_per_sec) const = 0;
+  virtual int SetPlayoutSampleRate(unsigned int samples_per_sec) = 0;
+  virtual int PlayoutSampleRate(unsigned int* samples_per_sec) const = 0;
 
-    // To be removed. Don't use.
-    virtual bool BuiltInAECIsEnabled() const { return false; }
-    virtual int GetRecordingDeviceStatus(bool& isAvailable) { return -1; }
-    virtual int GetPlayoutDeviceStatus(bool& isAvailable) { return -1; }
-    virtual int ResetAudioDevice() { return -1; }
-    virtual int AudioDeviceControl(unsigned int par1, unsigned int par2,
-            unsigned int par3) { return -1; }
-    virtual int SetLoudspeakerStatus(bool enable) { return -1; }
-    virtual int GetLoudspeakerStatus(bool& enabled) { return -1; }
-    virtual int GetCPULoad(int& loadPercent) { return -1; }
+  virtual bool BuiltInAECIsAvailable() const = 0;
+  virtual int EnableBuiltInAEC(bool enable) = 0;
 
+  // To be removed. Don't use.
+  virtual bool BuiltInAECIsEnabled() const { return false; }
+  virtual int GetRecordingDeviceStatus(bool& isAvailable) { return -1; }
+  virtual int GetPlayoutDeviceStatus(bool& isAvailable) { return -1; }
+  virtual int ResetAudioDevice() { return -1; }
+  virtual int AudioDeviceControl(unsigned int par1,
+                                 unsigned int par2,
+                                 unsigned int par3) {
+    return -1;
+  }
+  virtual int SetLoudspeakerStatus(bool enable) { return -1; }
+  virtual int GetLoudspeakerStatus(bool& enabled) { return -1; }
+  virtual int GetCPULoad(int& loadPercent) { return -1; }
 
-protected:
-    VoEHardware() {}
-    virtual ~VoEHardware() {}
+ protected:
+  VoEHardware() {}
+  virtual ~VoEHardware() {}
 };
 
 }  // namespace webrtc
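
For reference, a minimal sketch of device selection built only from the VoEHardware calls above (not part of the patch; the choice of device index 0 and the omitted engine setup are illustrative).

  #include "webrtc/voice_engine/include/voe_hardware.h"

  int SelectFirstPlayoutDevice(webrtc::VoiceEngine* voe) {
    webrtc::VoEHardware* hardware = webrtc::VoEHardware::GetInterface(voe);
    if (hardware == NULL)
      return -1;

    int num_devices = 0;
    if (hardware->GetNumOfPlayoutDevices(num_devices) != 0 || num_devices < 1) {
      hardware->Release();
      return -1;
    }

    char name[128] = {0};
    char guid[128] = {0};  // Only filled in on Windows Vista/7 (see above).
    hardware->GetPlayoutDeviceName(0, name, guid);
    int err = hardware->SetPlayoutDevice(0);  // Index 0 is an arbitrary pick.

    hardware->Release();
    return err;
  }
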
diff --git a/webrtc/voice_engine/include/voe_neteq_stats.h b/webrtc/voice_engine/include/voe_neteq_stats.h
index 1e8c240..fb70cae 100644
--- a/webrtc/voice_engine/include/voe_neteq_stats.h
+++ b/webrtc/voice_engine/include/voe_neteq_stats.h
@@ -17,33 +17,33 @@
 
 class VoiceEngine;
 
-class WEBRTC_DLLEXPORT VoENetEqStats
-{
-public:
-    // Factory for the VoENetEqStats sub-API. Increases an internal
-    // reference counter if successful. Returns NULL if the API is not
-    // supported or if construction fails.
-    static VoENetEqStats* GetInterface(VoiceEngine* voiceEngine);
+class WEBRTC_DLLEXPORT VoENetEqStats {
+ public:
+  // Factory for the VoENetEqStats sub-API. Increases an internal
+  // reference counter if successful. Returns NULL if the API is not
+  // supported or if construction fails.
+  static VoENetEqStats* GetInterface(VoiceEngine* voiceEngine);
 
-    // Releases the VoENetEqStats sub-API and decreases an internal
-    // reference counter. Returns the new reference count. This value should
-    // be zero for all sub-API:s before the VoiceEngine object can be safely
-    // deleted.
-    virtual int Release() = 0;
+  // Releases the VoENetEqStats sub-API and decreases an internal
+  // reference counter. Returns the new reference count. This value should
+  // be zero for all sub-API:s before the VoiceEngine object can be safely
+  // deleted.
+  virtual int Release() = 0;
 
-    // Get the "in-call" statistics from NetEQ.
-    // The statistics are reset after the query.
-    virtual int GetNetworkStatistics(int channel, NetworkStatistics& stats) = 0;
+  // Get the "in-call" statistics from NetEQ.
+  // The statistics are reset after the query.
+  virtual int GetNetworkStatistics(int channel, NetworkStatistics& stats) = 0;
 
-    // Get statistics of calls to AudioCodingModule::PlayoutData10Ms().
-    virtual int GetDecodingCallStatistics(
-        int channel, AudioDecodingCallStats* stats) const = 0;
+  // Get statistics of calls to AudioCodingModule::PlayoutData10Ms().
+  virtual int GetDecodingCallStatistics(
+      int channel,
+      AudioDecodingCallStats* stats) const = 0;
 
-protected:
-    VoENetEqStats() {}
-    virtual ~VoENetEqStats() {}
+ protected:
+  VoENetEqStats() {}
+  virtual ~VoENetEqStats() {}
 };
 
 }  // namespace webrtc
 
-#endif    // #ifndef WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H
+#endif  // #ifndef WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H
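
Since GetNetworkStatistics() resets the counters on every query, a caller typically polls it on a timer. A minimal sketch, not part of the patch; NetworkStatistics is assumed to come from webrtc/common_types.h, and engine/channel setup is omitted.

  #include "webrtc/voice_engine/include/voe_neteq_stats.h"

  void PollJitterBufferStats(webrtc::VoiceEngine* voe, int channel) {
    webrtc::VoENetEqStats* neteq = webrtc::VoENetEqStats::GetInterface(voe);
    if (neteq == NULL)
      return;

    webrtc::NetworkStatistics stats;  // Filled in by the call below.
    if (neteq->GetNetworkStatistics(channel, stats) == 0) {
      // Inspect/log the fields of |stats| here. The counters are reset by
      // this query, so call it at a fixed interval rather than on demand.
    }
    neteq->Release();
  }
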
diff --git a/webrtc/voice_engine/include/voe_network.h b/webrtc/voice_engine/include/voe_network.h
index ff8b8e1..56b1446 100644
--- a/webrtc/voice_engine/include/voe_network.h
+++ b/webrtc/voice_engine/include/voe_network.h
@@ -41,52 +41,50 @@
 class VoiceEngine;
 
 // VoENetwork
-class WEBRTC_DLLEXPORT VoENetwork
-{
-public:
-    // Factory for the VoENetwork sub-API. Increases an internal
-    // reference counter if successful. Returns NULL if the API is not
-    // supported or if construction fails.
-    static VoENetwork* GetInterface(VoiceEngine* voiceEngine);
+class WEBRTC_DLLEXPORT VoENetwork {
+ public:
+  // Factory for the VoENetwork sub-API. Increases an internal
+  // reference counter if successful. Returns NULL if the API is not
+  // supported or if construction fails.
+  static VoENetwork* GetInterface(VoiceEngine* voiceEngine);
 
-    // Releases the VoENetwork sub-API and decreases an internal
-    // reference counter. Returns the new reference count. This value should
-    // be zero for all sub-API:s before the VoiceEngine object can be safely
-    // deleted.
-    virtual int Release() = 0;
+  // Releases the VoENetwork sub-API and decreases an internal
+  // reference counter. Returns the new reference count. This value should
+  // be zero for all sub-API:s before the VoiceEngine object can be safely
+  // deleted.
+  virtual int Release() = 0;
 
-    // Installs and enables a user-defined external transport protocol for a
-    // specified |channel|.
-    virtual int RegisterExternalTransport(
-        int channel, Transport& transport) = 0;
+  // Installs and enables a user-defined external transport protocol for a
+  // specified |channel|.
+  virtual int RegisterExternalTransport(int channel, Transport& transport) = 0;
 
-    // Removes and disables a user-defined external transport protocol for a
-    // specified |channel|.
-    virtual int DeRegisterExternalTransport(int channel) = 0;
+  // Removes and disables a user-defined external transport protocol for a
+  // specified |channel|.
+  virtual int DeRegisterExternalTransport(int channel) = 0;
 
-    // The packets received from the network should be passed to this
-    // function when external transport is enabled. Note that the data
-    // including the RTP-header must also be given to the VoiceEngine.
-    virtual int ReceivedRTPPacket(int channel,
-                                  const void* data,
-                                  size_t length) = 0;
-    virtual int ReceivedRTPPacket(int channel,
-                                  const void* data,
-                                  size_t length,
-                                  const PacketTime& packet_time) {
-      return 0;
-    }
+  // The packets received from the network should be passed to this
+  // function when external transport is enabled. Note that the data
+  // including the RTP-header must also be given to the VoiceEngine.
+  virtual int ReceivedRTPPacket(int channel,
+                                const void* data,
+                                size_t length) = 0;
+  virtual int ReceivedRTPPacket(int channel,
+                                const void* data,
+                                size_t length,
+                                const PacketTime& packet_time) {
+    return 0;
+  }
 
-    // The packets received from the network should be passed to this
-    // function when external transport is enabled. Note that the data
-    // including the RTCP-header must also be given to the VoiceEngine.
-    virtual int ReceivedRTCPPacket(int channel,
-                                   const void* data,
-                                   size_t length) = 0;
+  // The packets received from the network should be passed to this
+  // function when external transport is enabled. Note that the data
+  // including the RTCP-header must also be given to the VoiceEngine.
+  virtual int ReceivedRTCPPacket(int channel,
+                                 const void* data,
+                                 size_t length) = 0;
 
-protected:
-    VoENetwork() {}
-    virtual ~VoENetwork() {}
+ protected:
+  VoENetwork() {}
+  virtual ~VoENetwork() {}
 };
 
 }  // namespace webrtc
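
A minimal sketch of the external-transport flow described above (not part of the patch). The webrtc::Transport implementation and the packet source are assumed to exist elsewhere; only the VoENetwork calls declared in this header are used.

  #include <stddef.h>

  #include "webrtc/voice_engine/include/voe_network.h"

  // |transport| is a caller-owned webrtc::Transport implementation that
  // actually sends packets; it must outlive the registration.
  int EnableExternalTransport(webrtc::VoiceEngine* voe,
                              int channel,
                              webrtc::Transport* transport) {
    webrtc::VoENetwork* network = webrtc::VoENetwork::GetInterface(voe);
    if (network == NULL)
      return -1;
    int err = network->RegisterExternalTransport(channel, *transport);
    network->Release();
    return err;
  }

  // Incoming packets, including their RTP/RTCP headers, are handed back to
  // the engine on the receive side.
  void OnPacketFromNetwork(webrtc::VoENetwork* network,
                           int channel,
                           const void* data,
                           size_t length,
                           bool is_rtcp) {
    if (is_rtcp)
      network->ReceivedRTCPPacket(channel, data, length);
    else
      network->ReceivedRTPPacket(channel, data, length);
  }
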
diff --git a/webrtc/voice_engine/include/voe_rtp_rtcp.h b/webrtc/voice_engine/include/voe_rtp_rtcp.h
index fedb134..421afd8 100644
--- a/webrtc/voice_engine/include/voe_rtp_rtcp.h
+++ b/webrtc/voice_engine/include/voe_rtp_rtcp.h
@@ -48,34 +48,32 @@
 class VoiceEngine;
 
 // VoERTPObserver
-class WEBRTC_DLLEXPORT VoERTPObserver
-{
-public:
-    virtual void OnIncomingCSRCChanged(
-        int channel, unsigned int CSRC, bool added) = 0;
+class WEBRTC_DLLEXPORT VoERTPObserver {
+ public:
+  virtual void OnIncomingCSRCChanged(int channel,
+                                     unsigned int CSRC,
+                                     bool added) = 0;
 
-    virtual void OnIncomingSSRCChanged(
-        int channel, unsigned int SSRC) = 0;
+  virtual void OnIncomingSSRCChanged(int channel, unsigned int SSRC) = 0;
 
-protected:
-    virtual ~VoERTPObserver() {}
+ protected:
+  virtual ~VoERTPObserver() {}
 };
 
 // CallStatistics
-struct CallStatistics
-{
-    unsigned short fractionLost;
-    unsigned int cumulativeLost;
-    unsigned int extendedMax;
-    unsigned int jitterSamples;
-    int64_t rttMs;
-    size_t bytesSent;
-    int packetsSent;
-    size_t bytesReceived;
-    int packetsReceived;
-    // The capture ntp time (in local timebase) of the first played out audio
-    // frame.
-    int64_t capture_start_ntp_time_ms_;
+struct CallStatistics {
+  unsigned short fractionLost;
+  unsigned int cumulativeLost;
+  unsigned int extendedMax;
+  unsigned int jitterSamples;
+  int64_t rttMs;
+  size_t bytesSent;
+  int packetsSent;
+  size_t bytesReceived;
+  int packetsReceived;
+  // The capture ntp time (in local timebase) of the first played out audio
+  // frame.
+  int64_t capture_start_ntp_time_ms_;
 };
 
 // See section 6.4.1 in http://www.ietf.org/rfc/rfc3550.txt for details.
@@ -89,7 +87,7 @@
 
 // See section 6.4.2 in http://www.ietf.org/rfc/rfc3550.txt for details.
 struct ReportBlock {
-  uint32_t sender_SSRC; // SSRC of sender
+  uint32_t sender_SSRC;  // SSRC of sender
   uint32_t source_SSRC;
   uint8_t fraction_lost;
   uint32_t cumulative_num_packets_lost;
@@ -100,177 +98,191 @@
 };
 
 // VoERTP_RTCP
-class WEBRTC_DLLEXPORT VoERTP_RTCP
-{
-public:
+class WEBRTC_DLLEXPORT VoERTP_RTCP {
+ public:
+  // Factory for the VoERTP_RTCP sub-API. Increases an internal
+  // reference counter if successful. Returns NULL if the API is not
+  // supported or if construction fails.
+  static VoERTP_RTCP* GetInterface(VoiceEngine* voiceEngine);
 
-    // Factory for the VoERTP_RTCP sub-API. Increases an internal
-    // reference counter if successful. Returns NULL if the API is not
-    // supported or if construction fails.
-    static VoERTP_RTCP* GetInterface(VoiceEngine* voiceEngine);
+  // Releases the VoERTP_RTCP sub-API and decreases an internal
+  // reference counter. Returns the new reference count. This value should
+  // be zero for all sub-API:s before the VoiceEngine object can be safely
+  // deleted.
+  virtual int Release() = 0;
 
-    // Releases the VoERTP_RTCP sub-API and decreases an internal
-    // reference counter. Returns the new reference count. This value should
-    // be zero for all sub-API:s before the VoiceEngine object can be safely
-    // deleted.
-    virtual int Release() = 0;
+  // Sets the local RTP synchronization source identifier (SSRC) explicitly.
+  virtual int SetLocalSSRC(int channel, unsigned int ssrc) = 0;
 
-    // Sets the local RTP synchronization source identifier (SSRC) explicitly.
-    virtual int SetLocalSSRC(int channel, unsigned int ssrc) = 0;
+  // Gets the local RTP SSRC of a specified |channel|.
+  virtual int GetLocalSSRC(int channel, unsigned int& ssrc) = 0;
 
-    // Gets the local RTP SSRC of a specified |channel|.
-    virtual int GetLocalSSRC(int channel, unsigned int& ssrc) = 0;
+  // Gets the SSRC of the incoming RTP packets.
+  virtual int GetRemoteSSRC(int channel, unsigned int& ssrc) = 0;
 
-    // Gets the SSRC of the incoming RTP packets.
-    virtual int GetRemoteSSRC(int channel, unsigned int& ssrc) = 0;
-
-    // Sets the status of rtp-audio-level-indication on a specific |channel|.
-    virtual int SetSendAudioLevelIndicationStatus(int channel,
-                                                  bool enable,
-                                                  unsigned char id = 1) = 0;
-
-    // Sets the status of receiving rtp-audio-level-indication on a specific
-    // |channel|.
-    virtual int SetReceiveAudioLevelIndicationStatus(int channel,
-                                                     bool enable,
-                                                     unsigned char id = 1) {
-      // TODO(wu): Remove default implementation once talk is updated.
-      return 0;
-    }
-
-    // Sets the status of sending absolute sender time on a specific |channel|.
-    virtual int SetSendAbsoluteSenderTimeStatus(int channel,
+  // Sets the status of rtp-audio-level-indication on a specific |channel|.
+  virtual int SetSendAudioLevelIndicationStatus(int channel,
                                                 bool enable,
-                                                unsigned char id) = 0;
+                                                unsigned char id = 1) = 0;
 
-    // Sets status of receiving absolute sender time on a specific |channel|.
-    virtual int SetReceiveAbsoluteSenderTimeStatus(int channel,
+  // Sets the status of receiving rtp-audio-level-indication on a specific
+  // |channel|.
+  virtual int SetReceiveAudioLevelIndicationStatus(int channel,
                                                    bool enable,
-                                                   unsigned char id) = 0;
+                                                   unsigned char id = 1) {
+    // TODO(wu): Remove default implementation once talk is updated.
+    return 0;
+  }
 
-    // Sets the RTCP status on a specific |channel|.
-    virtual int SetRTCPStatus(int channel, bool enable) = 0;
+  // Sets the status of sending absolute sender time on a specific |channel|.
+  virtual int SetSendAbsoluteSenderTimeStatus(int channel,
+                                              bool enable,
+                                              unsigned char id) = 0;
 
-    // Gets the RTCP status on a specific |channel|.
-    virtual int GetRTCPStatus(int channel, bool& enabled) = 0;
+  // Sets status of receiving absolute sender time on a specific |channel|.
+  virtual int SetReceiveAbsoluteSenderTimeStatus(int channel,
+                                                 bool enable,
+                                                 unsigned char id) = 0;
 
-    // Sets the canonical name (CNAME) parameter for RTCP reports on a
-    // specific |channel|.
-    virtual int SetRTCP_CNAME(int channel, const char cName[256]) = 0;
+  // Sets the RTCP status on a specific |channel|.
+  virtual int SetRTCPStatus(int channel, bool enable) = 0;
 
-    // TODO(holmer): Remove this API once it has been removed from
-    // fakewebrtcvoiceengine.h.
-    virtual int GetRTCP_CNAME(int channel, char cName[256]) {
-      return -1;
-    }
+  // Gets the RTCP status on a specific |channel|.
+  virtual int GetRTCPStatus(int channel, bool& enabled) = 0;
 
-    // Gets the canonical name (CNAME) parameter for incoming RTCP reports
-    // on a specific channel.
-    virtual int GetRemoteRTCP_CNAME(int channel, char cName[256]) = 0;
+  // Sets the canonical name (CNAME) parameter for RTCP reports on a
+  // specific |channel|.
+  virtual int SetRTCP_CNAME(int channel, const char cName[256]) = 0;
 
-    // Gets RTCP data from incoming RTCP Sender Reports.
-    virtual int GetRemoteRTCPData(
-        int channel, unsigned int& NTPHigh, unsigned int& NTPLow,
-        unsigned int& timestamp, unsigned int& playoutTimestamp,
-        unsigned int* jitter = NULL, unsigned short* fractionLost = NULL) = 0;
+  // TODO(holmer): Remove this API once it has been removed from
+  // fakewebrtcvoiceengine.h.
+  virtual int GetRTCP_CNAME(int channel, char cName[256]) { return -1; }
 
-    // Gets RTP statistics for a specific |channel|.
-    virtual int GetRTPStatistics(
-        int channel, unsigned int& averageJitterMs, unsigned int& maxJitterMs,
-        unsigned int& discardedPackets) = 0;
+  // Gets the canonical name (CNAME) parameter for incoming RTCP reports
+  // on a specific channel.
+  virtual int GetRemoteRTCP_CNAME(int channel, char cName[256]) = 0;
 
-    // Gets RTCP statistics for a specific |channel|.
-    virtual int GetRTCPStatistics(int channel, CallStatistics& stats) = 0;
+  // Gets RTCP data from incoming RTCP Sender Reports.
+  virtual int GetRemoteRTCPData(int channel,
+                                unsigned int& NTPHigh,
+                                unsigned int& NTPLow,
+                                unsigned int& timestamp,
+                                unsigned int& playoutTimestamp,
+                                unsigned int* jitter = NULL,
+                                unsigned short* fractionLost = NULL) = 0;
 
-    // Gets the report block parts of the last received RTCP Sender Report (SR),
-    // or RTCP Receiver Report (RR) on a specified |channel|. Each vector
-    // element also contains the SSRC of the sender in addition to a report
-    // block.
-    virtual int GetRemoteRTCPReportBlocks(
-        int channel, std::vector<ReportBlock>* receive_blocks) = 0;
+  // Gets RTP statistics for a specific |channel|.
+  virtual int GetRTPStatistics(int channel,
+                               unsigned int& averageJitterMs,
+                               unsigned int& maxJitterMs,
+                               unsigned int& discardedPackets) = 0;
 
-    // Sets the Redundant Coding (RED) status on a specific |channel|.
-    // TODO(minyue): Make SetREDStatus() pure virtual when fakewebrtcvoiceengine
-    // in talk is ready.
-    virtual int SetREDStatus(
-        int channel, bool enable, int redPayloadtype = -1) { return -1; }
+  // Gets RTCP statistics for a specific |channel|.
+  virtual int GetRTCPStatistics(int channel, CallStatistics& stats) = 0;
 
-    // Gets the RED status on a specific |channel|.
-    // TODO(minyue): Make GetREDStatus() pure virtual when fakewebrtcvoiceengine
-    // in talk is ready.
-    virtual int GetREDStatus(
-        int channel, bool& enabled, int& redPayloadtype) { return -1; }
+  // Gets the report block parts of the last received RTCP Sender Report (SR),
+  // or RTCP Receiver Report (RR) on a specified |channel|. Each vector
+  // element also contains the SSRC of the sender in addition to a report
+  // block.
+  virtual int GetRemoteRTCPReportBlocks(
+      int channel,
+      std::vector<ReportBlock>* receive_blocks) = 0;
 
-    // Sets the Forward Error Correction (FEC) status on a specific |channel|.
-    // TODO(minyue): Remove SetFECStatus() when SetFECStatus() is replaced by
-    // SetREDStatus() in fakewebrtcvoiceengine.
-    virtual int SetFECStatus(
-        int channel, bool enable, int redPayloadtype = -1) {
-      return SetREDStatus(channel, enable, redPayloadtype);
-    };
+  // Sets the Redundant Coding (RED) status on a specific |channel|.
+  // TODO(minyue): Make SetREDStatus() pure virtual when fakewebrtcvoiceengine
+  // in talk is ready.
+  virtual int SetREDStatus(int channel, bool enable, int redPayloadtype = -1) {
+    return -1;
+  }
 
-    // Gets the FEC status on a specific |channel|.
-    // TODO(minyue): Remove GetFECStatus() when GetFECStatus() is replaced by
-    // GetREDStatus() in fakewebrtcvoiceengine.
-    virtual int GetFECStatus(
-        int channel, bool& enabled, int& redPayloadtype) {
-      return SetREDStatus(channel, enabled, redPayloadtype);
-    }
+  // Gets the RED status on a specific |channel|.
+  // TODO(minyue): Make GetREDStatus() pure virtual when fakewebrtcvoiceengine
+  // in talk is ready.
+  virtual int GetREDStatus(int channel, bool& enabled, int& redPayloadtype) {
+    return -1;
+  }
 
-    // This function enables Negative Acknowledgment (NACK) using RTCP,
-    // implemented based on RFC 4585. NACK retransmits RTP packets if lost on
-    // the network. This creates a lossless transport at the expense of delay.
-    // If using NACK, NACK should be enabled on both endpoints in a call.
-    virtual int SetNACKStatus(int channel,
-                              bool enable,
-                              int maxNoPackets) = 0;
+  // Sets the Forward Error Correction (FEC) status on a specific |channel|.
+  // TODO(minyue): Remove SetFECStatus() when SetFECStatus() is replaced by
+  // SetREDStatus() in fakewebrtcvoiceengine.
+  virtual int SetFECStatus(int channel, bool enable, int redPayloadtype = -1) {
+    return SetREDStatus(channel, enable, redPayloadtype);
+  };
 
-    // Enables capturing of RTP packets to a binary file on a specific
-    // |channel| and for a given |direction|. The file can later be replayed
-    // using e.g. RTP Tools rtpplay since the binary file format is
-    // compatible with the rtpdump format.
-    virtual int StartRTPDump(
-        int channel, const char fileNameUTF8[1024],
-        RTPDirections direction = kRtpIncoming) = 0;
+  // Gets the FEC status on a specific |channel|.
+  // TODO(minyue): Remove GetFECStatus() when GetFECStatus() is replaced by
+  // GetREDStatus() in fakewebrtcvoiceengine.
+  virtual int GetFECStatus(int channel, bool& enabled, int& redPayloadtype) {
+    return SetREDStatus(channel, enabled, redPayloadtype);
+  }
 
-    // Disables capturing of RTP packets to a binary file on a specific
-    // |channel| and for a given |direction|.
-    virtual int StopRTPDump(
-        int channel, RTPDirections direction = kRtpIncoming) = 0;
+  // This function enables Negative Acknowledgment (NACK) using RTCP,
+  // implemented based on RFC 4585. NACK retransmits RTP packets if lost on
+  // the network. This creates a lossless transport at the expense of delay.
+  // If using NACK, NACK should be enabled on both endpoints in a call.
+  virtual int SetNACKStatus(int channel, bool enable, int maxNoPackets) = 0;
 
-    // Gets the the current RTP capturing state for the specified
-    // |channel| and |direction|.
-    virtual int RTPDumpIsActive(
-        int channel, RTPDirections direction = kRtpIncoming) = 0;
+  // Enables capturing of RTP packets to a binary file on a specific
+  // |channel| and for a given |direction|. The file can later be replayed
+  // using e.g. RTP Tools rtpplay since the binary file format is
+  // compatible with the rtpdump format.
+  virtual int StartRTPDump(int channel,
+                           const char fileNameUTF8[1024],
+                           RTPDirections direction = kRtpIncoming) = 0;
 
-    // Sets video engine channel to receive incoming audio packets for
-    // aggregated bandwidth estimation. Takes ownership of the ViENetwork
-    // interface.
-    virtual int SetVideoEngineBWETarget(int channel, ViENetwork* vie_network,
-                                        int video_channel) {
-      return 0;
-    }
+  // Disables capturing of RTP packets to a binary file on a specific
+  // |channel| and for a given |direction|.
+  virtual int StopRTPDump(int channel,
+                          RTPDirections direction = kRtpIncoming) = 0;
 
-    // Will be removed. Don't use.
-    virtual int RegisterRTPObserver(int channel,
-            VoERTPObserver& observer) { return -1; };
-    virtual int DeRegisterRTPObserver(int channel) { return -1; };
-    virtual int GetRemoteCSRCs(int channel,
-            unsigned int arrCSRC[15]) { return -1; };
-    virtual int InsertExtraRTPPacket(
-            int channel, unsigned char payloadType, bool markerBit,
-            const char* payloadData, unsigned short payloadSize) { return -1; };
-    virtual int GetRemoteRTCPSenderInfo(
-            int channel, SenderInfo* sender_info) { return -1; };
-    virtual int SendApplicationDefinedRTCPPacket(
-            int channel, unsigned char subType, unsigned int name,
-            const char* data, unsigned short dataLengthInBytes) { return -1; };
-    virtual int GetLastRemoteTimeStamp(int channel,
-            uint32_t* lastRemoteTimeStamp) { return -1; };
+  // Gets the current RTP capturing state for the specified
+  // |channel| and |direction|.
+  virtual int RTPDumpIsActive(int channel,
+                              RTPDirections direction = kRtpIncoming) = 0;
 
-protected:
-    VoERTP_RTCP() {}
-    virtual ~VoERTP_RTCP() {}
+  // Sets video engine channel to receive incoming audio packets for
+  // aggregated bandwidth estimation. Takes ownership of the ViENetwork
+  // interface.
+  virtual int SetVideoEngineBWETarget(int channel,
+                                      ViENetwork* vie_network,
+                                      int video_channel) {
+    return 0;
+  }
+
+  // Will be removed. Don't use.
+  virtual int RegisterRTPObserver(int channel, VoERTPObserver& observer) {
+    return -1;
+  };
+  virtual int DeRegisterRTPObserver(int channel) { return -1; };
+  virtual int GetRemoteCSRCs(int channel, unsigned int arrCSRC[15]) {
+    return -1;
+  };
+  virtual int InsertExtraRTPPacket(int channel,
+                                   unsigned char payloadType,
+                                   bool markerBit,
+                                   const char* payloadData,
+                                   unsigned short payloadSize) {
+    return -1;
+  };
+  virtual int GetRemoteRTCPSenderInfo(int channel, SenderInfo* sender_info) {
+    return -1;
+  };
+  virtual int SendApplicationDefinedRTCPPacket(
+      int channel,
+      unsigned char subType,
+      unsigned int name,
+      const char* data,
+      unsigned short dataLengthInBytes) {
+    return -1;
+  };
+  virtual int GetLastRemoteTimeStamp(int channel,
+                                     uint32_t* lastRemoteTimeStamp) {
+    return -1;
+  };
+
+ protected:
+  VoERTP_RTCP() {}
+  virtual ~VoERTP_RTCP() {}
 };
 
 }  // namespace webrtc
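
A minimal sketch tying the VoERTP_RTCP calls above together (not part of the patch; the CNAME and SSRC values are placeholders and engine/channel setup is omitted).

  #include "webrtc/voice_engine/include/voe_rtp_rtcp.h"

  int ConfigureRtcpAndReadStats(webrtc::VoiceEngine* voe, int channel) {
    webrtc::VoERTP_RTCP* rtp_rtcp = webrtc::VoERTP_RTCP::GetInterface(voe);
    if (rtp_rtcp == NULL)
      return -1;

    rtp_rtcp->SetRTCPStatus(channel, true);
    rtp_rtcp->SetRTCP_CNAME(channel, "example-cname");
    rtp_rtcp->SetLocalSSRC(channel, 0x12345678);  // Placeholder SSRC.

    webrtc::CallStatistics stats;
    if (rtp_rtcp->GetRTCPStatistics(channel, stats) == 0) {
      // e.g. stats.fractionLost, stats.rttMs, stats.bytesSent (see the
      // CallStatistics struct above).
    }
    rtp_rtcp->Release();
    return 0;
  }
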
diff --git a/webrtc/voice_engine/include/voe_video_sync.h b/webrtc/voice_engine/include/voe_video_sync.h
index cf16d3b..1143cef 100644
--- a/webrtc/voice_engine/include/voe_video_sync.h
+++ b/webrtc/voice_engine/include/voe_video_sync.h
@@ -41,64 +41,64 @@
 class RtpRtcp;
 class VoiceEngine;
 
-class WEBRTC_DLLEXPORT VoEVideoSync
-{
-public:
-    // Factory for the VoEVideoSync sub-API. Increases an internal
-    // reference counter if successful. Returns NULL if the API is not
-    // supported or if construction fails.
-    static VoEVideoSync* GetInterface(VoiceEngine* voiceEngine);
+class WEBRTC_DLLEXPORT VoEVideoSync {
+ public:
+  // Factory for the VoEVideoSync sub-API. Increases an internal
+  // reference counter if successful. Returns NULL if the API is not
+  // supported or if construction fails.
+  static VoEVideoSync* GetInterface(VoiceEngine* voiceEngine);
 
-    // Releases the VoEVideoSync sub-API and decreases an internal
-    // reference counter. Returns the new reference count. This value should
-    // be zero for all sub-API:s before the VoiceEngine object can be safely
-    // deleted.
-    virtual int Release() = 0;
+  // Releases the VoEVideoSync sub-API and decreases an internal
+  // reference counter. Returns the new reference count. This value should
+  // be zero for all sub-API:s before the VoiceEngine object can be safely
+  // deleted.
+  virtual int Release() = 0;
 
-    // Gets the current sound card buffer size (playout delay).
-    virtual int GetPlayoutBufferSize(int& buffer_ms) = 0;
+  // Gets the current sound card buffer size (playout delay).
+  virtual int GetPlayoutBufferSize(int& buffer_ms) = 0;
 
-    // Sets a minimum target delay for the jitter buffer. This delay is
-    // maintained by the jitter buffer, unless channel condition (jitter in
-    // inter-arrival times) dictates a higher required delay. The overall
-    // jitter buffer delay is max of |delay_ms| and the latency that NetEq
-    // computes based on inter-arrival times and its playout mode.
-    virtual int SetMinimumPlayoutDelay(int channel, int delay_ms) = 0;
+  // Sets a minimum target delay for the jitter buffer. This delay is
+  // maintained by the jitter buffer, unless channel condition (jitter in
+  // inter-arrival times) dictates a higher required delay. The overall
+  // jitter buffer delay is max of |delay_ms| and the latency that NetEq
+  // computes based on inter-arrival times and its playout mode.
+  virtual int SetMinimumPlayoutDelay(int channel, int delay_ms) = 0;
 
-    // Sets an initial delay for the playout jitter buffer. The playout of the
-    // audio is delayed by |delay_ms| in milliseconds. Thereafter, the delay is
-    // maintained, unless NetEq's internal mechanism requires a higher latency.
-    // Such a latency is computed based on inter-arrival times and NetEq's
-    // playout mode.
-    virtual int SetInitialPlayoutDelay(int channel, int delay_ms) = 0;
+  // Sets an initial delay for the playout jitter buffer. The playout of the
+  // audio is delayed by |delay_ms| in milliseconds. Thereafter, the delay is
+  // maintained, unless NetEq's internal mechanism requires a higher latency.
+  // Such a latency is computed based on inter-arrival times and NetEq's
+  // playout mode.
+  virtual int SetInitialPlayoutDelay(int channel, int delay_ms) = 0;
 
-    // Gets the |jitter_buffer_delay_ms| (including the algorithmic delay), and
-    // the |playout_buffer_delay_ms| for a specified |channel|.
-    virtual int GetDelayEstimate(int channel,
-                                 int* jitter_buffer_delay_ms,
-                                 int* playout_buffer_delay_ms) = 0;
+  // Gets the |jitter_buffer_delay_ms| (including the algorithmic delay), and
+  // the |playout_buffer_delay_ms| for a specified |channel|.
+  virtual int GetDelayEstimate(int channel,
+                               int* jitter_buffer_delay_ms,
+                               int* playout_buffer_delay_ms) = 0;
 
-    // Returns the least required jitter buffer delay. This is computed by the
-    // the jitter buffer based on the inter-arrival time of RTP packets and
-    // playout mode. NetEq maintains this latency unless a higher value is
-    // requested by calling SetMinimumPlayoutDelay().
-    virtual int GetLeastRequiredDelayMs(int channel) const = 0;
+  // Returns the least required jitter buffer delay. This is computed by the
+  // jitter buffer based on the inter-arrival time of RTP packets and
+  // playout mode. NetEq maintains this latency unless a higher value is
+  // requested by calling SetMinimumPlayoutDelay().
+  virtual int GetLeastRequiredDelayMs(int channel) const = 0;
 
-    // Manual initialization of the RTP timestamp.
-    virtual int SetInitTimestamp(int channel, unsigned int timestamp) = 0;
+  // Manual initialization of the RTP timestamp.
+  virtual int SetInitTimestamp(int channel, unsigned int timestamp) = 0;
 
-    // Manual initialization of the RTP sequence number.
-    virtual int SetInitSequenceNumber(int channel, short sequenceNumber) = 0;
+  // Manual initialization of the RTP sequence number.
+  virtual int SetInitSequenceNumber(int channel, short sequenceNumber) = 0;
 
-    // Get the received RTP timestamp
-    virtual int GetPlayoutTimestamp(int channel, unsigned int& timestamp) = 0;
+  // Get the received RTP timestamp
+  virtual int GetPlayoutTimestamp(int channel, unsigned int& timestamp) = 0;
 
-    virtual int GetRtpRtcp (int channel, RtpRtcp** rtpRtcpModule,
-                            RtpReceiver** rtp_receiver) = 0;
+  virtual int GetRtpRtcp(int channel,
+                         RtpRtcp** rtpRtcpModule,
+                         RtpReceiver** rtp_receiver) = 0;
 
-protected:
-    VoEVideoSync() { }
-    virtual ~VoEVideoSync() { }
+ protected:
+  VoEVideoSync() {}
+  virtual ~VoEVideoSync() {}
 };
 
 }  // namespace webrtc
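
A minimal sketch of raising the audio playout delay for A/V sync with the calls above (not part of the patch; the 80 ms target is an arbitrary example and engine/channel setup is omitted).

  #include "webrtc/voice_engine/include/voe_video_sync.h"

  int RaiseAudioDelayForSync(webrtc::VoiceEngine* voe, int channel) {
    webrtc::VoEVideoSync* sync = webrtc::VoEVideoSync::GetInterface(voe);
    if (sync == NULL)
      return -1;

    // Ask the jitter buffer for at least 80 ms; NetEq may still use more if
    // network jitter requires it (see SetMinimumPlayoutDelay() above).
    sync->SetMinimumPlayoutDelay(channel, 80);

    int jitter_buffer_delay_ms = 0;
    int playout_buffer_delay_ms = 0;
    if (sync->GetDelayEstimate(channel, &jitter_buffer_delay_ms,
                               &playout_buffer_delay_ms) == 0) {
      // The total audio-side delay is roughly the sum of the two components.
    }
    sync->Release();
    return 0;
  }
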
diff --git a/webrtc/voice_engine/include/voe_volume_control.h b/webrtc/voice_engine/include/voe_volume_control.h
index 89b64c1..6c0bc0a 100644
--- a/webrtc/voice_engine/include/voe_volume_control.h
+++ b/webrtc/voice_engine/include/voe_volume_control.h
@@ -42,78 +42,77 @@
 
 class VoiceEngine;
 
-class WEBRTC_DLLEXPORT VoEVolumeControl
-{
-public:
-    // Factory for the VoEVolumeControl sub-API. Increases an internal
-    // reference counter if successful. Returns NULL if the API is not
-    // supported or if construction fails.
-    static VoEVolumeControl* GetInterface(VoiceEngine* voiceEngine);
+class WEBRTC_DLLEXPORT VoEVolumeControl {
+ public:
+  // Factory for the VoEVolumeControl sub-API. Increases an internal
+  // reference counter if successful. Returns NULL if the API is not
+  // supported or if construction fails.
+  static VoEVolumeControl* GetInterface(VoiceEngine* voiceEngine);
 
-    // Releases the VoEVolumeControl sub-API and decreases an internal
-    // reference counter. Returns the new reference count. This value should
-    // be zero for all sub-API:s before the VoiceEngine object can be safely
-    // deleted.
-    virtual int Release() = 0;
+  // Releases the VoEVolumeControl sub-API and decreases an internal
+  // reference counter. Returns the new reference count. This value should
+  // be zero for all sub-API:s before the VoiceEngine object can be safely
+  // deleted.
+  virtual int Release() = 0;
 
-    // Sets the speaker |volume| level. Valid range is [0,255].
-    virtual int SetSpeakerVolume(unsigned int volume) = 0;
+  // Sets the speaker |volume| level. Valid range is [0,255].
+  virtual int SetSpeakerVolume(unsigned int volume) = 0;
 
-    // Gets the speaker |volume| level.
-    virtual int GetSpeakerVolume(unsigned int& volume) = 0;
+  // Gets the speaker |volume| level.
+  virtual int GetSpeakerVolume(unsigned int& volume) = 0;
 
-    // Sets the microphone volume level. Valid range is [0,255].
-    virtual int SetMicVolume(unsigned int volume) = 0;
+  // Sets the microphone volume level. Valid range is [0,255].
+  virtual int SetMicVolume(unsigned int volume) = 0;
 
-    // Gets the microphone volume level.
-    virtual int GetMicVolume(unsigned int& volume) = 0;
+  // Gets the microphone volume level.
+  virtual int GetMicVolume(unsigned int& volume) = 0;
 
-    // Mutes the microphone input signal completely without affecting
-    // the audio device volume.
-    virtual int SetInputMute(int channel, bool enable) = 0;
+  // Mutes the microphone input signal completely without affecting
+  // the audio device volume.
+  virtual int SetInputMute(int channel, bool enable) = 0;
 
-    // Gets the current microphone input mute state.
-    virtual int GetInputMute(int channel, bool& enabled) = 0;
+  // Gets the current microphone input mute state.
+  virtual int GetInputMute(int channel, bool& enabled) = 0;
 
-    // Gets the microphone speech |level|, mapped non-linearly to the range
-    // [0,9].
-    virtual int GetSpeechInputLevel(unsigned int& level) = 0;
+  // Gets the microphone speech |level|, mapped non-linearly to the range
+  // [0,9].
+  virtual int GetSpeechInputLevel(unsigned int& level) = 0;
 
-    // Gets the speaker speech |level|, mapped non-linearly to the range
-    // [0,9].
-    virtual int GetSpeechOutputLevel(int channel, unsigned int& level) = 0;
+  // Gets the speaker speech |level|, mapped non-linearly to the range
+  // [0,9].
+  virtual int GetSpeechOutputLevel(int channel, unsigned int& level) = 0;
 
-    // Gets the microphone speech |level|, mapped linearly to the range
-    // [0,32768].
-    virtual int GetSpeechInputLevelFullRange(unsigned int& level) = 0;
+  // Gets the microphone speech |level|, mapped linearly to the range
+  // [0,32768].
+  virtual int GetSpeechInputLevelFullRange(unsigned int& level) = 0;
 
-    // Gets the speaker speech |level|, mapped linearly to the range [0,32768].
-    virtual int GetSpeechOutputLevelFullRange(
-        int channel, unsigned int& level) = 0;
+  // Gets the speaker speech |level|, mapped linearly to the range [0,32768].
+  virtual int GetSpeechOutputLevelFullRange(int channel,
+                                            unsigned int& level) = 0;
 
-    // Sets a volume |scaling| applied to the outgoing signal of a specific
-    // channel. Valid scale range is [0.0, 10.0].
-    virtual int SetChannelOutputVolumeScaling(int channel, float scaling) = 0;
+  // Sets a volume |scaling| applied to the outgoing signal of a specific
+  // channel. Valid scale range is [0.0, 10.0].
+  virtual int SetChannelOutputVolumeScaling(int channel, float scaling) = 0;
 
-    // Gets the current volume scaling for a specified |channel|.
-    virtual int GetChannelOutputVolumeScaling(int channel, float& scaling) = 0;
+  // Gets the current volume scaling for a specified |channel|.
+  virtual int GetChannelOutputVolumeScaling(int channel, float& scaling) = 0;
 
-    // Scales volume of the |left| and |right| channels independently.
-    // Valid scale range is [0.0, 1.0].
-    virtual int SetOutputVolumePan(int channel, float left, float right) = 0;
+  // Scales volume of the |left| and |right| channels independently.
+  // Valid scale range is [0.0, 1.0].
+  virtual int SetOutputVolumePan(int channel, float left, float right) = 0;
 
-    // Gets the current left and right scaling factors.
-    virtual int GetOutputVolumePan(int channel, float& left, float& right) = 0;
+  // Gets the current left and right scaling factors.
+  virtual int GetOutputVolumePan(int channel, float& left, float& right) = 0;
 
-    // Don't use. Will be removed.
-    virtual int SetSystemOutputMute(bool enable) { return -1; }
-    virtual int GetSystemOutputMute(bool &enabled) { return -1; }
-    virtual int SetSystemInputMute(bool enable) { return -1; }
-    virtual int GetSystemInputMute(bool& enabled) { return -1; }
+  // Don't use. Will be removed.
+  virtual int SetSystemOutputMute(bool enable) { return -1; }
+  virtual int GetSystemOutputMute(bool& enabled) { return -1; }
+  virtual int SetSystemInputMute(bool enable) { return -1; }
+  virtual int GetSystemInputMute(bool& enabled) { return -1; }
 
-protected:
-    VoEVolumeControl() {};
-    virtual ~VoEVolumeControl() {};
+ protected:
+  VoEVolumeControl(){};
+  virtual ~VoEVolumeControl(){};
 };
 
 }  // namespace webrtc
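
A minimal sketch of the VoEVolumeControl calls above (not part of the patch; engine/channel setup is omitted).

  #include "webrtc/voice_engine/include/voe_volume_control.h"

  int HalveSpeakerVolumeAndMuteMic(webrtc::VoiceEngine* voe, int channel) {
    webrtc::VoEVolumeControl* volume =
        webrtc::VoEVolumeControl::GetInterface(voe);
    if (volume == NULL)
      return -1;

    unsigned int level = 0;
    if (volume->GetSpeakerVolume(level) == 0)
      volume->SetSpeakerVolume(level / 2);  // Valid range is [0,255].

    // Mute the captured signal on this channel without touching the device.
    volume->SetInputMute(channel, true);

    unsigned int speech_level = 0;
    volume->GetSpeechInputLevel(speech_level);  // Non-linear [0,9] scale.

    volume->Release();
    return 0;
  }
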
diff --git a/webrtc/voice_engine/voe_audio_processing_impl.cc b/webrtc/voice_engine/voe_audio_processing_impl.cc
index a310628..7569c75 100644
--- a/webrtc/voice_engine/voe_audio_processing_impl.cc
+++ b/webrtc/voice_engine/voe_audio_processing_impl.cc
@@ -59,8 +59,7 @@
 
 #ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
 VoEAudioProcessingImpl::VoEAudioProcessingImpl(voe::SharedData* shared)
-    : _isAecMode(kDefaultEcMode == kEcAec),
-      _shared(shared) {
+    : _isAecMode(kDefaultEcMode == kEcAec), _shared(shared) {
   WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "VoEAudioProcessingImpl::VoEAudioProcessingImpl() - ctor");
 }
@@ -104,22 +103,22 @@
       break;
   }
 
-  if (_shared->audio_processing()->noise_suppression()->
-          set_level(nsLevel) != 0) {
+  if (_shared->audio_processing()->noise_suppression()->set_level(nsLevel) !=
+      0) {
     _shared->SetLastError(VE_APM_ERROR, kTraceError,
-        "SetNsStatus() failed to set Ns mode");
+                          "SetNsStatus() failed to set Ns mode");
     return -1;
   }
   if (_shared->audio_processing()->noise_suppression()->Enable(enable) != 0) {
     _shared->SetLastError(VE_APM_ERROR, kTraceError,
-        "SetNsStatus() failed to set Ns state");
+                          "SetNsStatus() failed to set Ns state");
     return -1;
   }
 
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetNsStatus() Ns is not supported");
+                        "SetNsStatus() Ns is not supported");
   return -1;
 #endif
 }
@@ -157,7 +156,7 @@
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "GetNsStatus() Ns is not supported");
+                        "GetNsStatus() Ns is not supported");
   return -1;
 #endif
 }
@@ -174,7 +173,7 @@
 #if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID)
   if (mode == kAgcAdaptiveAnalog) {
     _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-        "SetAgcStatus() invalid Agc mode for mobile device");
+                          "SetAgcStatus() invalid Agc mode for mobile device");
     return -1;
   }
 #endif
@@ -200,12 +199,12 @@
 
   if (_shared->audio_processing()->gain_control()->set_mode(agcMode) != 0) {
     _shared->SetLastError(VE_APM_ERROR, kTraceError,
-        "SetAgcStatus() failed to set Agc mode");
+                          "SetAgcStatus() failed to set Agc mode");
     return -1;
   }
   if (_shared->audio_processing()->gain_control()->Enable(enable) != 0) {
     _shared->SetLastError(VE_APM_ERROR, kTraceError,
-        "SetAgcStatus() failed to set Agc state");
+                          "SetAgcStatus() failed to set Agc state");
     return -1;
   }
 
@@ -215,15 +214,15 @@
     // used since we want to be able to provide the APM with updated mic
     // levels when the user modifies the mic level manually.
     if (_shared->audio_device()->SetAGC(enable) != 0) {
-      _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR,
-          kTraceWarning, "SetAgcStatus() failed to set Agc mode");
+      _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
+                            "SetAgcStatus() failed to set Agc mode");
     }
   }
 
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetAgcStatus() Agc is not supported");
+                        "SetAgcStatus() Agc is not supported");
   return -1;
 #endif
 }
@@ -239,7 +238,7 @@
 
   enabled = _shared->audio_processing()->gain_control()->is_enabled();
   GainControl::Mode agcMode =
-    _shared->audio_processing()->gain_control()->mode();
+      _shared->audio_processing()->gain_control()->mode();
 
   switch (agcMode) {
     case GainControl::kFixedDigital:
@@ -258,7 +257,7 @@
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "GetAgcStatus() Agc is not supported");
+                        "GetAgcStatus() Agc is not supported");
   return -1;
 #endif
 }
@@ -273,22 +272,23 @@
   }
 
   if (_shared->audio_processing()->gain_control()->set_target_level_dbfs(
-      config.targetLeveldBOv) != 0) {
+          config.targetLeveldBOv) != 0) {
     _shared->SetLastError(VE_APM_ERROR, kTraceError,
-        "SetAgcConfig() failed to set target peak |level|"
-        " (or envelope) of the Agc");
+                          "SetAgcConfig() failed to set target peak |level|"
+                          " (or envelope) of the Agc");
     return -1;
   }
   if (_shared->audio_processing()->gain_control()->set_compression_gain_db(
-        config.digitalCompressionGaindB) != 0) {
+          config.digitalCompressionGaindB) != 0) {
     _shared->SetLastError(VE_APM_ERROR, kTraceError,
-        "SetAgcConfig() failed to set the range in |gain| "
-        "the digital compression stage may apply");
+                          "SetAgcConfig() failed to set the range in |gain| "
+                          "the digital compression stage may apply");
     return -1;
   }
   if (_shared->audio_processing()->gain_control()->enable_limiter(
-        config.limiterEnable) != 0) {
-    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+          config.limiterEnable) != 0) {
+    _shared->SetLastError(
+        VE_APM_ERROR, kTraceError,
         "SetAgcConfig() failed to set hard limiter to the signal");
     return -1;
   }
@@ -296,7 +296,7 @@
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetAgcConfig() EC is not supported");
+                        "SetAgcConfig() EC is not supported");
   return -1;
 #endif
 }
@@ -311,23 +311,22 @@
   }
 
   config.targetLeveldBOv =
-    _shared->audio_processing()->gain_control()->target_level_dbfs();
+      _shared->audio_processing()->gain_control()->target_level_dbfs();
   config.digitalCompressionGaindB =
-    _shared->audio_processing()->gain_control()->compression_gain_db();
+      _shared->audio_processing()->gain_control()->compression_gain_db();
   config.limiterEnable =
-    _shared->audio_processing()->gain_control()->is_limiter_enabled();
+      _shared->audio_processing()->gain_control()->is_limiter_enabled();
 
   WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetAgcConfig() => targetLeveldBOv=%u, "
-                  "digitalCompressionGaindB=%u, limiterEnable=%d",
-               config.targetLeveldBOv,
-               config.digitalCompressionGaindB,
+               "digitalCompressionGaindB=%u, limiterEnable=%d",
+               config.targetLeveldBOv, config.digitalCompressionGaindB,
                config.limiterEnable);
 
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "GetAgcConfig() EC is not supported");
+                        "GetAgcConfig() EC is not supported");
   return -1;
 #endif
 }
@@ -346,13 +345,13 @@
   voe::Channel* channelPtr = ch.channel();
   if (channelPtr == NULL) {
     _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-        "SetRxNsStatus() failed to locate channel");
+                          "SetRxNsStatus() failed to locate channel");
     return -1;
   }
   return channelPtr->SetRxNsStatus(enable, mode);
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetRxNsStatus() NS is not supported");
+                        "SetRxNsStatus() NS is not supported");
   return -1;
 #endif
 }
@@ -372,13 +371,13 @@
   voe::Channel* channelPtr = ch.channel();
   if (channelPtr == NULL) {
     _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-        "GetRxNsStatus() failed to locate channel");
+                          "GetRxNsStatus() failed to locate channel");
     return -1;
   }
   return channelPtr->GetRxNsStatus(enabled, mode);
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "GetRxNsStatus() NS is not supported");
+                        "GetRxNsStatus() NS is not supported");
   return -1;
 #endif
 }
@@ -387,8 +386,8 @@
                                            bool enable,
                                            AgcModes mode) {
   WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-               "SetRxAgcStatus(channel=%d, enable=%d, mode=%d)",
-               channel, (int)enable, (int)mode);
+               "SetRxAgcStatus(channel=%d, enable=%d, mode=%d)", channel,
+               (int)enable, (int)mode);
 #ifdef WEBRTC_VOICE_ENGINE_AGC
   if (!_shared->statistics().Initialized()) {
     _shared->SetLastError(VE_NOT_INITED, kTraceError);
@@ -399,13 +398,13 @@
   voe::Channel* channelPtr = ch.channel();
   if (channelPtr == NULL) {
     _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-        "SetRxAgcStatus() failed to locate channel");
+                          "SetRxAgcStatus() failed to locate channel");
     return -1;
   }
   return channelPtr->SetRxAgcStatus(enable, mode);
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetRxAgcStatus() Agc is not supported");
+                        "SetRxAgcStatus() Agc is not supported");
   return -1;
 #endif
 }
@@ -425,19 +424,18 @@
   voe::Channel* channelPtr = ch.channel();
   if (channelPtr == NULL) {
     _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-        "GetRxAgcStatus() failed to locate channel");
+                          "GetRxAgcStatus() failed to locate channel");
     return -1;
   }
   return channelPtr->GetRxAgcStatus(enabled, mode);
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "GetRxAgcStatus() Agc is not supported");
+                        "GetRxAgcStatus() Agc is not supported");
   return -1;
 #endif
 }
 
-int VoEAudioProcessingImpl::SetRxAgcConfig(int channel,
-                                           AgcConfig config) {
+int VoEAudioProcessingImpl::SetRxAgcConfig(int channel, AgcConfig config) {
   WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "SetRxAgcConfig(channel=%d)", channel);
 #ifdef WEBRTC_VOICE_ENGINE_AGC
@@ -450,13 +448,13 @@
   voe::Channel* channelPtr = ch.channel();
   if (channelPtr == NULL) {
     _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-      "SetRxAgcConfig() failed to locate channel");
+                          "SetRxAgcConfig() failed to locate channel");
     return -1;
   }
   return channelPtr->SetRxAgcConfig(config);
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetRxAgcConfig() Agc is not supported");
+                        "SetRxAgcConfig() Agc is not supported");
   return -1;
 #endif
 }
@@ -474,13 +472,13 @@
   voe::Channel* channelPtr = ch.channel();
   if (channelPtr == NULL) {
     _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-        "GetRxAgcConfig() failed to locate channel");
+                          "GetRxAgcConfig() failed to locate channel");
     return -1;
   }
   return channelPtr->GetRxAgcConfig(config);
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "GetRxAgcConfig() Agc is not supported");
+                        "GetRxAgcConfig() Agc is not supported");
   return -1;
 #endif
 }
@@ -498,7 +496,8 @@
   WEBRTC_VOICE_INIT_CHECK();
 
   if (!DriftCompensationSupported()) {
-    _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
+    _shared->SetLastError(
+        VE_APM_ERROR, kTraceWarning,
         "Drift compensation is not supported on this platform.");
     return -1;
   }
@@ -506,7 +505,7 @@
   EchoCancellation* aec = _shared->audio_processing()->echo_cancellation();
   if (aec->enable_drift_compensation(enable) != 0) {
     _shared->SetLastError(VE_APM_ERROR, kTraceError,
-        "aec->enable_drift_compensation() failed");
+                          "aec->enable_drift_compensation() failed");
     return -1;
   }
   return 0;
@@ -530,41 +529,43 @@
   }
 
   // AEC mode
-  if ((mode == kEcDefault) ||
-      (mode == kEcConference) ||
-      (mode == kEcAec) ||
-      ((mode == kEcUnchanged) &&
-       (_isAecMode == true))) {
+  if ((mode == kEcDefault) || (mode == kEcConference) || (mode == kEcAec) ||
+      ((mode == kEcUnchanged) && (_isAecMode == true))) {
     if (enable) {
       // Disable the AECM before enabling the AEC
       if (_shared->audio_processing()->echo_control_mobile()->is_enabled()) {
         _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
-            "SetEcStatus() disable AECM before enabling AEC");
-        if (_shared->audio_processing()->echo_control_mobile()->
-            Enable(false) != 0) {
+                              "SetEcStatus() disable AECM before enabling AEC");
+        if (_shared->audio_processing()->echo_control_mobile()->Enable(false) !=
+            0) {
           _shared->SetLastError(VE_APM_ERROR, kTraceError,
-              "SetEcStatus() failed to disable AECM");
+                                "SetEcStatus() failed to disable AECM");
           return -1;
         }
       }
     }
     if (_shared->audio_processing()->echo_cancellation()->Enable(enable) != 0) {
       _shared->SetLastError(VE_APM_ERROR, kTraceError,
-          "SetEcStatus() failed to set AEC state");
+                            "SetEcStatus() failed to set AEC state");
       return -1;
     }
     if (mode == kEcConference) {
-      if (_shared->audio_processing()->echo_cancellation()->
-          set_suppression_level(EchoCancellation::kHighSuppression) != 0) {
-        _shared->SetLastError(VE_APM_ERROR, kTraceError,
+      if (_shared->audio_processing()
+              ->echo_cancellation()
+              ->set_suppression_level(EchoCancellation::kHighSuppression) !=
+          0) {
+        _shared->SetLastError(
+            VE_APM_ERROR, kTraceError,
             "SetEcStatus() failed to set aggressiveness to high");
         return -1;
       }
     } else {
-      if (_shared->audio_processing()->echo_cancellation()->
-          set_suppression_level(
-            EchoCancellation::kModerateSuppression) != 0) {
-        _shared->SetLastError(VE_APM_ERROR, kTraceError,
+      if (_shared->audio_processing()
+              ->echo_cancellation()
+              ->set_suppression_level(EchoCancellation::kModerateSuppression) !=
+          0) {
+        _shared->SetLastError(
+            VE_APM_ERROR, kTraceError,
             "SetEcStatus() failed to set aggressiveness to moderate");
         return -1;
       }
@@ -572,38 +573,37 @@
 
     _isAecMode = true;
   } else if ((mode == kEcAecm) ||
-             ((mode == kEcUnchanged) &&
-              (_isAecMode == false))) {
+             ((mode == kEcUnchanged) && (_isAecMode == false))) {
     if (enable) {
       // Disable the AEC before enabling the AECM
       if (_shared->audio_processing()->echo_cancellation()->is_enabled()) {
         _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
-            "SetEcStatus() disable AEC before enabling AECM");
-        if (_shared->audio_processing()->echo_cancellation()->
-            Enable(false) != 0) {
+                              "SetEcStatus() disable AEC before enabling AECM");
+        if (_shared->audio_processing()->echo_cancellation()->Enable(false) !=
+            0) {
           _shared->SetLastError(VE_APM_ERROR, kTraceError,
-              "SetEcStatus() failed to disable AEC");
+                                "SetEcStatus() failed to disable AEC");
           return -1;
         }
       }
     }
-    if (_shared->audio_processing()->echo_control_mobile()->
-        Enable(enable) != 0) {
+    if (_shared->audio_processing()->echo_control_mobile()->Enable(enable) !=
+        0) {
       _shared->SetLastError(VE_APM_ERROR, kTraceError,
-          "SetEcStatus() failed to set AECM state");
+                            "SetEcStatus() failed to set AECM state");
       return -1;
     }
     _isAecMode = false;
   } else {
     _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-                                   "SetEcStatus() invalid EC mode");
+                          "SetEcStatus() invalid EC mode");
     return -1;
   }
 
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetEcStatus() EC is not supported");
+                        "SetEcStatus() EC is not supported");
   return -1;
 #endif
 }
@@ -622,17 +622,15 @@
     enabled = _shared->audio_processing()->echo_cancellation()->is_enabled();
   } else {
     mode = kEcAecm;
-    enabled = _shared->audio_processing()->echo_control_mobile()->
-              is_enabled();
+    enabled = _shared->audio_processing()->echo_control_mobile()->is_enabled();
   }
 
   WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
-               "GetEcStatus() => enabled=%i, mode=%i",
-               enabled, (int)mode);
+               "GetEcStatus() => enabled=%i, mode=%i", enabled, (int)mode);
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "GetEcStatus() EC is not supported");
+                        "GetEcStatus() EC is not supported");
   return -1;
 #endif
 }
@@ -679,16 +677,16 @@
       break;
   }
 
-
-  if (_shared->audio_processing()->echo_control_mobile()->
-      set_routing_mode(aecmMode) != 0) {
+  if (_shared->audio_processing()->echo_control_mobile()->set_routing_mode(
+          aecmMode) != 0) {
     _shared->SetLastError(VE_APM_ERROR, kTraceError,
-        "SetAECMMode() failed to set AECM routing mode");
+                          "SetAECMMode() failed to set AECM routing mode");
     return -1;
   }
-  if (_shared->audio_processing()->echo_control_mobile()->
-      enable_comfort_noise(enableCNG) != 0) {
-    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+  if (_shared->audio_processing()->echo_control_mobile()->enable_comfort_noise(
+          enableCNG) != 0) {
+    _shared->SetLastError(
+        VE_APM_ERROR, kTraceError,
         "SetAECMMode() failed to set comfort noise state for AECM");
     return -1;
   }
@@ -696,7 +694,7 @@
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetAECMMode() EC is not supported");
+                        "SetAECMMode() EC is not supported");
   return -1;
 #endif
 }
@@ -714,8 +712,9 @@
 
   EchoControlMobile::RoutingMode aecmMode =
       _shared->audio_processing()->echo_control_mobile()->routing_mode();
-  enabledCNG = _shared->audio_processing()->echo_control_mobile()->
-      is_comfort_noise_enabled();
+  enabledCNG = _shared->audio_processing()
+                   ->echo_control_mobile()
+                   ->is_comfort_noise_enabled();
 
   switch (aecmMode) {
     case EchoControlMobile::kQuietEarpieceOrHeadset:
@@ -738,7 +737,7 @@
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "GetAECMMode() EC is not supported");
+                        "GetAECMMode() EC is not supported");
   return -1;
 #endif
 }
@@ -749,7 +748,7 @@
   if (_shared->audio_processing()->high_pass_filter()->Enable(enable) !=
       AudioProcessing::kNoError) {
     _shared->SetLastError(VE_APM_ERROR, kTraceError,
-        "HighPassFilter::Enable() failed.");
+                          "HighPassFilter::Enable() failed.");
     return -1;
   }
 
@@ -762,9 +761,8 @@
   return _shared->audio_processing()->high_pass_filter()->is_enabled();
 }
 
-int VoEAudioProcessingImpl::RegisterRxVadObserver(
-  int channel,
-  VoERxVadCallback& observer) {
+int VoEAudioProcessingImpl::RegisterRxVadObserver(int channel,
+                                                  VoERxVadCallback& observer) {
   WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "RegisterRxVadObserver()");
   if (!_shared->statistics().Initialized()) {
@@ -775,7 +773,7 @@
   voe::Channel* channelPtr = ch.channel();
   if (channelPtr == NULL) {
     _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-        "RegisterRxVadObserver() failed to locate channel");
+                          "RegisterRxVadObserver() failed to locate channel");
     return -1;
   }
   return channelPtr->RegisterRxVadObserver(observer);
@@ -792,7 +790,7 @@
   voe::Channel* channelPtr = ch.channel();
   if (channelPtr == NULL) {
     _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-        "DeRegisterRxVadObserver() failed to locate channel");
+                          "DeRegisterRxVadObserver() failed to locate channel");
     return -1;
   }
 
@@ -811,7 +809,7 @@
   voe::Channel* channelPtr = ch.channel();
   if (channelPtr == NULL) {
     _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-        "DeRegisterRxVadObserver() failed to locate channel");
+                          "DeRegisterRxVadObserver() failed to locate channel");
     return -1;
   }
   int activity(-1);
@@ -829,18 +827,18 @@
     return -1;
   }
 
-  if ((_shared->audio_processing()->echo_cancellation()->enable_metrics(enable)
-       != 0) ||
+  if ((_shared->audio_processing()->echo_cancellation()->enable_metrics(
+           enable) != 0) ||
       (_shared->audio_processing()->echo_cancellation()->enable_delay_logging(
-         enable) != 0)) {
+           enable) != 0)) {
     _shared->SetLastError(VE_APM_ERROR, kTraceError,
-        "SetEcMetricsStatus() unable to set EC metrics mode");
+                          "SetEcMetricsStatus() unable to set EC metrics mode");
     return -1;
   }
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetEcStatus() EC is not supported");
+                        "SetEcStatus() EC is not supported");
   return -1;
 #endif
 }
@@ -855,12 +853,14 @@
   }
 
   bool echo_mode =
-    _shared->audio_processing()->echo_cancellation()->are_metrics_enabled();
-  bool delay_mode = _shared->audio_processing()->echo_cancellation()->
-      is_delay_logging_enabled();
+      _shared->audio_processing()->echo_cancellation()->are_metrics_enabled();
+  bool delay_mode = _shared->audio_processing()
+                        ->echo_cancellation()
+                        ->is_delay_logging_enabled();
 
   if (echo_mode != delay_mode) {
-    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+    _shared->SetLastError(
+        VE_APM_ERROR, kTraceError,
         "GetEcMetricsStatus() delay logging and echo mode are not the same");
     return -1;
   }
@@ -872,7 +872,7 @@
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetEcStatus() EC is not supported");
+                        "SetEcStatus() EC is not supported");
   return -1;
 #endif
 }
@@ -889,7 +889,8 @@
     return -1;
   }
   if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) {
-    _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
+    _shared->SetLastError(
+        VE_APM_ERROR, kTraceWarning,
         "GetEchoMetrics() AudioProcessingModule AEC is not enabled");
     return -1;
   }
@@ -910,12 +911,12 @@
   A_NLP = echoMetrics.a_nlp.instant;
 
   WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
-               "GetEchoMetrics() => ERL=%d, ERLE=%d, RERL=%d, A_NLP=%d",
-               ERL, ERLE, RERL, A_NLP);
+               "GetEchoMetrics() => ERL=%d, ERLE=%d, RERL=%d, A_NLP=%d", ERL,
+               ERLE, RERL, A_NLP);
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetEcStatus() EC is not supported");
+                        "SetEcStatus() EC is not supported");
   return -1;
 #endif
 }
@@ -931,7 +932,8 @@
     return -1;
   }
   if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) {
-    _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
+    _shared->SetLastError(
+        VE_APM_ERROR, kTraceWarning,
         "GetEcDelayMetrics() AudioProcessingModule AEC is not enabled");
     return -1;
   }
@@ -941,7 +943,7 @@
   float poor_fraction = 0;
   // Get delay-logging values from Audio Processing Module.
   if (_shared->audio_processing()->echo_cancellation()->GetDelayMetrics(
-        &median, &std, &poor_fraction)) {
+          &median, &std, &poor_fraction)) {
     WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
                  "GetEcDelayMetrics(), AudioProcessingModule delay-logging "
                  "error");
@@ -955,12 +957,12 @@
 
   WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetEcDelayMetrics() => delay_median=%d, delay_std=%d, "
-               "fraction_poor_delays=%f", delay_median, delay_std,
-               fraction_poor_delays);
+               "fraction_poor_delays=%f",
+               delay_median, delay_std, fraction_poor_delays);
   return 0;
 #else
   _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetEcStatus() EC is not supported");
+                        "SetEcStatus() EC is not supported");
   return -1;
 #endif
 }
@@ -1014,12 +1016,13 @@
 
   if (_shared->audio_processing()->voice_detection()->Enable(enable)) {
     _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
-        "SetTypingDetectionStatus() failed to set VAD state");
+                          "SetTypingDetectionStatus() failed to set VAD state");
     return -1;
   }
   if (_shared->audio_processing()->voice_detection()->set_likelihood(
           VoiceDetection::kVeryLowLikelihood)) {
-    _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
+    _shared->SetLastError(
+        VE_APM_ERROR, kTraceWarning,
         "SetTypingDetectionStatus() failed to set VAD likelihood to low");
     return -1;
   }
@@ -1043,8 +1046,7 @@
   return 0;
 }
 
-
-int VoEAudioProcessingImpl::TimeSinceLastTyping(int &seconds) {
+int VoEAudioProcessingImpl::TimeSinceLastTyping(int& seconds) {
   WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "TimeSinceLastTyping()");
 #if !defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION)
@@ -1056,16 +1058,13 @@
   }
   // Check if typing detection is enabled
   bool enabled = _shared->audio_processing()->voice_detection()->is_enabled();
-  if (enabled)
-  {
+  if (enabled) {
     _shared->transmit_mixer()->TimeSinceLastTyping(seconds);
     return 0;
-  }
-  else
-  {
+  } else {
     _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-      "SetTypingDetectionStatus is not enabled");
-  return -1;
+                          "SetTypingDetectionStatus is not enabled");
+    return -1;
   }
 #endif
 }
@@ -1084,8 +1083,9 @@
     _shared->statistics().SetLastError(VE_NOT_INITED, kTraceError);
     return -1;
   }
-  return (_shared->transmit_mixer()->SetTypingDetectionParameters(timeWindow,
-      costPerTyping, reportingThreshold, penaltyDecay, typeEventDelay));
+  return (_shared->transmit_mixer()->SetTypingDetectionParameters(
+      timeWindow, costPerTyping, reportingThreshold, penaltyDecay,
+      typeEventDelay));
 #endif
 }
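
The reformatted .cc above shows the two call-wrapping shapes that clang-format, with the Chromium-derived style used here, keeps producing: when the remaining arguments fit within 80 columns they are aligned under the first argument, and when even that would overflow, the call breaks directly after the opening parenthesis with a four-space continuation indent. A minimal illustrative sketch; kApmError, kTraceError and SetLastError are hypothetical stand-ins, not the VoE symbols:

  // Hypothetical stand-ins; only the line wrapping is the point here.
  constexpr int kApmError = 1;
  constexpr int kTraceError = 2;
  void SetLastError(int /*error*/, int /*level*/, const char* /*message*/) {}

  void WrappingExamples() {
    // The trailing arguments still fit: align them under the first argument.
    SetLastError(kApmError, kTraceError,
                 "SetAgcConfig() failed to set target peak level of the Agc");
    // Aligning under the first argument would pass column 80: break after the
    // opening parenthesis and indent the whole argument list by four spaces.
    SetLastError(
        kApmError, kTraceError,
        "SetAgcConfig() failed to set hard limiter to the signal");
  }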
 
diff --git a/webrtc/voice_engine/voe_audio_processing_impl.h b/webrtc/voice_engine/voe_audio_processing_impl.h
index 26f7eec..63a60dc 100644
--- a/webrtc/voice_engine/voe_audio_processing_impl.h
+++ b/webrtc/voice_engine/voe_audio_processing_impl.h
@@ -19,90 +19,90 @@
 
 class VoEAudioProcessingImpl : public VoEAudioProcessing {
  public:
-  virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged);
+  int SetNsStatus(bool enable, NsModes mode = kNsUnchanged) override;
 
-  virtual int GetNsStatus(bool& enabled, NsModes& mode);
+  int GetNsStatus(bool& enabled, NsModes& mode) override;
 
-  virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged);
+  int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged) override;
 
-  virtual int GetAgcStatus(bool& enabled, AgcModes& mode);
+  int GetAgcStatus(bool& enabled, AgcModes& mode) override;
 
-  virtual int SetAgcConfig(AgcConfig config);
+  int SetAgcConfig(AgcConfig config) override;
 
-  virtual int GetAgcConfig(AgcConfig& config);
+  int GetAgcConfig(AgcConfig& config) override;
 
-  virtual int SetRxNsStatus(int channel,
-                            bool enable,
-                            NsModes mode = kNsUnchanged);
+  int SetRxNsStatus(int channel,
+                    bool enable,
+                    NsModes mode = kNsUnchanged) override;
 
-  virtual int GetRxNsStatus(int channel, bool& enabled, NsModes& mode);
+  int GetRxNsStatus(int channel, bool& enabled, NsModes& mode) override;
 
-  virtual int SetRxAgcStatus(int channel,
-                             bool enable,
-                             AgcModes mode = kAgcUnchanged);
+  int SetRxAgcStatus(int channel,
+                     bool enable,
+                     AgcModes mode = kAgcUnchanged) override;
 
-  virtual int GetRxAgcStatus(int channel, bool& enabled, AgcModes& mode);
+  int GetRxAgcStatus(int channel, bool& enabled, AgcModes& mode) override;
 
-  virtual int SetRxAgcConfig(int channel, AgcConfig config);
+  int SetRxAgcConfig(int channel, AgcConfig config) override;
 
-  virtual int GetRxAgcConfig(int channel, AgcConfig& config);
+  int GetRxAgcConfig(int channel, AgcConfig& config) override;
 
-  virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged);
-  virtual int GetEcStatus(bool& enabled, EcModes& mode);
-  virtual int EnableDriftCompensation(bool enable);
-  virtual bool DriftCompensationEnabled();
+  int SetEcStatus(bool enable, EcModes mode = kEcUnchanged) override;
+  int GetEcStatus(bool& enabled, EcModes& mode) override;
+  int EnableDriftCompensation(bool enable) override;
+  bool DriftCompensationEnabled() override;
 
-  virtual void SetDelayOffsetMs(int offset);
-  virtual int DelayOffsetMs();
+  void SetDelayOffsetMs(int offset) override;
+  int DelayOffsetMs() override;
 
-  virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
-                          bool enableCNG = true);
+  int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
+                  bool enableCNG = true) override;
 
-  virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG);
+  int GetAecmMode(AecmModes& mode, bool& enabledCNG) override;
 
-  virtual int EnableHighPassFilter(bool enable);
-  virtual bool IsHighPassFilterEnabled();
+  int EnableHighPassFilter(bool enable) override;
+  bool IsHighPassFilterEnabled() override;
 
-  virtual int RegisterRxVadObserver(int channel,
-                                    VoERxVadCallback& observer);
+  int RegisterRxVadObserver(int channel, VoERxVadCallback& observer) override;
 
-  virtual int DeRegisterRxVadObserver(int channel);
+  int DeRegisterRxVadObserver(int channel) override;
 
-  virtual int VoiceActivityIndicator(int channel);
+  int VoiceActivityIndicator(int channel) override;
 
-  virtual int SetEcMetricsStatus(bool enable);
+  int SetEcMetricsStatus(bool enable) override;
 
-  virtual int GetEcMetricsStatus(bool& enabled);
+  int GetEcMetricsStatus(bool& enabled) override;
 
-  virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP);
+  int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP) override;
 
-  virtual int GetEcDelayMetrics(int& delay_median, int& delay_std,
-                                float& fraction_poor_delays);
+  int GetEcDelayMetrics(int& delay_median,
+                        int& delay_std,
+                        float& fraction_poor_delays) override;
 
-  virtual int StartDebugRecording(const char* fileNameUTF8);
-  virtual int StartDebugRecording(FILE* file_handle);
+  int StartDebugRecording(const char* fileNameUTF8) override;
+  int StartDebugRecording(FILE* file_handle) override;
 
-  virtual int StopDebugRecording();
+  int StopDebugRecording() override;
 
-  virtual int SetTypingDetectionStatus(bool enable);
+  int SetTypingDetectionStatus(bool enable) override;
 
-  virtual int GetTypingDetectionStatus(bool& enabled);
+  int GetTypingDetectionStatus(bool& enabled) override;
 
-  virtual int TimeSinceLastTyping(int &seconds);
+  int TimeSinceLastTyping(int& seconds) override;
 
   // TODO(niklase) Remove default argument as soon as libJingle is updated!
-  virtual int SetTypingDetectionParameters(int timeWindow,
-                                           int costPerTyping,
-                                           int reportingThreshold,
-                                           int penaltyDecay,
-                                           int typeEventDelay = 0);
+  int SetTypingDetectionParameters(int timeWindow,
+                                   int costPerTyping,
+                                   int reportingThreshold,
+                                   int penaltyDecay,
+                                   int typeEventDelay = 0) override;
 
-  virtual void EnableStereoChannelSwapping(bool enable);
-  virtual bool IsStereoChannelSwappingEnabled();
+  void EnableStereoChannelSwapping(bool enable) override;
+  bool IsStereoChannelSwappingEnabled() override;
 
  protected:
   VoEAudioProcessingImpl(voe::SharedData* shared);
-  virtual ~VoEAudioProcessingImpl();
+  ~VoEAudioProcessingImpl() override;
 
  private:
   bool _isAecMode;
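
The header diff above is mechanical: every overriding declaration drops virtual and gains override, per the new style guide. The practical benefit is that a signature mismatch between the interface and its implementation now fails to compile instead of silently introducing a new virtual. A minimal sketch with hypothetical class names (not VoE types):

  class Interface {
   public:
    virtual ~Interface() {}
    virtual int GetStatus(bool& enabled) = 0;
  };

  class Impl : public Interface {
   public:
    ~Impl() override {}
    // 'override' makes the compiler verify this really overrides a base
    // declaration; change the parameter type and the build breaks here.
    int GetStatus(bool& enabled) override {
      enabled = true;
      return 0;
    }
  };
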
diff --git a/webrtc/voice_engine/voe_audio_processing_unittest.cc b/webrtc/voice_engine/voe_audio_processing_unittest.cc
index 8916ef1..0d725bc 100644
--- a/webrtc/voice_engine/voe_audio_processing_unittest.cc
+++ b/webrtc/voice_engine/voe_audio_processing_unittest.cc
@@ -22,8 +22,7 @@
   VoEAudioProcessingTest()
       : voe_(VoiceEngine::Create()),
         base_(VoEBase::GetInterface(voe_)),
-        audioproc_(VoEAudioProcessing::GetInterface(voe_)) {
-  }
+        audioproc_(VoEAudioProcessing::GetInterface(voe_)) {}
 
   virtual ~VoEAudioProcessingTest() {
     base_->Terminate();
diff --git a/webrtc/voice_engine/voe_codec_impl.cc b/webrtc/voice_engine/voe_codec_impl.cc
index d628fc5..b8fa5ce 100644
--- a/webrtc/voice_engine/voe_codec_impl.cc
+++ b/webrtc/voice_engine/voe_codec_impl.cc
@@ -17,167 +17,145 @@
 #include "webrtc/voice_engine/include/voe_errors.h"
 #include "webrtc/voice_engine/voice_engine_impl.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
-VoECodec* VoECodec::GetInterface(VoiceEngine* voiceEngine)
-{
+VoECodec* VoECodec::GetInterface(VoiceEngine* voiceEngine) {
 #ifndef WEBRTC_VOICE_ENGINE_CODEC_API
-    return NULL;
+  return NULL;
 #else
-    if (NULL == voiceEngine)
-    {
-        return NULL;
-    }
-    VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
-    s->AddRef();
-    return s;
+  if (NULL == voiceEngine) {
+    return NULL;
+  }
+  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+  s->AddRef();
+  return s;
 #endif
 }
 
 #ifdef WEBRTC_VOICE_ENGINE_CODEC_API
 
-VoECodecImpl::VoECodecImpl(voe::SharedData* shared) : _shared(shared)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoECodecImpl() - ctor");
+VoECodecImpl::VoECodecImpl(voe::SharedData* shared) : _shared(shared) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoECodecImpl() - ctor");
 }
 
-VoECodecImpl::~VoECodecImpl()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "~VoECodecImpl() - dtor");
+VoECodecImpl::~VoECodecImpl() {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "~VoECodecImpl() - dtor");
 }
 
-int VoECodecImpl::NumOfCodecs()
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "NumOfCodecs()");
+int VoECodecImpl::NumOfCodecs() {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "NumOfCodecs()");
 
-    // Number of supported codecs in the ACM
-    uint8_t nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
+  // Number of supported codecs in the ACM
+  uint8_t nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
 
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "NumOfCodecs() => %u", nSupportedCodecs);
-    return (nSupportedCodecs);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "NumOfCodecs() => %u", nSupportedCodecs);
+  return (nSupportedCodecs);
 }
 
-int VoECodecImpl::GetCodec(int index, CodecInst& codec)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetCodec(index=%d, codec=?)", index);
-    CodecInst acmCodec;
-    if (AudioCodingModule::Codec(index, &acmCodec)
-            == -1)
-    {
-        _shared->SetLastError(VE_INVALID_LISTNR, kTraceError,
-            "GetCodec() invalid index");
-        return -1;
-    }
-    ACMToExternalCodecRepresentation(codec, acmCodec);
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "GetCodec() => plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
-        "channels=%d, rate=%d", codec.plname, codec.pacsize,
-        codec.plfreq, codec.pltype, codec.channels, codec.rate);
-    return 0;
+int VoECodecImpl::GetCodec(int index, CodecInst& codec) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetCodec(index=%d, codec=?)", index);
+  CodecInst acmCodec;
+  if (AudioCodingModule::Codec(index, &acmCodec) == -1) {
+    _shared->SetLastError(VE_INVALID_LISTNR, kTraceError,
+                          "GetCodec() invalid index");
+    return -1;
+  }
+  ACMToExternalCodecRepresentation(codec, acmCodec);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetCodec() => plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
+               "channels=%d, rate=%d",
+               codec.plname, codec.pacsize, codec.plfreq, codec.pltype,
+               codec.channels, codec.rate);
+  return 0;
 }
 
-int VoECodecImpl::SetSendCodec(int channel, const CodecInst& codec)
-{
-    CodecInst copyCodec;
-    ExternalToACMCodecRepresentation(copyCodec, codec);
+int VoECodecImpl::SetSendCodec(int channel, const CodecInst& codec) {
+  CodecInst copyCodec;
+  ExternalToACMCodecRepresentation(copyCodec, codec);
 
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetSendCodec(channel=%d, codec)", channel);
-    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "codec: plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
-                 "channels=%d, rate=%d", codec.plname, codec.pacsize,
-                 codec.plfreq, codec.pltype, codec.channels, codec.rate);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    // External sanity checks performed outside the ACM
-    if ((STR_CASE_CMP(copyCodec.plname, "L16") == 0) &&
-            (copyCodec.pacsize >= 960))
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetSendCodec() invalid L16 packet size");
-        return -1;
-    }
-    if (!STR_CASE_CMP(copyCodec.plname, "CN")
-            || !STR_CASE_CMP(copyCodec.plname, "TELEPHONE-EVENT")
-            || !STR_CASE_CMP(copyCodec.plname, "RED"))
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetSendCodec() invalid codec name");
-        return -1;
-    }
-    if ((copyCodec.channels != 1) && (copyCodec.channels != 2))
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetSendCodec() invalid number of channels");
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetSendCodec() failed to locate channel");
-        return -1;
-    }
-    if (!AudioCodingModule::IsCodecValid(
-            (CodecInst&) copyCodec))
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetSendCodec() invalid codec");
-        return -1;
-    }
-    if (channelPtr->SetSendCodec(copyCodec) != 0)
-    {
-        _shared->SetLastError(VE_CANNOT_SET_SEND_CODEC, kTraceError,
-            "SetSendCodec() failed to set send codec");
-        return -1;
-    }
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetSendCodec(channel=%d, codec)", channel);
+  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "codec: plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
+               "channels=%d, rate=%d",
+               codec.plname, codec.pacsize, codec.plfreq, codec.pltype,
+               codec.channels, codec.rate);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  // External sanity checks performed outside the ACM
+  if ((STR_CASE_CMP(copyCodec.plname, "L16") == 0) &&
+      (copyCodec.pacsize >= 960)) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "SetSendCodec() invalid L16 packet size");
+    return -1;
+  }
+  if (!STR_CASE_CMP(copyCodec.plname, "CN") ||
+      !STR_CASE_CMP(copyCodec.plname, "TELEPHONE-EVENT") ||
+      !STR_CASE_CMP(copyCodec.plname, "RED")) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "SetSendCodec() invalid codec name");
+    return -1;
+  }
+  if ((copyCodec.channels != 1) && (copyCodec.channels != 2)) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "SetSendCodec() invalid number of channels");
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetSendCodec() failed to locate channel");
+    return -1;
+  }
+  if (!AudioCodingModule::IsCodecValid((CodecInst&)copyCodec)) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "SetSendCodec() invalid codec");
+    return -1;
+  }
+  if (channelPtr->SetSendCodec(copyCodec) != 0) {
+    _shared->SetLastError(VE_CANNOT_SET_SEND_CODEC, kTraceError,
+                          "SetSendCodec() failed to set send codec");
+    return -1;
+  }
 
-    return 0;
+  return 0;
 }
 
-int VoECodecImpl::GetSendCodec(int channel, CodecInst& codec)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetSendCodec(channel=%d, codec=?)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetSendCodec() failed to locate channel");
-        return -1;
-    }
-    CodecInst acmCodec;
-    if (channelPtr->GetSendCodec(acmCodec) != 0)
-    {
-        _shared->SetLastError(VE_CANNOT_GET_SEND_CODEC, kTraceError,
-            "GetSendCodec() failed to get send codec");
-        return -1;
-    }
-    ACMToExternalCodecRepresentation(codec, acmCodec);
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "GetSendCodec() => plname=%s, pacsize=%d, plfreq=%d, "
-        "channels=%d, rate=%d", codec.plname, codec.pacsize,
-        codec.plfreq, codec.channels, codec.rate);
-    return 0;
+int VoECodecImpl::GetSendCodec(int channel, CodecInst& codec) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetSendCodec(channel=%d, codec=?)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetSendCodec() failed to locate channel");
+    return -1;
+  }
+  CodecInst acmCodec;
+  if (channelPtr->GetSendCodec(acmCodec) != 0) {
+    _shared->SetLastError(VE_CANNOT_GET_SEND_CODEC, kTraceError,
+                          "GetSendCodec() failed to get send codec");
+    return -1;
+  }
+  ACMToExternalCodecRepresentation(codec, acmCodec);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetSendCodec() => plname=%s, pacsize=%d, plfreq=%d, "
+               "channels=%d, rate=%d",
+               codec.plname, codec.pacsize, codec.plfreq, codec.channels,
+               codec.rate);
+  return 0;
 }
 
 int VoECodecImpl::SetBitRate(int channel, int bitrate_bps) {
@@ -192,119 +170,106 @@
   return 0;
 }
 
-int VoECodecImpl::GetRecCodec(int channel, CodecInst& codec)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetRecCodec(channel=%d, codec=?)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetRecCodec() failed to locate channel");
-        return -1;
-    }
-    CodecInst acmCodec;
-    if (channelPtr->GetRecCodec(acmCodec) != 0)
-    {
-        _shared->SetLastError(VE_CANNOT_GET_REC_CODEC, kTraceError,
-            "GetRecCodec() failed to get received codec");
-        return -1;
-    }
-    ACMToExternalCodecRepresentation(codec, acmCodec);
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "GetRecCodec() => plname=%s, pacsize=%d, plfreq=%d, "
-        "channels=%d, rate=%d", codec.plname, codec.pacsize,
-        codec.plfreq, codec.channels, codec.rate);
-    return 0;
+int VoECodecImpl::GetRecCodec(int channel, CodecInst& codec) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRecCodec(channel=%d, codec=?)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetRecCodec() failed to locate channel");
+    return -1;
+  }
+  CodecInst acmCodec;
+  if (channelPtr->GetRecCodec(acmCodec) != 0) {
+    _shared->SetLastError(VE_CANNOT_GET_REC_CODEC, kTraceError,
+                          "GetRecCodec() failed to get received codec");
+    return -1;
+  }
+  ACMToExternalCodecRepresentation(codec, acmCodec);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRecCodec() => plname=%s, pacsize=%d, plfreq=%d, "
+               "channels=%d, rate=%d",
+               codec.plname, codec.pacsize, codec.plfreq, codec.channels,
+               codec.rate);
+  return 0;
 }
 
-int VoECodecImpl::SetRecPayloadType(int channel, const CodecInst& codec)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetRecPayloadType(channel=%d, codec)", channel);
-    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+int VoECodecImpl::SetRecPayloadType(int channel, const CodecInst& codec) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetRecPayloadType(channel=%d, codec)", channel);
+  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "codec: plname=%s, plfreq=%d, pltype=%d, channels=%u, "
-               "pacsize=%d, rate=%d", codec.plname, codec.plfreq, codec.pltype,
-               codec.channels, codec.pacsize, codec.rate);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetRecPayloadType() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetRecPayloadType(codec);
+               "pacsize=%d, rate=%d",
+               codec.plname, codec.plfreq, codec.pltype, codec.channels,
+               codec.pacsize, codec.rate);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetRecPayloadType() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetRecPayloadType(codec);
 }
 
-int VoECodecImpl::GetRecPayloadType(int channel, CodecInst& codec)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetRecPayloadType(channel=%d, codec)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetRecPayloadType() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetRecPayloadType(codec);
+int VoECodecImpl::GetRecPayloadType(int channel, CodecInst& codec) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRecPayloadType(channel=%d, codec)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetRecPayloadType() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetRecPayloadType(codec);
 }
 
-int VoECodecImpl::SetSendCNPayloadType(int channel, int type,
-                                       PayloadFrequencies frequency)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetSendCNPayloadType(channel=%d, type=%d, frequency=%d)",
-                 channel, type, frequency);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (type < 96 || type > 127)
-    {
-        // Only allow dynamic range: 96 to 127
-        _shared->SetLastError(VE_INVALID_PLTYPE, kTraceError,
-            "SetSendCNPayloadType() invalid payload type");
-        return -1;
-    }
-    if ((frequency != kFreq16000Hz) && (frequency != kFreq32000Hz))
-    {
-        // It is not possible to modify the payload type for CN/8000.
-        // We only allow modification of the CN payload type for CN/16000
-        // and CN/32000.
-        _shared->SetLastError(VE_INVALID_PLFREQ, kTraceError,
-            "SetSendCNPayloadType() invalid payload frequency");
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetSendCNPayloadType() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetSendCNPayloadType(type, frequency);
+int VoECodecImpl::SetSendCNPayloadType(int channel,
+                                       int type,
+                                       PayloadFrequencies frequency) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetSendCNPayloadType(channel=%d, type=%d, frequency=%d)",
+               channel, type, frequency);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (type < 96 || type > 127) {
+    // Only allow dynamic range: 96 to 127
+    _shared->SetLastError(VE_INVALID_PLTYPE, kTraceError,
+                          "SetSendCNPayloadType() invalid payload type");
+    return -1;
+  }
+  if ((frequency != kFreq16000Hz) && (frequency != kFreq32000Hz)) {
+    // It is not possible to modify the payload type for CN/8000.
+    // We only allow modification of the CN payload type for CN/16000
+    // and CN/32000.
+    _shared->SetLastError(VE_INVALID_PLFREQ, kTraceError,
+                          "SetSendCNPayloadType() invalid payload frequency");
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetSendCNPayloadType() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetSendCNPayloadType(type, frequency);
 }
 
 int VoECodecImpl::SetFECStatus(int channel, bool enable) {
@@ -342,92 +307,87 @@
   return 0;
 }
 
-int VoECodecImpl::SetVADStatus(int channel, bool enable, VadModes mode,
-                               bool disableDTX)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetVADStatus(channel=%i, enable=%i, mode=%i, disableDTX=%i)",
-                 channel, enable, mode, disableDTX);
+int VoECodecImpl::SetVADStatus(int channel,
+                               bool enable,
+                               VadModes mode,
+                               bool disableDTX) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetVADStatus(channel=%i, enable=%i, mode=%i, disableDTX=%i)",
+               channel, enable, mode, disableDTX);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetVADStatus failed to locate channel");
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetVADStatus failed to locate channel");
+    return -1;
+  }
 
-    ACMVADMode vadMode(VADNormal);
-    switch (mode)
-    {
-        case kVadConventional:
-            vadMode = VADNormal;
-            break;
-        case kVadAggressiveLow:
-            vadMode = VADLowBitrate;
-            break;
-        case kVadAggressiveMid:
-            vadMode = VADAggr;
-            break;
-        case kVadAggressiveHigh:
-            vadMode = VADVeryAggr;
-            break;
-    }
-    return channelPtr->SetVADStatus(enable, vadMode, disableDTX);
+  ACMVADMode vadMode(VADNormal);
+  switch (mode) {
+    case kVadConventional:
+      vadMode = VADNormal;
+      break;
+    case kVadAggressiveLow:
+      vadMode = VADLowBitrate;
+      break;
+    case kVadAggressiveMid:
+      vadMode = VADAggr;
+      break;
+    case kVadAggressiveHigh:
+      vadMode = VADVeryAggr;
+      break;
+  }
+  return channelPtr->SetVADStatus(enable, vadMode, disableDTX);
 }
 
-int VoECodecImpl::GetVADStatus(int channel, bool& enabled, VadModes& mode,
-                               bool& disabledDTX)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetVADStatus(channel=%i)", channel);
+int VoECodecImpl::GetVADStatus(int channel,
+                               bool& enabled,
+                               VadModes& mode,
+                               bool& disabledDTX) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetVADStatus(channel=%i)", channel);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetVADStatus failed to locate channel");
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetVADStatus failed to locate channel");
+    return -1;
+  }
 
-    ACMVADMode vadMode;
-    int ret = channelPtr->GetVADStatus(enabled, vadMode, disabledDTX);
+  ACMVADMode vadMode;
+  int ret = channelPtr->GetVADStatus(enabled, vadMode, disabledDTX);
 
-    if (ret != 0)
-    {
-        _shared->SetLastError(VE_INVALID_OPERATION, kTraceError,
-            "GetVADStatus failed to get VAD mode");
-        return -1;
-    }
-    switch (vadMode)
-    {
-        case VADNormal:
-            mode = kVadConventional;
-            break;
-        case VADLowBitrate:
-            mode = kVadAggressiveLow;
-            break;
-        case VADAggr:
-            mode = kVadAggressiveMid;
-            break;
-        case VADVeryAggr:
-            mode = kVadAggressiveHigh;
-            break;
-    }
+  if (ret != 0) {
+    _shared->SetLastError(VE_INVALID_OPERATION, kTraceError,
+                          "GetVADStatus failed to get VAD mode");
+    return -1;
+  }
+  switch (vadMode) {
+    case VADNormal:
+      mode = kVadConventional;
+      break;
+    case VADLowBitrate:
+      mode = kVadAggressiveLow;
+      break;
+    case VADAggr:
+      mode = kVadAggressiveMid;
+      break;
+    case VADVeryAggr:
+      mode = kVadAggressiveHigh;
+      break;
+  }
 
-    return 0;
+  return 0;
 }
 
 int VoECodecImpl::SetOpusMaxPlaybackRate(int channel, int frequency_hz) {
@@ -466,81 +426,51 @@
 }
 
 void VoECodecImpl::ACMToExternalCodecRepresentation(CodecInst& toInst,
-                                                    const CodecInst& fromInst)
-{
-    toInst = fromInst;
-    if (STR_CASE_CMP(fromInst.plname,"SILK") == 0)
-    {
-        if (fromInst.plfreq == 12000)
-        {
-            if (fromInst.pacsize == 320)
-            {
-                toInst.pacsize = 240;
-            }
-            else if (fromInst.pacsize == 640)
-            {
-                toInst.pacsize = 480;
-            }
-            else if (fromInst.pacsize == 960)
-            {
-                toInst.pacsize = 720;
-            }
-        }
-        else if (fromInst.plfreq == 24000)
-        {
-            if (fromInst.pacsize == 640)
-            {
-                toInst.pacsize = 480;
-            }
-            else if (fromInst.pacsize == 1280)
-            {
-                toInst.pacsize = 960;
-            }
-            else if (fromInst.pacsize == 1920)
-            {
-                toInst.pacsize = 1440;
-            }
-        }
+                                                    const CodecInst& fromInst) {
+  toInst = fromInst;
+  if (STR_CASE_CMP(fromInst.plname, "SILK") == 0) {
+    if (fromInst.plfreq == 12000) {
+      if (fromInst.pacsize == 320) {
+        toInst.pacsize = 240;
+      } else if (fromInst.pacsize == 640) {
+        toInst.pacsize = 480;
+      } else if (fromInst.pacsize == 960) {
+        toInst.pacsize = 720;
+      }
+    } else if (fromInst.plfreq == 24000) {
+      if (fromInst.pacsize == 640) {
+        toInst.pacsize = 480;
+      } else if (fromInst.pacsize == 1280) {
+        toInst.pacsize = 960;
+      } else if (fromInst.pacsize == 1920) {
+        toInst.pacsize = 1440;
+      }
     }
+  }
 }
 
 void VoECodecImpl::ExternalToACMCodecRepresentation(CodecInst& toInst,
-                                                    const CodecInst& fromInst)
-{
-    toInst = fromInst;
-    if (STR_CASE_CMP(fromInst.plname,"SILK") == 0)
-    {
-        if (fromInst.plfreq == 12000)
-        {
-            if (fromInst.pacsize == 240)
-            {
-                toInst.pacsize = 320;
-            }
-            else if (fromInst.pacsize == 480)
-            {
-                toInst.pacsize = 640;
-            }
-            else if (fromInst.pacsize == 720)
-            {
-                toInst.pacsize = 960;
-            }
-        }
-        else if (fromInst.plfreq == 24000)
-        {
-            if (fromInst.pacsize == 480)
-            {
-                toInst.pacsize = 640;
-            }
-            else if (fromInst.pacsize == 960)
-            {
-                toInst.pacsize = 1280;
-            }
-            else if (fromInst.pacsize == 1440)
-            {
-                toInst.pacsize = 1920;
-            }
-        }
+                                                    const CodecInst& fromInst) {
+  toInst = fromInst;
+  if (STR_CASE_CMP(fromInst.plname, "SILK") == 0) {
+    if (fromInst.plfreq == 12000) {
+      if (fromInst.pacsize == 240) {
+        toInst.pacsize = 320;
+      } else if (fromInst.pacsize == 480) {
+        toInst.pacsize = 640;
+      } else if (fromInst.pacsize == 720) {
+        toInst.pacsize = 960;
+      }
+    } else if (fromInst.plfreq == 24000) {
+      if (fromInst.pacsize == 480) {
+        toInst.pacsize = 640;
+      } else if (fromInst.pacsize == 960) {
+        toInst.pacsize = 1280;
+      } else if (fromInst.pacsize == 1440) {
+        toInst.pacsize = 1920;
+      }
     }
+  }
 }
 
 #endif  // WEBRTC_VOICE_ENGINE_CODEC_API
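
The codec implementation above shows the other half of the reformat: four-space indents and braces on their own lines become two-space indents, attached braces, and cuddled else, which is what collapses the long SILK pacsize cascades. A before/after sketch on a hypothetical helper (the 320/240 and 640/480 values mirror the 12 kHz mapping in the diff; the function name is made up):

  // Old layout:
  //     if (pacsize == 320)
  //     {
  //         pacsize = 240;
  //     }
  //     else if (pacsize == 640)
  //     {
  //         pacsize = 480;
  //     }
  //
  // Layout after clang-format:
  int NormalizePacsize(int pacsize) {
    if (pacsize == 320) {
      pacsize = 240;
    } else if (pacsize == 640) {
      pacsize = 480;
    }
    return pacsize;
  }
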
diff --git a/webrtc/voice_engine/voe_codec_impl.h b/webrtc/voice_engine/voe_codec_impl.h
index 8e8ac38..a60ce6d 100644
--- a/webrtc/voice_engine/voe_codec_impl.h
+++ b/webrtc/voice_engine/voe_codec_impl.h
@@ -15,63 +15,61 @@
 
 #include "webrtc/voice_engine/shared_data.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
-class VoECodecImpl: public VoECodec
-{
-public:
-    virtual int NumOfCodecs();
+class VoECodecImpl : public VoECodec {
+ public:
+  int NumOfCodecs() override;
 
-    virtual int GetCodec(int index, CodecInst& codec);
+  int GetCodec(int index, CodecInst& codec) override;
 
-    virtual int SetSendCodec(int channel, const CodecInst& codec);
+  int SetSendCodec(int channel, const CodecInst& codec) override;
 
-    virtual int GetSendCodec(int channel, CodecInst& codec);
+  int GetSendCodec(int channel, CodecInst& codec) override;
 
-    int SetBitRate(int channel, int bitrate_bps) override;
+  int SetBitRate(int channel, int bitrate_bps) override;
 
-    virtual int GetRecCodec(int channel, CodecInst& codec);
+  int GetRecCodec(int channel, CodecInst& codec) override;
 
-    virtual int SetSendCNPayloadType(
-        int channel, int type,
-        PayloadFrequencies frequency = kFreq16000Hz);
+  int SetSendCNPayloadType(
+      int channel,
+      int type,
+      PayloadFrequencies frequency = kFreq16000Hz) override;
 
-    virtual int SetRecPayloadType(int channel,
-                                  const CodecInst& codec);
+  int SetRecPayloadType(int channel, const CodecInst& codec) override;
 
-    virtual int GetRecPayloadType(int channel, CodecInst& codec);
+  int GetRecPayloadType(int channel, CodecInst& codec) override;
 
-    virtual int SetFECStatus(int channel, bool enable);
+  int SetFECStatus(int channel, bool enable) override;
 
-    virtual int GetFECStatus(int channel, bool& enabled);
+  int GetFECStatus(int channel, bool& enabled) override;
 
-    virtual int SetVADStatus(int channel,
-                             bool enable,
-                             VadModes mode = kVadConventional,
-                             bool disableDTX = false);
+  int SetVADStatus(int channel,
+                   bool enable,
+                   VadModes mode = kVadConventional,
+                   bool disableDTX = false) override;
 
-    virtual int GetVADStatus(int channel,
-                             bool& enabled,
-                             VadModes& mode,
-                             bool& disabledDTX);
+  int GetVADStatus(int channel,
+                   bool& enabled,
+                   VadModes& mode,
+                   bool& disabledDTX) override;
 
-    virtual int SetOpusMaxPlaybackRate(int channel, int frequency_hz);
+  int SetOpusMaxPlaybackRate(int channel, int frequency_hz) override;
 
-    virtual int SetOpusDtx(int channel, bool enable_dtx);
+  int SetOpusDtx(int channel, bool enable_dtx) override;
 
-protected:
-    VoECodecImpl(voe::SharedData* shared);
-    virtual ~VoECodecImpl();
+ protected:
+  VoECodecImpl(voe::SharedData* shared);
+  ~VoECodecImpl() override;
 
-private:
-    void ACMToExternalCodecRepresentation(CodecInst& toInst,
-                                          const CodecInst& fromInst);
+ private:
+  void ACMToExternalCodecRepresentation(CodecInst& toInst,
+                                        const CodecInst& fromInst);
 
-    void ExternalToACMCodecRepresentation(CodecInst& toInst,
-                                          const CodecInst& fromInst);
+  void ExternalToACMCodecRepresentation(CodecInst& toInst,
+                                        const CodecInst& fromInst);
 
-    voe::SharedData* _shared;
+  voe::SharedData* _shared;
 };
 
 }  // namespace webrtc
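
The header above shows the pattern applied throughout these interfaces:
derived-class declarations drop the leading virtual and add a trailing
override, so the compiler rejects any declaration whose signature no longer
matches the base class. A minimal illustration with placeholder names
(Codec/CodecImpl are stand-ins, not the real VoE classes):

#include <cstdio>

struct Codec {  // stand-in for an abstract VoE interface
  virtual int NumOfCodecs() = 0;
  virtual ~Codec() {}
};

struct CodecImpl : public Codec {  // stand-in for its implementation
  // New style: 'override' instead of 'virtual'; a mismatch with the base
  // declaration is now a compile-time error.
  int NumOfCodecs() override { return 0; }
  ~CodecImpl() override {}
};

int main() {
  CodecImpl impl;
  std::printf("%d\n", impl.NumOfCodecs());
  return 0;
}
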
diff --git a/webrtc/voice_engine/voe_codec_unittest.cc b/webrtc/voice_engine/voe_codec_unittest.cc
index 6eb5a51..52aa537 100644
--- a/webrtc/voice_engine/voe_codec_unittest.cc
+++ b/webrtc/voice_engine/voe_codec_unittest.cc
@@ -30,8 +30,7 @@
         voe_codec_(VoECodec::GetInterface(voe_)),
         channel_(-1),
         adm_(new FakeAudioDeviceModule),
-        red_payload_type_(-1) {
-  }
+        red_payload_type_(-1) {}
 
   ~VoECodecTest() {}
 
@@ -62,18 +61,19 @@
     // Find primary and secondary codecs.
     int num_codecs = voe_codec_->NumOfCodecs();
     int n = 0;
-    while (n < num_codecs && (!primary_found || !valid_secondary_found ||
-        !invalid_secondary_found || red_payload_type_ < 0)) {
+    while (n < num_codecs &&
+           (!primary_found || !valid_secondary_found ||
+            !invalid_secondary_found || red_payload_type_ < 0)) {
       EXPECT_EQ(0, voe_codec_->GetCodec(n, my_codec));
       if (!STR_CASE_CMP(my_codec.plname, "isac") && my_codec.plfreq == 16000) {
         memcpy(&valid_secondary_, &my_codec, sizeof(my_codec));
         valid_secondary_found = true;
       } else if (!STR_CASE_CMP(my_codec.plname, "isac") &&
-          my_codec.plfreq == 32000) {
+                 my_codec.plfreq == 32000) {
         memcpy(&invalid_secondary_, &my_codec, sizeof(my_codec));
         invalid_secondary_found = true;
       } else if (!STR_CASE_CMP(my_codec.plname, "L16") &&
-          my_codec.plfreq == 16000) {
+                 my_codec.plfreq == 16000) {
         memcpy(&primary_, &my_codec, sizeof(my_codec));
         primary_found = true;
       } else if (!STR_CASE_CMP(my_codec.plname, "RED")) {
diff --git a/webrtc/voice_engine/voe_dtmf_impl.cc b/webrtc/voice_engine/voe_dtmf_impl.cc
index 2d775e3..26cc3a8 100644
--- a/webrtc/voice_engine/voe_dtmf_impl.cc
+++ b/webrtc/voice_engine/voe_dtmf_impl.cc
@@ -20,242 +20,206 @@
 
 namespace webrtc {
 
-VoEDtmf* VoEDtmf::GetInterface(VoiceEngine* voiceEngine)
-{
+VoEDtmf* VoEDtmf::GetInterface(VoiceEngine* voiceEngine) {
 #ifndef WEBRTC_VOICE_ENGINE_DTMF_API
-    return NULL;
+  return NULL;
 #else
-    if (NULL == voiceEngine)
-    {
-        return NULL;
-    }
-    VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
-    s->AddRef();
-    return s;
+  if (NULL == voiceEngine) {
+    return NULL;
+  }
+  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+  s->AddRef();
+  return s;
 #endif
 }
 
 #ifdef WEBRTC_VOICE_ENGINE_DTMF_API
 
-VoEDtmfImpl::VoEDtmfImpl(voe::SharedData* shared) :
-    _dtmfFeedback(true),
-    _dtmfDirectFeedback(false),
-    _shared(shared)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoEDtmfImpl::VoEDtmfImpl() - ctor");
+VoEDtmfImpl::VoEDtmfImpl(voe::SharedData* shared)
+    : _dtmfFeedback(true), _dtmfDirectFeedback(false), _shared(shared) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoEDtmfImpl::VoEDtmfImpl() - ctor");
 }
 
-VoEDtmfImpl::~VoEDtmfImpl()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoEDtmfImpl::~VoEDtmfImpl() - dtor");
+VoEDtmfImpl::~VoEDtmfImpl() {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoEDtmfImpl::~VoEDtmfImpl() - dtor");
 }
 
 int VoEDtmfImpl::SendTelephoneEvent(int channel,
                                     int eventCode,
                                     bool outOfBand,
                                     int lengthMs,
-                                    int attenuationDb)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SendTelephoneEvent(channel=%d, eventCode=%d, outOfBand=%d,"
-                 "length=%d, attenuationDb=%d)",
-                 channel, eventCode, (int)outOfBand, lengthMs, attenuationDb);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SendTelephoneEvent() failed to locate channel");
-        return -1;
-    }
-    if (!channelPtr->Sending())
-    {
-        _shared->SetLastError(VE_NOT_SENDING, kTraceError,
-            "SendTelephoneEvent() sending is not active");
-        return -1;
-    }
+                                    int attenuationDb) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SendTelephoneEvent(channel=%d, eventCode=%d, outOfBand=%d,"
+               "length=%d, attenuationDb=%d)",
+               channel, eventCode, (int)outOfBand, lengthMs, attenuationDb);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SendTelephoneEvent() failed to locate channel");
+    return -1;
+  }
+  if (!channelPtr->Sending()) {
+    _shared->SetLastError(VE_NOT_SENDING, kTraceError,
+                          "SendTelephoneEvent() sending is not active");
+    return -1;
+  }
 
-    // Sanity check
-    const int maxEventCode = outOfBand ?
-        static_cast<int>(kMaxTelephoneEventCode) :
-        static_cast<int>(kMaxDtmfEventCode);
-    const bool testFailed = ((eventCode < 0) ||
-        (eventCode > maxEventCode) ||
-        (lengthMs < kMinTelephoneEventDuration) ||
-        (lengthMs > kMaxTelephoneEventDuration) ||
-        (attenuationDb < kMinTelephoneEventAttenuation) ||
-        (attenuationDb > kMaxTelephoneEventAttenuation));
-    if (testFailed)
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SendTelephoneEvent() invalid parameter(s)");
-        return -1;
-    }
+  // Sanity check
+  const int maxEventCode = outOfBand ? static_cast<int>(kMaxTelephoneEventCode)
+                                     : static_cast<int>(kMaxDtmfEventCode);
+  const bool testFailed = ((eventCode < 0) || (eventCode > maxEventCode) ||
+                           (lengthMs < kMinTelephoneEventDuration) ||
+                           (lengthMs > kMaxTelephoneEventDuration) ||
+                           (attenuationDb < kMinTelephoneEventAttenuation) ||
+                           (attenuationDb > kMaxTelephoneEventAttenuation));
+  if (testFailed) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "SendTelephoneEvent() invalid parameter(s)");
+    return -1;
+  }
 
-    const bool isDtmf =
-        (eventCode >= 0) && (eventCode <= kMaxDtmfEventCode);
-    const bool playDtmfToneDirect =
-        isDtmf && (_dtmfFeedback && _dtmfDirectFeedback);
+  const bool isDtmf = (eventCode >= 0) && (eventCode <= kMaxDtmfEventCode);
+  const bool playDtmfToneDirect =
+      isDtmf && (_dtmfFeedback && _dtmfDirectFeedback);
 
-    if (playDtmfToneDirect)
-    {
-        // Mute the microphone signal while playing back the tone directly.
-        // This is to reduce the risk of introducing echo from the added output.
-        _shared->transmit_mixer()->UpdateMuteMicrophoneTime(lengthMs);
+  if (playDtmfToneDirect) {
+    // Mute the microphone signal while playing back the tone directly.
+    // This is to reduce the risk of introducing echo from the added output.
+    _shared->transmit_mixer()->UpdateMuteMicrophoneTime(lengthMs);
 
-        // Play out local feedback tone directly (same approach for both inband
-        // and outband).
-        // Reduce the length of the the tone with 80ms to reduce risk of echo.
-        // For non-direct feedback, outband and inband cases are handled
-        // differently.
-        _shared->output_mixer()->PlayDtmfTone(eventCode, lengthMs - 80,
-                                            attenuationDb);
-    }
+    // Play out local feedback tone directly (same approach for both inband
+    // and outband).
+    // Reduce the length of the tone by 80 ms to reduce the risk of echo.
+    // For non-direct feedback, outband and inband cases are handled
+    // differently.
+    _shared->output_mixer()->PlayDtmfTone(eventCode, lengthMs - 80,
+                                          attenuationDb);
+  }
 
-    if (outOfBand)
-    {
-        // The RTP/RTCP module will always deliver OnPlayTelephoneEvent when
-        // an event is transmitted. It is up to the VoE to utilize it or not.
-        // This flag ensures that feedback/playout is enabled; however, the
-        // channel object must still parse out the Dtmf events (0-15) from
-        // all possible events (0-255).
-        const bool playDTFMEvent = (_dtmfFeedback && !_dtmfDirectFeedback);
+  if (outOfBand) {
+    // The RTP/RTCP module will always deliver OnPlayTelephoneEvent when
+    // an event is transmitted. It is up to the VoE to utilize it or not.
+    // This flag ensures that feedback/playout is enabled; however, the
+    // channel object must still parse out the Dtmf events (0-15) from
+    // all possible events (0-255).
+    const bool playDTFMEvent = (_dtmfFeedback && !_dtmfDirectFeedback);
 
-        return channelPtr->SendTelephoneEventOutband(eventCode,
-                                                     lengthMs,
-                                                     attenuationDb,
-                                                     playDTFMEvent);
-    }
-    else
-    {
-        // For Dtmf tones, we want to ensure that inband tones are played out
-        // in sync with the transmitted audio. This flag is utilized by the
-        // channel object to determine if the queued Dtmf e vent shall also
-        // be fed to the output mixer in the same step as input audio is
-        // replaced by inband Dtmf tones.
-        const bool playDTFMEvent =
-            (isDtmf && _dtmfFeedback && !_dtmfDirectFeedback);
+    return channelPtr->SendTelephoneEventOutband(eventCode, lengthMs,
+                                                 attenuationDb, playDTFMEvent);
+  } else {
+    // For Dtmf tones, we want to ensure that inband tones are played out
+    // in sync with the transmitted audio. This flag is utilized by the
+    // channel object to determine if the queued Dtmf event shall also
+    // be fed to the output mixer in the same step as input audio is
+    // replaced by inband Dtmf tones.
+    const bool playDTFMEvent =
+        (isDtmf && _dtmfFeedback && !_dtmfDirectFeedback);
 
-        return channelPtr->SendTelephoneEventInband(eventCode,
-                                                    lengthMs,
-                                                    attenuationDb,
-                                                    playDTFMEvent);
-    }
+    return channelPtr->SendTelephoneEventInband(eventCode, lengthMs,
+                                                attenuationDb, playDTFMEvent);
+  }
 }
 
 int VoEDtmfImpl::SetSendTelephoneEventPayloadType(int channel,
-                                                  unsigned char type)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetSendTelephoneEventPayloadType(channel=%d, type=%u)",
-                 channel, type);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetSendTelephoneEventPayloadType() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetSendTelephoneEventPayloadType(type);
+                                                  unsigned char type) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetSendTelephoneEventPayloadType(channel=%d, type=%u)", channel,
+               type);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(
+        VE_CHANNEL_NOT_VALID, kTraceError,
+        "SetSendTelephoneEventPayloadType() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetSendTelephoneEventPayloadType(type);
 }
 
 int VoEDtmfImpl::GetSendTelephoneEventPayloadType(int channel,
-                                                  unsigned char& type)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetSendTelephoneEventPayloadType(channel=%d)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetSendTelephoneEventPayloadType() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetSendTelephoneEventPayloadType(type);
+                                                  unsigned char& type) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetSendTelephoneEventPayloadType(channel=%d)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(
+        VE_CHANNEL_NOT_VALID, kTraceError,
+        "GetSendTelephoneEventPayloadType() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetSendTelephoneEventPayloadType(type);
 }
 
-int VoEDtmfImpl::PlayDtmfTone(int eventCode,
-                              int lengthMs,
-                              int attenuationDb)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "PlayDtmfTone(eventCode=%d, lengthMs=%d, attenuationDb=%d)",
-                 eventCode, lengthMs, attenuationDb);
+int VoEDtmfImpl::PlayDtmfTone(int eventCode, int lengthMs, int attenuationDb) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "PlayDtmfTone(eventCode=%d, lengthMs=%d, attenuationDb=%d)",
+               eventCode, lengthMs, attenuationDb);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (!_shared->audio_device()->Playing())
-    {
-        _shared->SetLastError(VE_NOT_PLAYING, kTraceError,
-            "PlayDtmfTone() no channel is playing out");
-        return -1;
-    }
-    if ((eventCode < kMinDtmfEventCode) ||
-        (eventCode > kMaxDtmfEventCode) ||
-        (lengthMs < kMinTelephoneEventDuration) ||
-        (lengthMs > kMaxTelephoneEventDuration) ||
-        (attenuationDb <kMinTelephoneEventAttenuation) ||
-        (attenuationDb > kMaxTelephoneEventAttenuation))
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-        "PlayDtmfTone() invalid tone parameter(s)");
-        return -1;
-    }
-    return _shared->output_mixer()->PlayDtmfTone(eventCode, lengthMs,
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (!_shared->audio_device()->Playing()) {
+    _shared->SetLastError(VE_NOT_PLAYING, kTraceError,
+                          "PlayDtmfTone() no channel is playing out");
+    return -1;
+  }
+  if ((eventCode < kMinDtmfEventCode) || (eventCode > kMaxDtmfEventCode) ||
+      (lengthMs < kMinTelephoneEventDuration) ||
+      (lengthMs > kMaxTelephoneEventDuration) ||
+      (attenuationDb < kMinTelephoneEventAttenuation) ||
+      (attenuationDb > kMaxTelephoneEventAttenuation)) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "PlayDtmfTone() invalid tone parameter(s)");
+    return -1;
+  }
+  return _shared->output_mixer()->PlayDtmfTone(eventCode, lengthMs,
                                                attenuationDb);
 }
 
-int VoEDtmfImpl::SetDtmfFeedbackStatus(bool enable, bool directFeedback)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetDtmfFeedbackStatus(enable=%d, directFeeback=%d)",
-                 (int)enable, (int)directFeedback);
+int VoEDtmfImpl::SetDtmfFeedbackStatus(bool enable, bool directFeedback) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetDtmfFeedbackStatus(enable=%d, directFeeback=%d)",
+               (int)enable, (int)directFeedback);
 
-    CriticalSectionScoped sc(_shared->crit_sec());
+  CriticalSectionScoped sc(_shared->crit_sec());
 
-    _dtmfFeedback = enable;
-    _dtmfDirectFeedback = directFeedback;
+  _dtmfFeedback = enable;
+  _dtmfDirectFeedback = directFeedback;
 
-    return 0;
+  return 0;
 }
 
-int VoEDtmfImpl::GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetDtmfFeedbackStatus()");
+int VoEDtmfImpl::GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetDtmfFeedbackStatus()");
 
-    CriticalSectionScoped sc(_shared->crit_sec());
+  CriticalSectionScoped sc(_shared->crit_sec());
 
-    enabled = _dtmfFeedback;
-    directFeedback = _dtmfDirectFeedback;
+  enabled = _dtmfFeedback;
+  directFeedback = _dtmfDirectFeedback;
 
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "GetDtmfFeedbackStatus() => enabled=%d, directFeedback=%d",
-        enabled, directFeedback);
-    return 0;
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetDtmfFeedbackStatus() => enabled=%d, directFeedback=%d",
+               enabled, directFeedback);
+  return 0;
 }
 #endif  // #ifdef WEBRTC_VOICE_ENGINE_DTMF_API
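
A hedged usage sketch of the DTMF path implemented above (SendDigitFive is a
hypothetical caller; the VoiceEngine/channel setup is assumed and error
handling is omitted; only the VoEDtmf calls mirror the signatures shown in
this file):

#include "webrtc/voice_engine/include/voe_dtmf.h"

// |voe| and |channel| are assumed to come from an initialized VoiceEngine
// with an active, sending channel.
void SendDigitFive(webrtc::VoiceEngine* voe, int channel) {
  webrtc::VoEDtmf* dtmf = webrtc::VoEDtmf::GetInterface(voe);
  if (dtmf == NULL)
    return;  // DTMF API not compiled in, or |voe| was NULL.
  // Feedback tone is mixed into playout rather than played back directly,
  // so the microphone is not muted while the event is sent.
  dtmf->SetDtmfFeedbackStatus(true, false);
  // Event code 5, out-of-band, 160 ms long, 10 dB attenuation.
  dtmf->SendTelephoneEvent(channel, 5, true, 160, 10);
  dtmf->Release();
}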
 
diff --git a/webrtc/voice_engine/voe_dtmf_impl.h b/webrtc/voice_engine/voe_dtmf_impl.h
index 81a95c0..a62188a 100644
--- a/webrtc/voice_engine/voe_dtmf_impl.h
+++ b/webrtc/voice_engine/voe_dtmf_impl.h
@@ -14,42 +14,38 @@
 #include "webrtc/voice_engine/include/voe_dtmf.h"
 #include "webrtc/voice_engine/shared_data.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
-class VoEDtmfImpl : public VoEDtmf
-{
-public:
-    virtual int SendTelephoneEvent(
-        int channel,
-        int eventCode,
-        bool outOfBand = true,
-        int lengthMs = 160,
-        int attenuationDb = 10);
+class VoEDtmfImpl : public VoEDtmf {
+ public:
+  int SendTelephoneEvent(int channel,
+                         int eventCode,
+                         bool outOfBand = true,
+                         int lengthMs = 160,
+                         int attenuationDb = 10) override;
 
-    virtual int SetSendTelephoneEventPayloadType(int channel,
-                                                 unsigned char type);
+  int SetSendTelephoneEventPayloadType(int channel,
+                                       unsigned char type) override;
 
-    virtual int GetSendTelephoneEventPayloadType(int channel,
-                                                 unsigned char& type);
+  int GetSendTelephoneEventPayloadType(int channel,
+                                       unsigned char& type) override;
 
-    virtual int SetDtmfFeedbackStatus(bool enable,
-        bool directFeedback = false);
+  int SetDtmfFeedbackStatus(bool enable, bool directFeedback = false) override;
 
-    virtual int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback);
+  int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) override;
 
-    virtual int PlayDtmfTone(int eventCode,
-                             int lengthMs = 200,
-                             int attenuationDb = 10);
+  int PlayDtmfTone(int eventCode,
+                   int lengthMs = 200,
+                   int attenuationDb = 10) override;
 
-protected:
-    VoEDtmfImpl(voe::SharedData* shared);
-    virtual ~VoEDtmfImpl();
+ protected:
+  VoEDtmfImpl(voe::SharedData* shared);
+  ~VoEDtmfImpl() override;
 
-private:
-    bool _dtmfFeedback;
-    bool _dtmfDirectFeedback;
-    voe::SharedData* _shared;
+ private:
+  bool _dtmfFeedback;
+  bool _dtmfDirectFeedback;
+  voe::SharedData* _shared;
 };
 
 }  // namespace webrtc
diff --git a/webrtc/voice_engine/voe_external_media_impl.cc b/webrtc/voice_engine/voe_external_media_impl.cc
index 7c52692..0c29fd4 100644
--- a/webrtc/voice_engine/voe_external_media_impl.cc
+++ b/webrtc/voice_engine/voe_external_media_impl.cc
@@ -20,18 +20,16 @@
 
 namespace webrtc {
 
-VoEExternalMedia* VoEExternalMedia::GetInterface(VoiceEngine* voiceEngine)
-{
+VoEExternalMedia* VoEExternalMedia::GetInterface(VoiceEngine* voiceEngine) {
 #ifndef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
-    return NULL;
+  return NULL;
 #else
-    if (NULL == voiceEngine)
-    {
-        return NULL;
-    }
-    VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
-    s->AddRef();
-    return s;
+  if (NULL == voiceEngine) {
+    return NULL;
+  }
+  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+  s->AddRef();
+  return s;
 #endif
 }
 
@@ -40,167 +38,145 @@
 VoEExternalMediaImpl::VoEExternalMediaImpl(voe::SharedData* shared)
     :
 #ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
-    playout_delay_ms_(0),
+      playout_delay_ms_(0),
 #endif
-    shared_(shared)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1),
-                 "VoEExternalMediaImpl() - ctor");
+      shared_(shared) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1),
+               "VoEExternalMediaImpl() - ctor");
 }
 
-VoEExternalMediaImpl::~VoEExternalMediaImpl()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1),
-                 "~VoEExternalMediaImpl() - dtor");
+VoEExternalMediaImpl::~VoEExternalMediaImpl() {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1),
+               "~VoEExternalMediaImpl() - dtor");
 }
 
 int VoEExternalMediaImpl::RegisterExternalMediaProcessing(
     int channel,
     ProcessingTypes type,
-    VoEMediaProcess& processObject)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
-                 "RegisterExternalMediaProcessing(channel=%d, type=%d, "
-                 "processObject=0x%x)", channel, type, &processObject);
-    if (!shared_->statistics().Initialized())
-    {
-        shared_->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    switch (type)
-    {
-        case kPlaybackPerChannel:
-        case kRecordingPerChannel:
-        {
-            voe::ChannelOwner ch =
-                shared_->channel_manager().GetChannel(channel);
-            voe::Channel* channelPtr = ch.channel();
-            if (channelPtr == NULL)
-            {
-                shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-                    "RegisterExternalMediaProcessing() failed to locate "
-                    "channel");
-                return -1;
-            }
-            return channelPtr->RegisterExternalMediaProcessing(type,
-                                                               processObject);
-        }
-        case kPlaybackAllChannelsMixed:
-        {
-            return shared_->output_mixer()->RegisterExternalMediaProcessing(
-                processObject);
-        }
-        case kRecordingAllChannelsMixed:
-        case kRecordingPreprocessing:
-        {
-            return shared_->transmit_mixer()->RegisterExternalMediaProcessing(
-                &processObject, type);
-        }
-    }
+    VoEMediaProcess& processObject) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
+               "RegisterExternalMediaProcessing(channel=%d, type=%d, "
+               "processObject=0x%x)",
+               channel, type, &processObject);
+  if (!shared_->statistics().Initialized()) {
+    shared_->SetLastError(VE_NOT_INITED, kTraceError);
     return -1;
+  }
+  switch (type) {
+    case kPlaybackPerChannel:
+    case kRecordingPerChannel: {
+      voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
+      voe::Channel* channelPtr = ch.channel();
+      if (channelPtr == NULL) {
+        shared_->SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "RegisterExternalMediaProcessing() failed to locate "
+            "channel");
+        return -1;
+      }
+      return channelPtr->RegisterExternalMediaProcessing(type, processObject);
+    }
+    case kPlaybackAllChannelsMixed: {
+      return shared_->output_mixer()->RegisterExternalMediaProcessing(
+          processObject);
+    }
+    case kRecordingAllChannelsMixed:
+    case kRecordingPreprocessing: {
+      return shared_->transmit_mixer()->RegisterExternalMediaProcessing(
+          &processObject, type);
+    }
+  }
+  return -1;
 }
 
 int VoEExternalMediaImpl::DeRegisterExternalMediaProcessing(
     int channel,
-    ProcessingTypes type)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
-                 "DeRegisterExternalMediaProcessing(channel=%d)", channel);
-    if (!shared_->statistics().Initialized())
-    {
-        shared_->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    switch (type)
-    {
-        case kPlaybackPerChannel:
-        case kRecordingPerChannel:
-        {
-            voe::ChannelOwner ch =
-                shared_->channel_manager().GetChannel(channel);
-            voe::Channel* channelPtr = ch.channel();
-            if (channelPtr == NULL)
-            {
-                shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-                    "RegisterExternalMediaProcessing() "
-                    "failed to locate channel");
-                return -1;
-            }
-            return channelPtr->DeRegisterExternalMediaProcessing(type);
-        }
-        case kPlaybackAllChannelsMixed:
-        {
-            return shared_->output_mixer()->
-                DeRegisterExternalMediaProcessing();
-        }
-        case kRecordingAllChannelsMixed:
-        case kRecordingPreprocessing:
-        {
-            return shared_->transmit_mixer()->
-                DeRegisterExternalMediaProcessing(type);
-        }
-    }
+    ProcessingTypes type) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
+               "DeRegisterExternalMediaProcessing(channel=%d)", channel);
+  if (!shared_->statistics().Initialized()) {
+    shared_->SetLastError(VE_NOT_INITED, kTraceError);
     return -1;
+  }
+  switch (type) {
+    case kPlaybackPerChannel:
+    case kRecordingPerChannel: {
+      voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
+      voe::Channel* channelPtr = ch.channel();
+      if (channelPtr == NULL) {
+        shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                              "RegisterExternalMediaProcessing() "
+                              "failed to locate channel");
+        return -1;
+      }
+      return channelPtr->DeRegisterExternalMediaProcessing(type);
+    }
+    case kPlaybackAllChannelsMixed: {
+      return shared_->output_mixer()->DeRegisterExternalMediaProcessing();
+    }
+    case kRecordingAllChannelsMixed:
+    case kRecordingPreprocessing: {
+      return shared_->transmit_mixer()->DeRegisterExternalMediaProcessing(type);
+    }
+  }
+  return -1;
 }
 
 int VoEExternalMediaImpl::GetAudioFrame(int channel, int desired_sample_rate_hz,
                                         AudioFrame* frame) {
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
-                 VoEId(shared_->instance_id(), channel),
-                 "GetAudioFrame(channel=%d, desired_sample_rate_hz=%d)",
-                 channel, desired_sample_rate_hz);
-    if (!shared_->statistics().Initialized())
-    {
-        shared_->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetAudioFrame() failed to locate channel");
-        return -1;
-    }
-    if (!channelPtr->ExternalMixing()) {
-        shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
-            "GetAudioFrame() was called on channel that is not"
-            " externally mixed.");
-        return -1;
-    }
-    if (!channelPtr->Playing()) {
-        shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
-            "GetAudioFrame() was called on channel that is not playing.");
-        return -1;
-    }
-    if (desired_sample_rate_hz == -1) {
-          shared_->SetLastError(VE_BAD_ARGUMENT, kTraceError,
-              "GetAudioFrame() was called with bad sample rate.");
-          return -1;
-    }
-    frame->sample_rate_hz_ = desired_sample_rate_hz == 0 ? -1 :
-                             desired_sample_rate_hz;
-    return channelPtr->GetAudioFrame(channel, *frame);
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+               VoEId(shared_->instance_id(), channel),
+               "GetAudioFrame(channel=%d, desired_sample_rate_hz=%d)", channel,
+               desired_sample_rate_hz);
+  if (!shared_->statistics().Initialized()) {
+    shared_->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetAudioFrame() failed to locate channel");
+    return -1;
+  }
+  if (!channelPtr->ExternalMixing()) {
+    shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
+                          "GetAudioFrame() was called on channel that is not"
+                          " externally mixed.");
+    return -1;
+  }
+  if (!channelPtr->Playing()) {
+    shared_->SetLastError(
+        VE_INVALID_OPERATION, kTraceError,
+        "GetAudioFrame() was called on channel that is not playing.");
+    return -1;
+  }
+  if (desired_sample_rate_hz == -1) {
+    shared_->SetLastError(VE_BAD_ARGUMENT, kTraceError,
+                          "GetAudioFrame() was called with bad sample rate.");
+    return -1;
+  }
+  frame->sample_rate_hz_ =
+      desired_sample_rate_hz == 0 ? -1 : desired_sample_rate_hz;
+  return channelPtr->GetAudioFrame(channel, *frame);
 }
 
 int VoEExternalMediaImpl::SetExternalMixing(int channel, bool enable) {
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
-                 VoEId(shared_->instance_id(), channel),
-                 "SetExternalMixing(channel=%d, enable=%d)", channel, enable);
-    if (!shared_->statistics().Initialized())
-    {
-        shared_->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetExternalMixing() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetExternalMixing(enable);
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+               VoEId(shared_->instance_id(), channel),
+               "SetExternalMixing(channel=%d, enable=%d)", channel, enable);
+  if (!shared_->statistics().Initialized()) {
+    shared_->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetExternalMixing() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetExternalMixing(enable);
 }
 
 #endif  // WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
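
A hedged sketch of pulling audio through the external-mixing path above
(PullOneFrame is a hypothetical caller; the include set and the channel
setup are assumed; the calls match the SetExternalMixing/GetAudioFrame
signatures in this file):

#include "webrtc/voice_engine/include/voe_external_media.h"
// webrtc::AudioFrame is defined in the WebRTC module headers; its exact
// include path is assumed to be pulled in by the interface header here.

// |voe| and |channel| are assumed to come from an initialized VoiceEngine;
// the channel must already be externally mixed and playing, per the checks
// in GetAudioFrame() above.
void PullOneFrame(webrtc::VoiceEngine* voe, int channel) {
  webrtc::VoEExternalMedia* xmedia =
      webrtc::VoEExternalMedia::GetInterface(voe);
  if (xmedia == NULL)
    return;
  webrtc::AudioFrame frame;
  // A desired rate of 0 keeps the channel's native rate; -1 is rejected as
  // a bad argument.
  if (xmedia->GetAudioFrame(channel, 16000, &frame) == 0) {
    // |frame| now holds a 10 ms block of the channel's audio at 16 kHz.
  }
  xmedia->Release();
}
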
diff --git a/webrtc/voice_engine/voe_external_media_impl.h b/webrtc/voice_engine/voe_external_media_impl.h
index 14da268..22e963b 100644
--- a/webrtc/voice_engine/voe_external_media_impl.h
+++ b/webrtc/voice_engine/voe_external_media_impl.h
@@ -17,30 +17,27 @@
 
 namespace webrtc {
 
-class VoEExternalMediaImpl : public VoEExternalMedia
-{
-public:
-    virtual int RegisterExternalMediaProcessing(
-        int channel,
-        ProcessingTypes type,
-        VoEMediaProcess& processObject);
+class VoEExternalMediaImpl : public VoEExternalMedia {
+ public:
+  int RegisterExternalMediaProcessing(int channel,
+                                      ProcessingTypes type,
+                                      VoEMediaProcess& processObject) override;
 
-    virtual int DeRegisterExternalMediaProcessing(
-        int channel,
-        ProcessingTypes type);
+  int DeRegisterExternalMediaProcessing(int channel,
+                                        ProcessingTypes type) override;
 
+  int GetAudioFrame(int channel,
+                    int desired_sample_rate_hz,
+                    AudioFrame* frame) override;
 
-    virtual int GetAudioFrame(int channel, int desired_sample_rate_hz,
-                              AudioFrame* frame);
+  int SetExternalMixing(int channel, bool enable) override;
 
-    virtual int SetExternalMixing(int channel, bool enable);
+ protected:
+  VoEExternalMediaImpl(voe::SharedData* shared);
+  ~VoEExternalMediaImpl() override;
 
-protected:
-    VoEExternalMediaImpl(voe::SharedData* shared);
-    virtual ~VoEExternalMediaImpl();
-
-private:
-    voe::SharedData* shared_;
+ private:
+  voe::SharedData* shared_;
 };
 
 }  // namespace webrtc
diff --git a/webrtc/voice_engine/voe_file_impl.cc b/webrtc/voice_engine/voe_file_impl.cc
index 95e9d21..ccf4ec0 100644
--- a/webrtc/voice_engine/voe_file_impl.cc
+++ b/webrtc/voice_engine/voe_file_impl.cc
@@ -22,71 +22,60 @@
 
 namespace webrtc {
 
-VoEFile* VoEFile::GetInterface(VoiceEngine* voiceEngine)
-{
+VoEFile* VoEFile::GetInterface(VoiceEngine* voiceEngine) {
 #ifndef WEBRTC_VOICE_ENGINE_FILE_API
-    return NULL;
+  return NULL;
 #else
-    if (NULL == voiceEngine)
-    {
-        return NULL;
-    }
-    VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
-    s->AddRef();
-    return s;
+  if (NULL == voiceEngine) {
+    return NULL;
+  }
+  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+  s->AddRef();
+  return s;
 #endif
 }
 
 #ifdef WEBRTC_VOICE_ENGINE_FILE_API
 
-VoEFileImpl::VoEFileImpl(voe::SharedData* shared) : _shared(shared)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoEFileImpl::VoEFileImpl() - ctor");
+VoEFileImpl::VoEFileImpl(voe::SharedData* shared) : _shared(shared) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoEFileImpl::VoEFileImpl() - ctor");
 }
 
-VoEFileImpl::~VoEFileImpl()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoEFileImpl::~VoEFileImpl() - dtor");
+VoEFileImpl::~VoEFileImpl() {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoEFileImpl::~VoEFileImpl() - dtor");
 }
 
-int VoEFileImpl::StartPlayingFileLocally(
-    int channel,
-    const char fileNameUTF8[1024],
-    bool loop, FileFormats format,
-    float volumeScaling,
-    int startPointMs,
-    int stopPointMs)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StartPlayingFileLocally(channel=%d, fileNameUTF8[]=%s, "
-                 "loop=%d, format=%d, volumeScaling=%5.3f, startPointMs=%d,"
-                 " stopPointMs=%d)",
-                 channel, fileNameUTF8, loop, format, volumeScaling,
-                 startPointMs, stopPointMs);
-    assert(1024 == FileWrapper::kMaxFileNameSize);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "StartPlayingFileLocally() failed to locate channel");
-        return -1;
-    }
+int VoEFileImpl::StartPlayingFileLocally(int channel,
+                                         const char fileNameUTF8[1024],
+                                         bool loop,
+                                         FileFormats format,
+                                         float volumeScaling,
+                                         int startPointMs,
+                                         int stopPointMs) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StartPlayingFileLocally(channel=%d, fileNameUTF8[]=%s, "
+               "loop=%d, format=%d, volumeScaling=%5.3f, startPointMs=%d,"
+               " stopPointMs=%d)",
+               channel, fileNameUTF8, loop, format, volumeScaling, startPointMs,
+               stopPointMs);
+  assert(1024 == FileWrapper::kMaxFileNameSize);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "StartPlayingFileLocally() failed to locate channel");
+    return -1;
+  }
 
-    return channelPtr->StartPlayingFileLocally(fileNameUTF8,
-                                               loop,
-                                               format,
-                                               startPointMs,
-                                               volumeScaling,
-                                               stopPointMs,
-                                               NULL);
+  return channelPtr->StartPlayingFileLocally(fileNameUTF8, loop, format,
+                                             startPointMs, volumeScaling,
+                                             stopPointMs, NULL);
 }
 
 int VoEFileImpl::StartPlayingFileLocally(int channel,
@@ -94,74 +83,61 @@
                                          FileFormats format,
                                          float volumeScaling,
                                          int startPointMs,
-                                         int stopPointMs)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StartPlayingFileLocally(channel=%d, stream, format=%d, "
-                 "volumeScaling=%5.3f, startPointMs=%d, stopPointMs=%d)",
-                 channel, format, volumeScaling, startPointMs, stopPointMs);
+                                         int stopPointMs) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StartPlayingFileLocally(channel=%d, stream, format=%d, "
+               "volumeScaling=%5.3f, startPointMs=%d, stopPointMs=%d)",
+               channel, format, volumeScaling, startPointMs, stopPointMs);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
 
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "StartPlayingFileLocally() failed to locate channel");
-        return -1;
-    }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "StartPlayingFileLocally() failed to locate channel");
+    return -1;
+  }
 
-    return channelPtr->StartPlayingFileLocally(stream,
-                                               format,
-                                               startPointMs,
-                                               volumeScaling,
-                                               stopPointMs,
-                                               NULL);
+  return channelPtr->StartPlayingFileLocally(stream, format, startPointMs,
+                                             volumeScaling, stopPointMs, NULL);
 }
 
-int VoEFileImpl::StopPlayingFileLocally(int channel)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StopPlayingFileLocally()");
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "StopPlayingFileLocally() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->StopPlayingFileLocally();
+int VoEFileImpl::StopPlayingFileLocally(int channel) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StopPlayingFileLocally()");
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "StopPlayingFileLocally() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->StopPlayingFileLocally();
 }
 
-int VoEFileImpl::IsPlayingFileLocally(int channel)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "IsPlayingFileLocally(channel=%d)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "StopPlayingFileLocally() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->IsPlayingFileLocally();
+int VoEFileImpl::IsPlayingFileLocally(int channel) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "IsPlayingFileLocally(channel=%d)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "StopPlayingFileLocally() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->IsPlayingFileLocally();
 }
 
 int VoEFileImpl::StartPlayingFileAsMicrophone(int channel,
@@ -169,426 +145,338 @@
                                               bool loop,
                                               bool mixWithMicrophone,
                                               FileFormats format,
-                                              float volumeScaling)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StartPlayingFileAsMicrophone(channel=%d, fileNameUTF8=%s, "
-                 "loop=%d, mixWithMicrophone=%d, format=%d, "
-                 "volumeScaling=%5.3f)",
-                 channel, fileNameUTF8, loop, mixWithMicrophone, format,
-                 volumeScaling);
-    assert(1024 == FileWrapper::kMaxFileNameSize);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
+                                              float volumeScaling) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StartPlayingFileAsMicrophone(channel=%d, fileNameUTF8=%s, "
+               "loop=%d, mixWithMicrophone=%d, format=%d, "
+               "volumeScaling=%5.3f)",
+               channel, fileNameUTF8, loop, mixWithMicrophone, format,
+               volumeScaling);
+  assert(1024 == FileWrapper::kMaxFileNameSize);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  const uint32_t startPointMs(0);
+  const uint32_t stopPointMs(0);
+
+  if (channel == -1) {
+    int res = _shared->transmit_mixer()->StartPlayingFileAsMicrophone(
+        fileNameUTF8, loop, format, startPointMs, volumeScaling, stopPointMs,
+        NULL);
+    if (res) {
+      WEBRTC_TRACE(
+          kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+          "StartPlayingFileAsMicrophone() failed to start playing file");
+      return (-1);
+    } else {
+      _shared->transmit_mixer()->SetMixWithMicStatus(mixWithMicrophone);
+      return (0);
+    }
+  } else {
+    // Add file after demultiplexing <=> affects one channel only
+    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+    voe::Channel* channelPtr = ch.channel();
+    if (channelPtr == NULL) {
+      _shared->SetLastError(
+          VE_CHANNEL_NOT_VALID, kTraceError,
+          "StartPlayingFileAsMicrophone() failed to locate channel");
+      return -1;
     }
 
-    const uint32_t startPointMs(0);
-    const uint32_t stopPointMs(0);
-
-    if (channel == -1)
-    {
-        int res = _shared->transmit_mixer()->StartPlayingFileAsMicrophone(
-            fileNameUTF8,
-            loop,
-            format,
-            startPointMs,
-            volumeScaling,
-            stopPointMs,
-            NULL);
-        if (res)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                VoEId(_shared->instance_id(), -1),
-                "StartPlayingFileAsMicrophone() failed to start playing file");
-            return(-1);
-        }
-        else
-        {
-            _shared->transmit_mixer()->SetMixWithMicStatus(mixWithMicrophone);
-            return(0);
-        }
+    int res = channelPtr->StartPlayingFileAsMicrophone(
+        fileNameUTF8, loop, format, startPointMs, volumeScaling, stopPointMs,
+        NULL);
+    if (res) {
+      WEBRTC_TRACE(
+          kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+          "StartPlayingFileAsMicrophone() failed to start playing file");
+      return -1;
+    } else {
+      channelPtr->SetMixWithMicStatus(mixWithMicrophone);
+      return 0;
     }
-    else
-    {
-        // Add file after demultiplexing <=> affects one channel only
-        voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-        voe::Channel* channelPtr = ch.channel();
-        if (channelPtr == NULL)
-        {
-            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-                "StartPlayingFileAsMicrophone() failed to locate channel");
-            return -1;
-        }
-
-        int res = channelPtr->StartPlayingFileAsMicrophone(fileNameUTF8,
-                                                           loop,
-                                                           format,
-                                                           startPointMs,
-                                                           volumeScaling,
-                                                           stopPointMs,
-                                                           NULL);
-        if (res)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                VoEId(_shared->instance_id(), -1),
-                "StartPlayingFileAsMicrophone() failed to start playing file");
-            return -1;
-        }
-        else
-        {
-            channelPtr->SetMixWithMicStatus(mixWithMicrophone);
-            return 0;
-        }
-    }
+  }
 }
 
 int VoEFileImpl::StartPlayingFileAsMicrophone(int channel,
                                               InStream* stream,
                                               bool mixWithMicrophone,
                                               FileFormats format,
-                                              float volumeScaling)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StartPlayingFileAsMicrophone(channel=%d, stream,"
-                 " mixWithMicrophone=%d, format=%d, volumeScaling=%5.3f)",
-                 channel, mixWithMicrophone, format, volumeScaling);
+                                              float volumeScaling) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StartPlayingFileAsMicrophone(channel=%d, stream,"
+               " mixWithMicrophone=%d, format=%d, volumeScaling=%5.3f)",
+               channel, mixWithMicrophone, format, volumeScaling);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  const uint32_t startPointMs(0);
+  const uint32_t stopPointMs(0);
+
+  if (channel == -1) {
+    int res = _shared->transmit_mixer()->StartPlayingFileAsMicrophone(
+        stream, format, startPointMs, volumeScaling, stopPointMs, NULL);
+    if (res) {
+      WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                   "StartPlayingFileAsMicrophone() failed to start "
+                   "playing stream");
+      return (-1);
+    } else {
+      _shared->transmit_mixer()->SetMixWithMicStatus(mixWithMicrophone);
+      return (0);
+    }
+  } else {
+    // Add file after demultiplexing <=> affects one channel only
+    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+    voe::Channel* channelPtr = ch.channel();
+    if (channelPtr == NULL) {
+      _shared->SetLastError(
+          VE_CHANNEL_NOT_VALID, kTraceError,
+          "StartPlayingFileAsMicrophone() failed to locate channel");
+      return -1;
     }
 
-    const uint32_t startPointMs(0);
-    const uint32_t stopPointMs(0);
-
-    if (channel == -1)
-    {
-        int res = _shared->transmit_mixer()->StartPlayingFileAsMicrophone(
-            stream,
-            format,
-            startPointMs,
-            volumeScaling,
-            stopPointMs,
-            NULL);
-        if (res)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                VoEId(_shared->instance_id(), -1),
-                "StartPlayingFileAsMicrophone() failed to start "
-                "playing stream");
-            return(-1);
-        }
-        else
-        {
-            _shared->transmit_mixer()->SetMixWithMicStatus(mixWithMicrophone);
-            return(0);
-        }
+    int res = channelPtr->StartPlayingFileAsMicrophone(
+        stream, format, startPointMs, volumeScaling, stopPointMs, NULL);
+    if (res) {
+      WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                   "StartPlayingFileAsMicrophone() failed to start "
+                   "playing stream");
+      return -1;
+    } else {
+      channelPtr->SetMixWithMicStatus(mixWithMicrophone);
+      return 0;
     }
-    else
-    {
-        // Add file after demultiplexing <=> affects one channel only
-        voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-        voe::Channel* channelPtr = ch.channel();
-        if (channelPtr == NULL)
-        {
-            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-                "StartPlayingFileAsMicrophone() failed to locate channel");
-            return -1;
-        }
-
-        int res = channelPtr->StartPlayingFileAsMicrophone(
-            stream, format, startPointMs, volumeScaling, stopPointMs, NULL);
-        if (res)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                VoEId(_shared->instance_id(), -1),
-                "StartPlayingFileAsMicrophone() failed to start "
-                "playing stream");
-            return -1;
-        }
-        else
-        {
-            channelPtr->SetMixWithMicStatus(mixWithMicrophone);
-            return 0;
-        }
-    }
+  }
 }
 
-int VoEFileImpl::StopPlayingFileAsMicrophone(int channel)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StopPlayingFileAsMicrophone(channel=%d)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
+int VoEFileImpl::StopPlayingFileAsMicrophone(int channel) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StopPlayingFileAsMicrophone(channel=%d)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (channel == -1) {
+    // Stop adding file before demultiplexing <=> affects all channels
+    return _shared->transmit_mixer()->StopPlayingFileAsMicrophone();
+  } else {
+    // Stop adding file after demultiplexing <=> affects one channel only
+    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+    voe::Channel* channelPtr = ch.channel();
+    if (channelPtr == NULL) {
+      _shared->SetLastError(
+          VE_CHANNEL_NOT_VALID, kTraceError,
+          "StopPlayingFileAsMicrophone() failed to locate channel");
+      return -1;
     }
-    if (channel == -1)
-    {
-        // Stop adding file before demultiplexing <=> affects all channels
-        return _shared->transmit_mixer()->StopPlayingFileAsMicrophone();
-    }
-    else
-    {
-        // Stop adding file after demultiplexing <=> affects one channel only
-        voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-        voe::Channel* channelPtr = ch.channel();
-        if (channelPtr == NULL)
-        {
-            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-                "StopPlayingFileAsMicrophone() failed to locate channel");
-            return -1;
-        }
-        return channelPtr->StopPlayingFileAsMicrophone();
-    }
+    return channelPtr->StopPlayingFileAsMicrophone();
+  }
 }
 
-int VoEFileImpl::IsPlayingFileAsMicrophone(int channel)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "IsPlayingFileAsMicrophone(channel=%d)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
+int VoEFileImpl::IsPlayingFileAsMicrophone(int channel) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "IsPlayingFileAsMicrophone(channel=%d)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (channel == -1) {
+    return _shared->transmit_mixer()->IsPlayingFileAsMicrophone();
+  } else {
+    // Query file-as-microphone state after demultiplexing <=> affects one channel only
+    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+    voe::Channel* channelPtr = ch.channel();
+    if (channelPtr == NULL) {
+      _shared->SetLastError(
+          VE_CHANNEL_NOT_VALID, kTraceError,
+          "IsPlayingFileAsMicrophone() failed to locate channel");
+      return -1;
     }
-    if (channel == -1)
-    {
-        return _shared->transmit_mixer()->IsPlayingFileAsMicrophone();
-    }
-    else
-    {
-        // Stop adding file after demultiplexing <=> affects one channel only
-        voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-        voe::Channel* channelPtr = ch.channel();
-        if (channelPtr == NULL)
-        {
-            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-                "IsPlayingFileAsMicrophone() failed to locate channel");
-            return -1;
-        }
-        return channelPtr->IsPlayingFileAsMicrophone();
-    }
+    return channelPtr->IsPlayingFileAsMicrophone();
+  }
 }
 
-int VoEFileImpl::StartRecordingPlayout(
-    int channel, const char* fileNameUTF8, CodecInst* compression,
-    int maxSizeBytes)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StartRecordingPlayout(channel=%d, fileNameUTF8=%s, "
-                 "compression, maxSizeBytes=%d)",
-                 channel, fileNameUTF8, maxSizeBytes);
-    assert(1024 == FileWrapper::kMaxFileNameSize);
+int VoEFileImpl::StartRecordingPlayout(int channel,
+                                       const char* fileNameUTF8,
+                                       CodecInst* compression,
+                                       int maxSizeBytes) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StartRecordingPlayout(channel=%d, fileNameUTF8=%s, "
+               "compression, maxSizeBytes=%d)",
+               channel, fileNameUTF8, maxSizeBytes);
+  assert(1024 == FileWrapper::kMaxFileNameSize);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (channel == -1) {
+    return _shared->output_mixer()->StartRecordingPlayout(fileNameUTF8,
+                                                          compression);
+  } else {
+    // Record after demultiplexing <=> affects one channel only
+    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+    voe::Channel* channelPtr = ch.channel();
+    if (channelPtr == NULL) {
+      _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                            "StartRecordingPlayout() failed to locate channel");
+      return -1;
     }
-    if (channel == -1)
-    {
-        return _shared->output_mixer()->StartRecordingPlayout
-          (fileNameUTF8, compression);
-    }
-    else
-    {
-        // Add file after demultiplexing <=> affects one channel only
-        voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-        voe::Channel* channelPtr = ch.channel();
-        if (channelPtr == NULL)
-        {
-            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-                "StartRecordingPlayout() failed to locate channel");
-            return -1;
-        }
-        return channelPtr->StartRecordingPlayout(fileNameUTF8, compression);
-    }
+    return channelPtr->StartRecordingPlayout(fileNameUTF8, compression);
+  }
 }
 
-int VoEFileImpl::StartRecordingPlayout(
-    int channel, OutStream* stream, CodecInst* compression)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StartRecordingPlayout(channel=%d, stream, compression)",
-                 channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
+int VoEFileImpl::StartRecordingPlayout(int channel,
+                                       OutStream* stream,
+                                       CodecInst* compression) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StartRecordingPlayout(channel=%d, stream, compression)",
+               channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (channel == -1) {
+    return _shared->output_mixer()->StartRecordingPlayout(stream, compression);
+  } else {
+    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+    voe::Channel* channelPtr = ch.channel();
+    if (channelPtr == NULL) {
+      _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                            "StartRecordingPlayout() failed to locate channel");
+      return -1;
     }
-    if (channel == -1)
-    {
-        return _shared->output_mixer()->
-            StartRecordingPlayout(stream, compression);
-    }
-    else
-    {
-        voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-        voe::Channel* channelPtr = ch.channel();
-        if (channelPtr == NULL)
-        {
-            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-                "StartRecordingPlayout() failed to locate channel");
-            return -1;
-        }
-        return channelPtr->StartRecordingPlayout(stream, compression);
-    }
+    return channelPtr->StartRecordingPlayout(stream, compression);
+  }
 }
 
-int VoEFileImpl::StopRecordingPlayout(int channel)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StopRecordingPlayout(channel=%d)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
+int VoEFileImpl::StopRecordingPlayout(int channel) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StopRecordingPlayout(channel=%d)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (channel == -1) {
+    return _shared->output_mixer()->StopRecordingPlayout();
+  } else {
+    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+    voe::Channel* channelPtr = ch.channel();
+    if (channelPtr == NULL) {
+      _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                            "StopRecordingPlayout() failed to locate channel");
+      return -1;
     }
-    if (channel == -1)
-    {
-        return _shared->output_mixer()->StopRecordingPlayout();
-    }
-    else
-    {
-        voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-        voe::Channel* channelPtr = ch.channel();
-        if (channelPtr == NULL)
-        {
-            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-                "StopRecordingPlayout() failed to locate channel");
-            return -1;
-        }
-        return channelPtr->StopRecordingPlayout();
-    }
+    return channelPtr->StopRecordingPlayout();
+  }
 }
 
-int VoEFileImpl::StartRecordingMicrophone(
-    const char* fileNameUTF8, CodecInst* compression, int maxSizeBytes)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StartRecordingMicrophone(fileNameUTF8=%s, compression, "
-                 "maxSizeBytes=%d)", fileNameUTF8, maxSizeBytes);
-    assert(1024 == FileWrapper::kMaxFileNameSize);
+int VoEFileImpl::StartRecordingMicrophone(const char* fileNameUTF8,
+                                          CodecInst* compression,
+                                          int maxSizeBytes) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StartRecordingMicrophone(fileNameUTF8=%s, compression, "
+               "maxSizeBytes=%d)",
+               fileNameUTF8, maxSizeBytes);
+  assert(1024 == FileWrapper::kMaxFileNameSize);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (_shared->transmit_mixer()->StartRecordingMicrophone(fileNameUTF8,
-                                                          compression))
-    {
-        WEBRTC_TRACE(kTraceError, kTraceVoice,
-            VoEId(_shared->instance_id(), -1),
-            "StartRecordingMicrophone() failed to start recording");
-        return -1;
-    }
-    if (_shared->audio_device()->Recording())
-    {
-        return 0;
-    }
-    if (!_shared->ext_recording())
-    {
-        if (_shared->audio_device()->InitRecording() != 0)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                VoEId(_shared->instance_id(), -1),
-                "StartRecordingMicrophone() failed to initialize recording");
-            return -1;
-        }
-        if (_shared->audio_device()->StartRecording() != 0)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                VoEId(_shared->instance_id(), -1),
-                "StartRecordingMicrophone() failed to start recording");
-            return -1;
-        }
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (_shared->transmit_mixer()->StartRecordingMicrophone(fileNameUTF8,
+                                                          compression)) {
+    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StartRecordingMicrophone() failed to start recording");
+    return -1;
+  }
+  if (_shared->audio_device()->Recording()) {
     return 0;
+  }
+  if (!_shared->ext_recording()) {
+    if (_shared->audio_device()->InitRecording() != 0) {
+      WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                   "StartRecordingMicrophone() failed to initialize recording");
+      return -1;
+    }
+    if (_shared->audio_device()->StartRecording() != 0) {
+      WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                   "StartRecordingMicrophone() failed to start recording");
+      return -1;
+    }
+  }
+  return 0;
 }
 
-int VoEFileImpl::StartRecordingMicrophone(
-    OutStream* stream, CodecInst* compression)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StartRecordingMicrophone(stream, compression)");
+int VoEFileImpl::StartRecordingMicrophone(OutStream* stream,
+                                          CodecInst* compression) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StartRecordingMicrophone(stream, compression)");
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (_shared->transmit_mixer()->StartRecordingMicrophone(stream,
-                                                          compression) == -1)
-    {
-        WEBRTC_TRACE(kTraceError, kTraceVoice,
-            VoEId(_shared->instance_id(), -1),
-            "StartRecordingMicrophone() failed to start recording");
-        return -1;
-    }
-    if (_shared->audio_device()->Recording())
-    {
-        return 0;
-    }
-    if (!_shared->ext_recording())
-    {
-        if (_shared->audio_device()->InitRecording() != 0)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                VoEId(_shared->instance_id(), -1),
-                "StartRecordingMicrophone() failed to initialize recording");
-            return -1;
-        }
-        if (_shared->audio_device()->StartRecording() != 0)
-        {
-            WEBRTC_TRACE(kTraceError, kTraceVoice,
-                VoEId(_shared->instance_id(), -1),
-                "StartRecordingMicrophone() failed to start recording");
-            return -1;
-        }
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (_shared->transmit_mixer()->StartRecordingMicrophone(stream,
+                                                          compression) == -1) {
+    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StartRecordingMicrophone() failed to start recording");
+    return -1;
+  }
+  if (_shared->audio_device()->Recording()) {
     return 0;
+  }
+  if (!_shared->ext_recording()) {
+    if (_shared->audio_device()->InitRecording() != 0) {
+      WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                   "StartRecordingMicrophone() failed to initialize recording");
+      return -1;
+    }
+    if (_shared->audio_device()->StartRecording() != 0) {
+      WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                   "StartRecordingMicrophone() failed to start recording");
+      return -1;
+    }
+  }
+  return 0;
 }
 
-int VoEFileImpl::StopRecordingMicrophone()
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StopRecordingMicrophone()");
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
+int VoEFileImpl::StopRecordingMicrophone() {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StopRecordingMicrophone()");
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  int err = 0;
+
+  // TODO(xians): consider removing Start/StopRecording() in
+  // Start/StopRecordingMicrophone() if no channel is recording.
+  if (_shared->NumOfSendingChannels() == 0 &&
+      _shared->audio_device()->Recording()) {
+    // Stop audio-device recording if no channel is recording
+    if (_shared->audio_device()->StopRecording() != 0) {
+      _shared->SetLastError(
+          VE_CANNOT_STOP_RECORDING, kTraceError,
+          "StopRecordingMicrophone() failed to stop recording");
+      err = -1;
     }
+  }
 
-    int err = 0;
+  if (_shared->transmit_mixer()->StopRecordingMicrophone() != 0) {
+    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StopRecordingMicrophone() failed to stop recording to mixer");
+    err = -1;
+  }
 
-    // TODO(xians): consider removing Start/StopRecording() in
-    // Start/StopRecordingMicrophone() if no channel is recording.
-    if (_shared->NumOfSendingChannels() == 0 &&
-        _shared->audio_device()->Recording())
-    {
-        // Stop audio-device recording if no channel is recording
-        if (_shared->audio_device()->StopRecording() != 0)
-        {
-            _shared->SetLastError(VE_CANNOT_STOP_RECORDING, kTraceError,
-                "StopRecordingMicrophone() failed to stop recording");
-            err = -1;
-        }
-    }
-
-    if (_shared->transmit_mixer()->StopRecordingMicrophone() != 0)
-    {
-        WEBRTC_TRACE(kTraceError, kTraceVoice,
-                VoEId(_shared->instance_id(), -1),
-                "StopRecordingMicrophone() failed to stop recording to mixer");
-        err = -1;
-    }
-
-    return err;
+  return err;
 }
 
 #endif  // #ifdef WEBRTC_VOICE_ENGINE_FILE_API
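
For reference, a minimal caller-side sketch of the file-as-microphone path reformatted above, under a few assumptions: the voe_file.h include path, the GetInterface()/Release() factory pattern for VoEFile (matching the other sub-APIs), an existing VoiceEngine instance and channel id, and an invented file name and helper name. Passing channel == -1 routes the file through the transmit mixer (all sending channels), while a concrete channel id affects that channel only.

#include "webrtc/voice_engine/include/voe_file.h"  // assumed header location

// Hypothetical helper; "/tmp/prompt.pcm" is illustrative only.
int PlayPromptIntoChannel(webrtc::VoiceEngine* voe, int channel) {
  webrtc::VoEFile* file = webrtc::VoEFile::GetInterface(voe);
  if (file == NULL)
    return -1;
  // channel == -1 would instead feed the file to the transmit mixer,
  // i.e. every sending channel; a specific id affects that channel only.
  if (file->StartPlayingFileAsMicrophone(channel, "/tmp/prompt.pcm",
                                         false /* loop */,
                                         true /* mixWithMicrophone */) != 0) {
    file->Release();
    return -1;
  }
  // ... later, when the prompt is no longer needed ...
  file->StopPlayingFileAsMicrophone(channel);
  file->Release();  // Release() assumed to follow the other sub-APIs.
  return 0;
}
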
diff --git a/webrtc/voice_engine/voe_file_impl.h b/webrtc/voice_engine/voe_file_impl.h
index 584d0a1..5d28947 100644
--- a/webrtc/voice_engine/voe_file_impl.h
+++ b/webrtc/voice_engine/voe_file_impl.h
@@ -16,82 +16,78 @@
 
 namespace webrtc {
 
-class VoEFileImpl : public VoEFile
-{
-public:
-    // Playout file locally
+class VoEFileImpl : public VoEFile {
+ public:
+  // Playout file locally
 
-    virtual int StartPlayingFileLocally(
-        int channel,
-        const char fileNameUTF8[1024],
-        bool loop = false,
-        FileFormats format = kFileFormatPcm16kHzFile,
-        float volumeScaling = 1.0,
-        int startPointMs = 0,
-        int stopPointMs = 0);
+  int StartPlayingFileLocally(int channel,
+                              const char fileNameUTF8[1024],
+                              bool loop = false,
+                              FileFormats format = kFileFormatPcm16kHzFile,
+                              float volumeScaling = 1.0,
+                              int startPointMs = 0,
+                              int stopPointMs = 0) override;
 
-    virtual int StartPlayingFileLocally(
-        int channel,
-        InStream* stream,
-        FileFormats format = kFileFormatPcm16kHzFile,
-        float volumeScaling = 1.0,
-        int startPointMs = 0, int stopPointMs = 0);
+  int StartPlayingFileLocally(int channel,
+                              InStream* stream,
+                              FileFormats format = kFileFormatPcm16kHzFile,
+                              float volumeScaling = 1.0,
+                              int startPointMs = 0,
+                              int stopPointMs = 0) override;
 
-    virtual int StopPlayingFileLocally(int channel);
+  int StopPlayingFileLocally(int channel) override;
 
-    virtual int IsPlayingFileLocally(int channel);
+  int IsPlayingFileLocally(int channel) override;
 
-    // Use file as microphone input
+  // Use file as microphone input
 
-    virtual int StartPlayingFileAsMicrophone(
-        int channel,
-        const char fileNameUTF8[1024],
-        bool loop = false ,
-        bool mixWithMicrophone = false,
-        FileFormats format = kFileFormatPcm16kHzFile,
-        float volumeScaling = 1.0);
+  int StartPlayingFileAsMicrophone(int channel,
+                                   const char fileNameUTF8[1024],
+                                   bool loop = false,
+                                   bool mixWithMicrophone = false,
+                                   FileFormats format = kFileFormatPcm16kHzFile,
+                                   float volumeScaling = 1.0) override;
 
-    virtual int StartPlayingFileAsMicrophone(
-        int channel,
-        InStream* stream,
-        bool mixWithMicrophone = false,
-        FileFormats format = kFileFormatPcm16kHzFile,
-        float volumeScaling = 1.0);
+  int StartPlayingFileAsMicrophone(int channel,
+                                   InStream* stream,
+                                   bool mixWithMicrophone = false,
+                                   FileFormats format = kFileFormatPcm16kHzFile,
+                                   float volumeScaling = 1.0) override;
 
-    virtual int StopPlayingFileAsMicrophone(int channel);
+  int StopPlayingFileAsMicrophone(int channel) override;
 
-    virtual int IsPlayingFileAsMicrophone(int channel);
+  int IsPlayingFileAsMicrophone(int channel) override;
 
-    // Record speaker signal to file
+  // Record speaker signal to file
 
-    virtual int StartRecordingPlayout(int channel,
-                                      const char* fileNameUTF8,
-                                      CodecInst* compression = NULL,
-                                      int maxSizeBytes = -1);
+  int StartRecordingPlayout(int channel,
+                            const char* fileNameUTF8,
+                            CodecInst* compression = NULL,
+                            int maxSizeBytes = -1) override;
 
-    virtual int StartRecordingPlayout(int channel,
-                                      OutStream* stream,
-                                      CodecInst* compression = NULL);
+  int StartRecordingPlayout(int channel,
+                            OutStream* stream,
+                            CodecInst* compression = NULL) override;
 
-    virtual int StopRecordingPlayout(int channel);
+  int StopRecordingPlayout(int channel) override;
 
-    // Record microphone signal to file
+  // Record microphone signal to file
 
-    virtual int StartRecordingMicrophone(const char* fileNameUTF8,
-                                         CodecInst* compression = NULL,
-                                         int maxSizeBytes = -1);
+  int StartRecordingMicrophone(const char* fileNameUTF8,
+                               CodecInst* compression = NULL,
+                               int maxSizeBytes = -1) override;
 
-    virtual int StartRecordingMicrophone(OutStream* stream,
-                                         CodecInst* compression = NULL);
+  int StartRecordingMicrophone(OutStream* stream,
+                               CodecInst* compression = NULL) override;
 
-    virtual int StopRecordingMicrophone();
+  int StopRecordingMicrophone() override;
 
-protected:
-    VoEFileImpl(voe::SharedData* shared);
-    virtual ~VoEFileImpl();
+ protected:
+  VoEFileImpl(voe::SharedData* shared);
+  ~VoEFileImpl() override;
 
-private:
-    voe::SharedData* _shared;
+ private:
+  voe::SharedData* _shared;
 };
 
 }  // namespace webrtc
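
The header above now marks the derived-class methods with override instead of repeating virtual. A small self-contained example (illustrative types only, not VoE classes) of what that buys: a later signature drift in the base class becomes a compile error instead of silently introducing a new, unrelated virtual function.

#include <cstdio>

struct Recorder {
  virtual int StopRecording(int channel) { return channel; }
  virtual ~Recorder() {}
};

struct FileRecorder : Recorder {
  // If the base signature later drifts (say, to StopRecording(long)),
  // this declaration stops compiling rather than quietly shadowing it.
  int StopRecording(int channel) override { return 2 * channel; }
};

int main() {
  FileRecorder impl;
  Recorder* base = &impl;
  std::printf("%d\n", base->StopRecording(3));  // prints 6 via the override
  return 0;
}
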
diff --git a/webrtc/voice_engine/voe_hardware_impl.cc b/webrtc/voice_engine/voe_hardware_impl.cc
index e8388ee..8e9485c 100644
--- a/webrtc/voice_engine/voe_hardware_impl.cc
+++ b/webrtc/voice_engine/voe_hardware_impl.cc
@@ -17,515 +17,453 @@
 #include "webrtc/voice_engine/include/voe_errors.h"
 #include "webrtc/voice_engine/voice_engine_impl.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
-VoEHardware* VoEHardware::GetInterface(VoiceEngine* voiceEngine)
-{
+VoEHardware* VoEHardware::GetInterface(VoiceEngine* voiceEngine) {
 #ifndef WEBRTC_VOICE_ENGINE_HARDWARE_API
-    return NULL;
+  return NULL;
 #else
-    if (NULL == voiceEngine)
-    {
-        return NULL;
-    }
-    VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
-    s->AddRef();
-    return s;
+  if (NULL == voiceEngine) {
+    return NULL;
+  }
+  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+  s->AddRef();
+  return s;
 #endif
 }
 
 #ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
 
-VoEHardwareImpl::VoEHardwareImpl(voe::SharedData* shared) : _shared(shared)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoEHardwareImpl() - ctor");
+VoEHardwareImpl::VoEHardwareImpl(voe::SharedData* shared) : _shared(shared) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoEHardwareImpl() - ctor");
 }
 
-VoEHardwareImpl::~VoEHardwareImpl()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "~VoEHardwareImpl() - dtor");
+VoEHardwareImpl::~VoEHardwareImpl() {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "~VoEHardwareImpl() - dtor");
 }
 
-int VoEHardwareImpl::SetAudioDeviceLayer(AudioLayers audioLayer)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetAudioDeviceLayer(audioLayer=%d)", audioLayer);
+int VoEHardwareImpl::SetAudioDeviceLayer(AudioLayers audioLayer) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetAudioDeviceLayer(audioLayer=%d)", audioLayer);
 
-    // Don't allow a change if VoE is initialized
-    if (_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_ALREADY_INITED, kTraceError);
-        return -1;
-    }
+  // Don't allow a change if VoE is initialized
+  if (_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_ALREADY_INITED, kTraceError);
+    return -1;
+  }
 
-    // Map to AudioDeviceModule::AudioLayer
-    AudioDeviceModule::AudioLayer
-        wantedLayer(AudioDeviceModule::kPlatformDefaultAudio);
-    switch (audioLayer)
-    {
-        case kAudioPlatformDefault:
-            // already set above
-            break;
-        case kAudioWindowsCore:
-            wantedLayer = AudioDeviceModule::kWindowsCoreAudio;
-            break;
-        case kAudioWindowsWave:
-            wantedLayer = AudioDeviceModule::kWindowsWaveAudio;
-            break;
-        case kAudioLinuxAlsa:
-            wantedLayer = AudioDeviceModule::kLinuxAlsaAudio;
-            break;
-        case kAudioLinuxPulse:
-            wantedLayer = AudioDeviceModule::kLinuxPulseAudio;
-            break;
-    }
+  // Map to AudioDeviceModule::AudioLayer
+  AudioDeviceModule::AudioLayer wantedLayer(
+      AudioDeviceModule::kPlatformDefaultAudio);
+  switch (audioLayer) {
+    case kAudioPlatformDefault:
+      // already set above
+      break;
+    case kAudioWindowsCore:
+      wantedLayer = AudioDeviceModule::kWindowsCoreAudio;
+      break;
+    case kAudioWindowsWave:
+      wantedLayer = AudioDeviceModule::kWindowsWaveAudio;
+      break;
+    case kAudioLinuxAlsa:
+      wantedLayer = AudioDeviceModule::kLinuxAlsaAudio;
+      break;
+    case kAudioLinuxPulse:
+      wantedLayer = AudioDeviceModule::kLinuxPulseAudio;
+      break;
+  }
 
-    // Save the audio device layer for Init()
-    _shared->set_audio_device_layer(wantedLayer);
+  // Save the audio device layer for Init()
+  _shared->set_audio_device_layer(wantedLayer);
 
-    return 0;
+  return 0;
 }
 
-int VoEHardwareImpl::GetAudioDeviceLayer(AudioLayers& audioLayer)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+int VoEHardwareImpl::GetAudioDeviceLayer(AudioLayers& audioLayer) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetAudioDeviceLayer(devices=?)");
 
-    // Can always be called regardless of VoE state
+  // Can always be called regardless of VoE state
 
-    AudioDeviceModule::AudioLayer
-        activeLayer(AudioDeviceModule::kPlatformDefaultAudio);
+  AudioDeviceModule::AudioLayer activeLayer(
+      AudioDeviceModule::kPlatformDefaultAudio);
 
-    if (_shared->audio_device())
-    {
-        // Get active audio layer from ADM
-        if (_shared->audio_device()->ActiveAudioLayer(&activeLayer) != 0)
-        {
-            _shared->SetLastError(VE_UNDEFINED_SC_ERR, kTraceError,
-                "  Audio Device error");
-            return -1;
-        }
+  if (_shared->audio_device()) {
+    // Get active audio layer from ADM
+    if (_shared->audio_device()->ActiveAudioLayer(&activeLayer) != 0) {
+      _shared->SetLastError(VE_UNDEFINED_SC_ERR, kTraceError,
+                            "  Audio Device error");
+      return -1;
     }
-    else
-    {
-        // Return VoE's internal layer setting
-        activeLayer = _shared->audio_device_layer();
-    }
+  } else {
+    // Return VoE's internal layer setting
+    activeLayer = _shared->audio_device_layer();
+  }
 
-    // Map to AudioLayers
-    switch (activeLayer)
-    {
-        case AudioDeviceModule::kPlatformDefaultAudio:
-            audioLayer = kAudioPlatformDefault;
-            break;
-        case AudioDeviceModule::kWindowsCoreAudio:
-            audioLayer = kAudioWindowsCore;
-            break;
-        case AudioDeviceModule::kWindowsWaveAudio:
-            audioLayer = kAudioWindowsWave;
-            break;
-        case AudioDeviceModule::kLinuxAlsaAudio:
-            audioLayer = kAudioLinuxAlsa;
-            break;
-        case AudioDeviceModule::kLinuxPulseAudio:
-            audioLayer = kAudioLinuxPulse;
-            break;
-        default:
-            _shared->SetLastError(VE_UNDEFINED_SC_ERR, kTraceError,
-                "  unknown audio layer");
-    }
+  // Map to AudioLayers
+  switch (activeLayer) {
+    case AudioDeviceModule::kPlatformDefaultAudio:
+      audioLayer = kAudioPlatformDefault;
+      break;
+    case AudioDeviceModule::kWindowsCoreAudio:
+      audioLayer = kAudioWindowsCore;
+      break;
+    case AudioDeviceModule::kWindowsWaveAudio:
+      audioLayer = kAudioWindowsWave;
+      break;
+    case AudioDeviceModule::kLinuxAlsaAudio:
+      audioLayer = kAudioLinuxAlsa;
+      break;
+    case AudioDeviceModule::kLinuxPulseAudio:
+      audioLayer = kAudioLinuxPulse;
+      break;
+    default:
+      _shared->SetLastError(VE_UNDEFINED_SC_ERR, kTraceError,
+                            "  unknown audio layer");
+  }
 
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "  Output: audioLayer=%d", audioLayer);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "  Output: audioLayer=%d", audioLayer);
 
-    return 0;
+  return 0;
 }
-int VoEHardwareImpl::GetNumOfRecordingDevices(int& devices)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetNumOfRecordingDevices(devices=?)");
+int VoEHardwareImpl::GetNumOfRecordingDevices(int& devices) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetNumOfRecordingDevices(devices=?)");
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
 
-    devices = static_cast<int> (_shared->audio_device()->RecordingDevices());
+  devices = static_cast<int>(_shared->audio_device()->RecordingDevices());
 
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1), "  Output: devices=%d", devices);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "  Output: devices=%d", devices);
 
-    return 0;
+  return 0;
 }
 
-int VoEHardwareImpl::GetNumOfPlayoutDevices(int& devices)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetNumOfPlayoutDevices(devices=?)");
+int VoEHardwareImpl::GetNumOfPlayoutDevices(int& devices) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetNumOfPlayoutDevices(devices=?)");
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
 
-    devices = static_cast<int> (_shared->audio_device()->PlayoutDevices());
+  devices = static_cast<int>(_shared->audio_device()->PlayoutDevices());
 
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "  Output: devices=%d", devices);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "  Output: devices=%d", devices);
 
-    return 0;
+  return 0;
 }
 
 int VoEHardwareImpl::GetRecordingDeviceName(int index,
                                             char strNameUTF8[128],
-                                            char strGuidUTF8[128])
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetRecordingDeviceName(index=%d)", index);
+                                            char strGuidUTF8[128]) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRecordingDeviceName(index=%d)", index);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (strNameUTF8 == NULL)
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "GetRecordingDeviceName() invalid argument");
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (strNameUTF8 == NULL) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "GetRecordingDeviceName() invalid argument");
+    return -1;
+  }
 
-    // Note that strGuidUTF8 is allowed to be NULL
+  // Note that strGuidUTF8 is allowed to be NULL
 
-    // Init len variable to length of supplied vectors
-    const uint16_t strLen = 128;
+  // Init len variable to length of supplied vectors
+  const uint16_t strLen = 128;
 
-    // Check if length has been changed in module
-    assert(strLen == kAdmMaxDeviceNameSize);
-    assert(strLen == kAdmMaxGuidSize);
+  // Check if length has been changed in module
+  assert(strLen == kAdmMaxDeviceNameSize);
+  assert(strLen == kAdmMaxGuidSize);
 
-    char name[strLen];
-    char guid[strLen];
+  char name[strLen];
+  char guid[strLen];
 
-    // Get names from module
-    if (_shared->audio_device()->RecordingDeviceName(index, name, guid) != 0)
-    {
-        _shared->SetLastError(VE_CANNOT_RETRIEVE_DEVICE_NAME, kTraceError,
-            "GetRecordingDeviceName() failed to get device name");
-        return -1;
-    }
+  // Get names from module
+  if (_shared->audio_device()->RecordingDeviceName(index, name, guid) != 0) {
+    _shared->SetLastError(VE_CANNOT_RETRIEVE_DEVICE_NAME, kTraceError,
+                          "GetRecordingDeviceName() failed to get device name");
+    return -1;
+  }
 
-    // Copy to vectors supplied by user
-    strncpy(strNameUTF8, name, strLen);
+  // Copy to vectors supplied by user
+  strncpy(strNameUTF8, name, strLen);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "  Output: strNameUTF8=%s", strNameUTF8);
+
+  if (strGuidUTF8 != NULL) {
+    strncpy(strGuidUTF8, guid, strLen);
     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "  Output: strNameUTF8=%s", strNameUTF8);
+                 VoEId(_shared->instance_id(), -1), "  Output: strGuidUTF8=%s",
+                 strGuidUTF8);
+  }
 
-    if (strGuidUTF8 != NULL)
-    {
-        strncpy(strGuidUTF8, guid, strLen);
-        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-            VoEId(_shared->instance_id(), -1),
-            "  Output: strGuidUTF8=%s", strGuidUTF8);
-    }
-
-    return 0;
+  return 0;
 }
 
 int VoEHardwareImpl::GetPlayoutDeviceName(int index,
                                           char strNameUTF8[128],
-                                          char strGuidUTF8[128])
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetPlayoutDeviceName(index=%d)", index);
+                                          char strGuidUTF8[128]) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetPlayoutDeviceName(index=%d)", index);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (strNameUTF8 == NULL)
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "GetPlayoutDeviceName() invalid argument");
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (strNameUTF8 == NULL) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "GetPlayoutDeviceName() invalid argument");
+    return -1;
+  }
 
-    // Note that strGuidUTF8 is allowed to be NULL
+  // Note that strGuidUTF8 is allowed to be NULL
 
-    // Init len variable to length of supplied vectors
-    const uint16_t strLen = 128;
+  // Init len variable to length of supplied vectors
+  const uint16_t strLen = 128;
 
-    // Check if length has been changed in module
-    assert(strLen == kAdmMaxDeviceNameSize);
-    assert(strLen == kAdmMaxGuidSize);
+  // Check if length has been changed in module
+  assert(strLen == kAdmMaxDeviceNameSize);
+  assert(strLen == kAdmMaxGuidSize);
 
-    char name[strLen];
-    char guid[strLen];
+  char name[strLen];
+  char guid[strLen];
 
-    // Get names from module
-    if (_shared->audio_device()->PlayoutDeviceName(index, name, guid) != 0)
-    {
-        _shared->SetLastError(VE_CANNOT_RETRIEVE_DEVICE_NAME, kTraceError,
-            "GetPlayoutDeviceName() failed to get device name");
-        return -1;
-    }
+  // Get names from module
+  if (_shared->audio_device()->PlayoutDeviceName(index, name, guid) != 0) {
+    _shared->SetLastError(VE_CANNOT_RETRIEVE_DEVICE_NAME, kTraceError,
+                          "GetPlayoutDeviceName() failed to get device name");
+    return -1;
+  }
 
-    // Copy to vectors supplied by user
-    strncpy(strNameUTF8, name, strLen);
+  // Copy to vectors supplied by user
+  strncpy(strNameUTF8, name, strLen);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "  Output: strNameUTF8=%s", strNameUTF8);
+
+  if (strGuidUTF8 != NULL) {
+    strncpy(strGuidUTF8, guid, strLen);
     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "  Output: strNameUTF8=%s", strNameUTF8);
+                 VoEId(_shared->instance_id(), -1), "  Output: strGuidUTF8=%s",
+                 strGuidUTF8);
+  }
 
-    if (strGuidUTF8 != NULL)
-    {
-        strncpy(strGuidUTF8, guid, strLen);
-        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-            VoEId(_shared->instance_id(), -1),
-            "  Output: strGuidUTF8=%s", strGuidUTF8);
-    }
-
-    return 0;
+  return 0;
 }
 
 int VoEHardwareImpl::SetRecordingDevice(int index,
-                                        StereoChannel recordingChannel)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetRecordingDevice(index=%d, recordingChannel=%d)",
-                 index, (int) recordingChannel);
-    CriticalSectionScoped cs(_shared->crit_sec());
+                                        StereoChannel recordingChannel) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetRecordingDevice(index=%d, recordingChannel=%d)", index,
+               (int)recordingChannel);
+  CriticalSectionScoped cs(_shared->crit_sec());
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  bool isRecording(false);
+
+  // Store state about activated recording to be able to restore it after the
+  // recording device has been modified.
+  if (_shared->audio_device()->Recording()) {
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetRecordingDevice() device is modified while recording"
+                 " is active...");
+    isRecording = true;
+    if (_shared->audio_device()->StopRecording() == -1) {
+      _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+                            "SetRecordingDevice() unable to stop recording");
+      return -1;
+    }
+  }
+
+  // We let the module do the index sanity check
+
+  // Set recording channel
+  AudioDeviceModule::ChannelType recCh = AudioDeviceModule::kChannelBoth;
+  switch (recordingChannel) {
+    case kStereoLeft:
+      recCh = AudioDeviceModule::kChannelLeft;
+      break;
+    case kStereoRight:
+      recCh = AudioDeviceModule::kChannelRight;
+      break;
+    case kStereoBoth:
+      // default setting kChannelBoth (<=> mono)
+      break;
+  }
+
+  if (_shared->audio_device()->SetRecordingChannel(recCh) != 0) {
+    _shared->SetLastError(
+        VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
+        "SetRecordingChannel() unable to set the recording channel");
+  }
+
+  // Map indices to unsigned since underlying functions need that
+  uint16_t indexU = static_cast<uint16_t>(index);
+
+  int32_t res(0);
+
+  if (index == -1) {
+    res = _shared->audio_device()->SetRecordingDevice(
+        AudioDeviceModule::kDefaultCommunicationDevice);
+  } else if (index == -2) {
+    res = _shared->audio_device()->SetRecordingDevice(
+        AudioDeviceModule::kDefaultDevice);
+  } else {
+    res = _shared->audio_device()->SetRecordingDevice(indexU);
+  }
+
+  if (res != 0) {
+    _shared->SetLastError(
+        VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+        "SetRecordingDevice() unable to set the recording device");
+    return -1;
+  }
+
+  // Init microphone, so user can do volume settings etc
+  if (_shared->audio_device()->InitMicrophone() == -1) {
+    _shared->SetLastError(VE_CANNOT_ACCESS_MIC_VOL, kTraceWarning,
+                          "SetRecordingDevice() cannot access microphone");
+  }
+
+  // Set number of channels
+  bool available = false;
+  if (_shared->audio_device()->StereoRecordingIsAvailable(&available) != 0) {
+    _shared->SetLastError(
+        VE_SOUNDCARD_ERROR, kTraceWarning,
+        "StereoRecordingIsAvailable() failed to query stereo recording");
+  }
+
+  if (_shared->audio_device()->SetStereoRecording(available) != 0) {
+    _shared->SetLastError(
+        VE_SOUNDCARD_ERROR, kTraceWarning,
+        "SetRecordingDevice() failed to set mono recording mode");
+  }
+
+  // Restore recording if it was enabled already when calling this function.
+  if (isRecording) {
+    if (!_shared->ext_recording()) {
+      WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                   "SetRecordingDevice() recording is now being restored...");
+      if (_shared->audio_device()->InitRecording() != 0) {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+                     VoEId(_shared->instance_id(), -1),
+                     "SetRecordingDevice() failed to initialize recording");
         return -1;
-    }
-
-    bool isRecording(false);
-
-    // Store state about activated recording to be able to restore it after the
-    // recording device has been modified.
-    if (_shared->audio_device()->Recording())
-    {
-        WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                     "SetRecordingDevice() device is modified while recording"
-                     " is active...");
-        isRecording = true;
-        if (_shared->audio_device()->StopRecording() == -1)
-        {
-            _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
-                "SetRecordingDevice() unable to stop recording");
-            return -1;
-        }
-    }
-
-    // We let the module do the index sanity
-
-    // Set recording channel
-    AudioDeviceModule::ChannelType recCh =
-        AudioDeviceModule::kChannelBoth;
-    switch (recordingChannel)
-    {
-        case kStereoLeft:
-            recCh = AudioDeviceModule::kChannelLeft;
-            break;
-        case kStereoRight:
-            recCh = AudioDeviceModule::kChannelRight;
-            break;
-        case kStereoBoth:
-            // default setting kChannelBoth (<=> mono)
-            break;
-    }
-
-    if (_shared->audio_device()->SetRecordingChannel(recCh) != 0) {
-      _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
-          "SetRecordingChannel() unable to set the recording channel");
-    }
-
-    // Map indices to unsigned since underlying functions need that
-    uint16_t indexU = static_cast<uint16_t> (index);
-
-    int32_t res(0);
-
-    if (index == -1)
-    {
-        res = _shared->audio_device()->SetRecordingDevice(
-            AudioDeviceModule::kDefaultCommunicationDevice);
-    }
-    else if (index == -2)
-    {
-        res = _shared->audio_device()->SetRecordingDevice(
-            AudioDeviceModule::kDefaultDevice);
-    }
-    else
-    {
-        res = _shared->audio_device()->SetRecordingDevice(indexU);
-    }
-
-    if (res != 0)
-    {
-        _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
-            "SetRecordingDevice() unable to set the recording device");
+      }
+      if (_shared->audio_device()->StartRecording() != 0) {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+                     VoEId(_shared->instance_id(), -1),
+                     "SetRecordingDevice() failed to start recording");
         return -1;
+      }
     }
+  }
 
-    // Init microphone, so user can do volume settings etc
-    if (_shared->audio_device()->InitMicrophone() == -1)
-    {
-        _shared->SetLastError(VE_CANNOT_ACCESS_MIC_VOL, kTraceWarning,
-            "SetRecordingDevice() cannot access microphone");
-    }
-
-    // Set number of channels
-    bool available = false;
-    if (_shared->audio_device()->StereoRecordingIsAvailable(&available) != 0) {
-      _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
-          "StereoRecordingIsAvailable() failed to query stereo recording");
-    }
-
-    if (_shared->audio_device()->SetStereoRecording(available) != 0)
-    {
-        _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
-            "SetRecordingDevice() failed to set mono recording mode");
-    }
-
-    // Restore recording if it was enabled already when calling this function.
-    if (isRecording)
-    {
-        if (!_shared->ext_recording())
-        {
-            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
-                VoEId(_shared->instance_id(), -1),
-                "SetRecordingDevice() recording is now being restored...");
-            if (_shared->audio_device()->InitRecording() != 0)
-            {
-                WEBRTC_TRACE(kTraceError, kTraceVoice,
-                    VoEId(_shared->instance_id(), -1),
-                    "SetRecordingDevice() failed to initialize recording");
-                return -1;
-            }
-            if (_shared->audio_device()->StartRecording() != 0)
-            {
-                WEBRTC_TRACE(kTraceError, kTraceVoice,
-                             VoEId(_shared->instance_id(), -1),
-                             "SetRecordingDevice() failed to start recording");
-                return -1;
-            }
-        }
-    }
-
-    return 0;
+  return 0;
 }
 
-int VoEHardwareImpl::SetPlayoutDevice(int index)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetPlayoutDevice(index=%d)", index);
-    CriticalSectionScoped cs(_shared->crit_sec());
+int VoEHardwareImpl::SetPlayoutDevice(int index) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetPlayoutDevice(index=%d)", index);
+  CriticalSectionScoped cs(_shared->crit_sec());
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  bool isPlaying(false);
+
+  // Store state about activated playout to be able to restore it after the
+  // playout device has been modified.
+  if (_shared->audio_device()->Playing()) {
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetPlayoutDevice() device is modified while playout is "
+                 "active...");
+    isPlaying = true;
+    if (_shared->audio_device()->StopPlayout() == -1) {
+      _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+                            "SetPlayoutDevice() unable to stop playout");
+      return -1;
+    }
+  }
+
+  // We let the module do the index sanity check
+
+  // Map indices to unsigned since underlying functions need that
+  uint16_t indexU = static_cast<uint16_t>(index);
+
+  int32_t res(0);
+
+  if (index == -1) {
+    res = _shared->audio_device()->SetPlayoutDevice(
+        AudioDeviceModule::kDefaultCommunicationDevice);
+  } else if (index == -2) {
+    res = _shared->audio_device()->SetPlayoutDevice(
+        AudioDeviceModule::kDefaultDevice);
+  } else {
+    res = _shared->audio_device()->SetPlayoutDevice(indexU);
+  }
+
+  if (res != 0) {
+    _shared->SetLastError(
+        VE_SOUNDCARD_ERROR, kTraceError,
+        "SetPlayoutDevice() unable to set the playout device");
+    return -1;
+  }
+
+  // Init speaker, so user can do volume settings etc
+  if (_shared->audio_device()->InitSpeaker() == -1) {
+    _shared->SetLastError(VE_CANNOT_ACCESS_SPEAKER_VOL, kTraceWarning,
+                          "SetPlayoutDevice() cannot access speaker");
+  }
+
+  // Set number of channels
+  bool available = false;
+  _shared->audio_device()->StereoPlayoutIsAvailable(&available);
+  if (_shared->audio_device()->SetStereoPlayout(available) != 0) {
+    _shared->SetLastError(
+        VE_SOUNDCARD_ERROR, kTraceWarning,
+        "SetPlayoutDevice() failed to set stereo playout mode");
+  }
+
+  // Restore playout if it was enabled already when calling this function.
+  if (isPlaying) {
+    if (!_shared->ext_playout()) {
+      WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                   "SetPlayoutDevice() playout is now being restored...");
+      if (_shared->audio_device()->InitPlayout() != 0) {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+                     VoEId(_shared->instance_id(), -1),
+                     "SetPlayoutDevice() failed to initialize playout");
         return -1;
-    }
-
-    bool isPlaying(false);
-
-    // Store state about activated playout to be able to restore it after the
-    // playout device has been modified.
-    if (_shared->audio_device()->Playing())
-    {
-        WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                     "SetPlayoutDevice() device is modified while playout is "
-                     "active...");
-        isPlaying = true;
-        if (_shared->audio_device()->StopPlayout() == -1)
-        {
-            _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
-                "SetPlayoutDevice() unable to stop playout");
-            return -1;
-        }
-    }
-
-    // We let the module do the index sanity
-
-    // Map indices to unsigned since underlying functions need that
-    uint16_t indexU = static_cast<uint16_t> (index);
-
-    int32_t res(0);
-
-    if (index == -1)
-    {
-        res = _shared->audio_device()->SetPlayoutDevice(
-            AudioDeviceModule::kDefaultCommunicationDevice);
-    }
-    else if (index == -2)
-    {
-        res = _shared->audio_device()->SetPlayoutDevice(
-            AudioDeviceModule::kDefaultDevice);
-    }
-    else
-    {
-        res = _shared->audio_device()->SetPlayoutDevice(indexU);
-    }
-
-    if (res != 0)
-    {
-        _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceError,
-            "SetPlayoutDevice() unable to set the playout device");
+      }
+      if (_shared->audio_device()->StartPlayout() != 0) {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+                     VoEId(_shared->instance_id(), -1),
+                     "SetPlayoutDevice() failed to start playout");
         return -1;
+      }
     }
+  }
 
-    // Init speaker, so user can do volume settings etc
-    if (_shared->audio_device()->InitSpeaker() == -1)
-    {
-        _shared->SetLastError(VE_CANNOT_ACCESS_SPEAKER_VOL, kTraceWarning,
-            "SetPlayoutDevice() cannot access speaker");
-    }
-
-    // Set number of channels
-    bool available = false;
-    _shared->audio_device()->StereoPlayoutIsAvailable(&available);
-    if (_shared->audio_device()->SetStereoPlayout(available) != 0)
-    {
-        _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
-            "SetPlayoutDevice() failed to set stereo playout mode");
-    }
-
-    // Restore playout if it was enabled already when calling this function.
-    if (isPlaying)
-    {
-        if (!_shared->ext_playout())
-        {
-            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
-                VoEId(_shared->instance_id(), -1),
-                "SetPlayoutDevice() playout is now being restored...");
-            if (_shared->audio_device()->InitPlayout() != 0)
-            {
-                WEBRTC_TRACE(kTraceError, kTraceVoice,
-                  VoEId(_shared->instance_id(), -1),
-                  "SetPlayoutDevice() failed to initialize playout");
-                return -1;
-            }
-            if (_shared->audio_device()->StartPlayout() != 0)
-            {
-                WEBRTC_TRACE(kTraceError, kTraceVoice,
-                             VoEId(_shared->instance_id(), -1),
-                             "SetPlayoutDevice() failed to start playout");
-                return -1;
-            }
-        }
-    }
-
-    return 0;
+  return 0;
 }
 
 int VoEHardwareImpl::SetRecordingSampleRate(unsigned int samples_per_sec) {
@@ -569,7 +507,7 @@
 }
 
 bool VoEHardwareImpl::BuiltInAECIsAvailable() const {
-if (!_shared->statistics().Initialized()) {
+  if (!_shared->statistics().Initialized()) {
     _shared->SetLastError(VE_NOT_INITED, kTraceError);
     return false;
   }
@@ -577,7 +515,7 @@
 }
 
 int VoEHardwareImpl::EnableBuiltInAEC(bool enable) {
-if (!_shared->statistics().Initialized()) {
+  if (!_shared->statistics().Initialized()) {
     _shared->SetLastError(VE_NOT_INITED, kTraceError);
     return -1;
   }
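
A short usage sketch for the audio-layer calls formatted above, assuming the voe_hardware.h include path, a Release() method like the other sub-APIs, and an engine that has not yet been initialized (SetAudioDeviceLayer() is rejected with VE_ALREADY_INITED once it has). GetAudioDeviceLayer() then reports the layer mapped back from the AudioDeviceModule.

#include "webrtc/voice_engine/include/voe_hardware.h"  // assumed header location

// Hypothetical helper; error handling beyond return codes is elided.
int SelectPulseAudioLayer(webrtc::VoiceEngine* voe) {
  webrtc::VoEHardware* hw = webrtc::VoEHardware::GetInterface(voe);
  if (hw == NULL)
    return -1;
  // Must run before the engine is initialized.
  int err = hw->SetAudioDeviceLayer(webrtc::kAudioLinuxPulse);
  if (err == 0) {
    webrtc::AudioLayers layer;
    err = hw->GetAudioDeviceLayer(layer);  // active layer, mapped from the ADM
  }
  hw->Release();  // Release() assumed to follow the other sub-APIs.
  return err;
}
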
diff --git a/webrtc/voice_engine/voe_hardware_impl.h b/webrtc/voice_engine/voe_hardware_impl.h
index ca1cf56..25950f0 100644
--- a/webrtc/voice_engine/voe_hardware_impl.h
+++ b/webrtc/voice_engine/voe_hardware_impl.h
@@ -15,48 +15,45 @@
 
 #include "webrtc/voice_engine/shared_data.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
-class VoEHardwareImpl: public VoEHardware
-{
-public:
-    virtual int GetNumOfRecordingDevices(int& devices);
+class VoEHardwareImpl : public VoEHardware {
+ public:
+  int GetNumOfRecordingDevices(int& devices) override;
 
-    virtual int GetNumOfPlayoutDevices(int& devices);
+  int GetNumOfPlayoutDevices(int& devices) override;
 
-    virtual int GetRecordingDeviceName(int index,
-                                       char strNameUTF8[128],
-                                       char strGuidUTF8[128]);
+  int GetRecordingDeviceName(int index,
+                             char strNameUTF8[128],
+                             char strGuidUTF8[128]) override;
 
-    virtual int GetPlayoutDeviceName(int index,
-                                     char strNameUTF8[128],
-                                     char strGuidUTF8[128]);
+  int GetPlayoutDeviceName(int index,
+                           char strNameUTF8[128],
+                           char strGuidUTF8[128]) override;
 
-    virtual int SetRecordingDevice(
-        int index,
-        StereoChannel recordingChannel = kStereoBoth);
+  int SetRecordingDevice(int index,
+                         StereoChannel recordingChannel = kStereoBoth) override;
 
-    virtual int SetPlayoutDevice(int index);
+  int SetPlayoutDevice(int index) override;
 
-    virtual int SetAudioDeviceLayer(AudioLayers audioLayer);
+  int SetAudioDeviceLayer(AudioLayers audioLayer) override;
 
-    virtual int GetAudioDeviceLayer(AudioLayers& audioLayer);
+  int GetAudioDeviceLayer(AudioLayers& audioLayer) override;
 
-    virtual int SetRecordingSampleRate(unsigned int samples_per_sec);
-    virtual int RecordingSampleRate(unsigned int* samples_per_sec) const;
-    virtual int SetPlayoutSampleRate(unsigned int samples_per_sec);
-    virtual int PlayoutSampleRate(unsigned int* samples_per_sec) const;
+  int SetRecordingSampleRate(unsigned int samples_per_sec) override;
+  int RecordingSampleRate(unsigned int* samples_per_sec) const override;
+  int SetPlayoutSampleRate(unsigned int samples_per_sec) override;
+  int PlayoutSampleRate(unsigned int* samples_per_sec) const override;
 
-    virtual bool BuiltInAECIsAvailable() const;
-    virtual int EnableBuiltInAEC(bool enable);
+  bool BuiltInAECIsAvailable() const override;
+  int EnableBuiltInAEC(bool enable) override;
 
-protected:
-    VoEHardwareImpl(voe::SharedData* shared);
-    virtual ~VoEHardwareImpl();
+ protected:
+  VoEHardwareImpl(voe::SharedData* shared);
+  ~VoEHardwareImpl() override;
 
-private:
-    voe::SharedData* _shared;
+ private:
+  voe::SharedData* _shared;
 };
 
 }  // namespace webrtc
diff --git a/webrtc/voice_engine/voe_neteq_stats_impl.cc b/webrtc/voice_engine/voe_neteq_stats_impl.cc
index 0d8ce50..b6b00d4 100644
--- a/webrtc/voice_engine/voe_neteq_stats_impl.cc
+++ b/webrtc/voice_engine/voe_neteq_stats_impl.cc
@@ -19,61 +19,54 @@
 
 namespace webrtc {
 
-VoENetEqStats* VoENetEqStats::GetInterface(VoiceEngine* voiceEngine)
-{
+VoENetEqStats* VoENetEqStats::GetInterface(VoiceEngine* voiceEngine) {
 #ifndef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
-    return NULL;
+  return NULL;
 #else
-    if (NULL == voiceEngine)
-    {
-        return NULL;
-    }
-    VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
-    s->AddRef();
-    return s;
+  if (NULL == voiceEngine) {
+    return NULL;
+  }
+  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+  s->AddRef();
+  return s;
 #endif
 }
 
 #ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
 
-VoENetEqStatsImpl::VoENetEqStatsImpl(voe::SharedData* shared) : _shared(shared)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoENetEqStatsImpl::VoENetEqStatsImpl() - ctor");
+VoENetEqStatsImpl::VoENetEqStatsImpl(voe::SharedData* shared)
+    : _shared(shared) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoENetEqStatsImpl::VoENetEqStatsImpl() - ctor");
 }
 
-VoENetEqStatsImpl::~VoENetEqStatsImpl()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoENetEqStatsImpl::~VoENetEqStatsImpl() - dtor");
+VoENetEqStatsImpl::~VoENetEqStatsImpl() {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoENetEqStatsImpl::~VoENetEqStatsImpl() - dtor");
 }
 
 int VoENetEqStatsImpl::GetNetworkStatistics(int channel,
-                                            NetworkStatistics& stats)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetNetworkStatistics(channel=%d, stats=?)", channel);
+                                            NetworkStatistics& stats) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetNetworkStatistics(channel=%d, stats=?)", channel);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetNetworkStatistics() failed to locate channel");
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetNetworkStatistics() failed to locate channel");
+    return -1;
+  }
 
-    return channelPtr->GetNetworkStatistics(stats);
+  return channelPtr->GetNetworkStatistics(stats);
 }
 
 int VoENetEqStatsImpl::GetDecodingCallStatistics(
     int channel, AudioDecodingCallStats* stats) const {
-
   if (!_shared->statistics().Initialized()) {
     _shared->SetLastError(VE_NOT_INITED, kTraceError);
     return -1;
diff --git a/webrtc/voice_engine/voe_neteq_stats_impl.h b/webrtc/voice_engine/voe_neteq_stats_impl.h
index 74b624b..d441e82 100644
--- a/webrtc/voice_engine/voe_neteq_stats_impl.h
+++ b/webrtc/voice_engine/voe_neteq_stats_impl.h
@@ -18,23 +18,21 @@
 
 namespace webrtc {
 
-class VoENetEqStatsImpl : public VoENetEqStats
-{
-public:
-    virtual int GetNetworkStatistics(int channel,
-                                     NetworkStatistics& stats);
+class VoENetEqStatsImpl : public VoENetEqStats {
+ public:
+  int GetNetworkStatistics(int channel, NetworkStatistics& stats) override;
 
-    virtual int GetDecodingCallStatistics(
-        int channel, AudioDecodingCallStats* stats) const;
+  int GetDecodingCallStatistics(int channel,
+                                AudioDecodingCallStats* stats) const override;
 
-protected:
-    VoENetEqStatsImpl(voe::SharedData* shared);
-    virtual ~VoENetEqStatsImpl();
+ protected:
+  VoENetEqStatsImpl(voe::SharedData* shared);
+  ~VoENetEqStatsImpl() override;
 
-private:
-    voe::SharedData* _shared;
+ private:
+  voe::SharedData* _shared;
 };
 
 }  // namespace webrtc
 
-#endif    // WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H
+#endif  // WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H
diff --git a/webrtc/voice_engine/voe_network_impl.cc b/webrtc/voice_engine/voe_network_impl.cc
index 89d1b04..eb13233 100644
--- a/webrtc/voice_engine/voe_network_impl.cc
+++ b/webrtc/voice_engine/voe_network_impl.cc
@@ -18,73 +18,62 @@
 #include "webrtc/voice_engine/include/voe_errors.h"
 #include "webrtc/voice_engine/voice_engine_impl.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
-VoENetwork* VoENetwork::GetInterface(VoiceEngine* voiceEngine)
-{
-    if (NULL == voiceEngine)
-    {
-        return NULL;
-    }
-    VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
-    s->AddRef();
-    return s;
+VoENetwork* VoENetwork::GetInterface(VoiceEngine* voiceEngine) {
+  if (NULL == voiceEngine) {
+    return NULL;
+  }
+  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+  s->AddRef();
+  return s;
 }
 
-VoENetworkImpl::VoENetworkImpl(voe::SharedData* shared) : _shared(shared)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoENetworkImpl() - ctor");
+VoENetworkImpl::VoENetworkImpl(voe::SharedData* shared) : _shared(shared) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoENetworkImpl() - ctor");
 }
 
-VoENetworkImpl::~VoENetworkImpl()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "~VoENetworkImpl() - dtor");
+VoENetworkImpl::~VoENetworkImpl() {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "~VoENetworkImpl() - dtor");
 }
 
 int VoENetworkImpl::RegisterExternalTransport(int channel,
-                                              Transport& transport)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetExternalTransport(channel=%d, transport=0x%x)",
-                 channel, &transport);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetExternalTransport() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->RegisterExternalTransport(transport);
+                                              Transport& transport) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetExternalTransport(channel=%d, transport=0x%x)", channel,
+               &transport);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetExternalTransport() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->RegisterExternalTransport(transport);
 }
 
-int VoENetworkImpl::DeRegisterExternalTransport(int channel)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "DeRegisterExternalTransport(channel=%d)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        WEBRTC_TRACE(kTraceError, kTraceVoice,
-                     VoEId(_shared->instance_id(), -1),
-                     "DeRegisterExternalTransport() - invalid state");
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "DeRegisterExternalTransport() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->DeRegisterExternalTransport();
+int VoENetworkImpl::DeRegisterExternalTransport(int channel) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "DeRegisterExternalTransport(channel=%d)", channel);
+  if (!_shared->statistics().Initialized()) {
+    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "DeRegisterExternalTransport() - invalid state");
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(
+        VE_CHANNEL_NOT_VALID, kTraceError,
+        "DeRegisterExternalTransport() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->DeRegisterExternalTransport();
 }
 
 int VoENetworkImpl::ReceivedRTPPacket(int channel,
@@ -96,85 +85,76 @@
 int VoENetworkImpl::ReceivedRTPPacket(int channel,
                                       const void* data,
                                       size_t length,
-                                      const PacketTime& packet_time)
-{
-    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "ReceivedRTPPacket(channel=%d, length=%" PRIuS ")", channel,
-                 length);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    // L16 at 32 kHz, stereo, 10 ms frames (+12 byte RTP header) -> 1292 bytes
-    if ((length < 12) || (length > 1292))
-    {
-        _shared->SetLastError(VE_INVALID_PACKET);
-        LOG(LS_ERROR) << "Invalid packet length: " << length;
-        return -1;
-    }
-    if (NULL == data)
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "ReceivedRTPPacket() invalid data vector");
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "ReceivedRTPPacket() failed to locate channel");
-        return -1;
-    }
+                                      const PacketTime& packet_time) {
+  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "ReceivedRTPPacket(channel=%d, length=%" PRIuS ")", channel,
+               length);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  // L16 at 32 kHz, stereo, 10 ms frames (+12 byte RTP header) -> 1292 bytes
+  if ((length < 12) || (length > 1292)) {
+    _shared->SetLastError(VE_INVALID_PACKET);
+    LOG(LS_ERROR) << "Invalid packet length: " << length;
+    return -1;
+  }
+  if (NULL == data) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "ReceivedRTPPacket() invalid data vector");
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "ReceivedRTPPacket() failed to locate channel");
+    return -1;
+  }
 
-    if (!channelPtr->ExternalTransport())
-    {
-        _shared->SetLastError(VE_INVALID_OPERATION, kTraceError,
-            "ReceivedRTPPacket() external transport is not enabled");
-        return -1;
-    }
-    return channelPtr->ReceivedRTPPacket((const int8_t*) data, length,
-                                         packet_time);
+  if (!channelPtr->ExternalTransport()) {
+    _shared->SetLastError(
+        VE_INVALID_OPERATION, kTraceError,
+        "ReceivedRTPPacket() external transport is not enabled");
+    return -1;
+  }
+  return channelPtr->ReceivedRTPPacket((const int8_t*)data, length,
+                                       packet_time);
 }
 
-int VoENetworkImpl::ReceivedRTCPPacket(int channel, const void* data,
-                                       size_t length)
-{
-    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "ReceivedRTCPPacket(channel=%d, length=%" PRIuS ")", channel,
-                 length);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (length < 4)
-    {
-        _shared->SetLastError(VE_INVALID_PACKET, kTraceError,
-            "ReceivedRTCPPacket() invalid packet length");
-        return -1;
-    }
-    if (NULL == data)
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "ReceivedRTCPPacket() invalid data vector");
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "ReceivedRTCPPacket() failed to locate channel");
-        return -1;
-    }
-    if (!channelPtr->ExternalTransport())
-    {
-        _shared->SetLastError(VE_INVALID_OPERATION, kTraceError,
-            "ReceivedRTCPPacket() external transport is not enabled");
-        return -1;
-    }
-    return channelPtr->ReceivedRTCPPacket((const int8_t*) data, length);
+int VoENetworkImpl::ReceivedRTCPPacket(int channel,
+                                       const void* data,
+                                       size_t length) {
+  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "ReceivedRTCPPacket(channel=%d, length=%" PRIuS ")", channel,
+               length);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (length < 4) {
+    _shared->SetLastError(VE_INVALID_PACKET, kTraceError,
+                          "ReceivedRTCPPacket() invalid packet length");
+    return -1;
+  }
+  if (NULL == data) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "ReceivedRTCPPacket() invalid data vector");
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "ReceivedRTCPPacket() failed to locate channel");
+    return -1;
+  }
+  if (!channelPtr->ExternalTransport()) {
+    _shared->SetLastError(
+        VE_INVALID_OPERATION, kTraceError,
+        "ReceivedRTCPPacket() external transport is not enabled");
+    return -1;
+  }
+  return channelPtr->ReceivedRTCPPacket((const int8_t*)data, length);
 }
 }  // namespace webrtc
diff --git a/webrtc/voice_engine/voe_network_impl.h b/webrtc/voice_engine/voe_network_impl.h
index ee9b92e..ab6a9af 100644
--- a/webrtc/voice_engine/voe_network_impl.h
+++ b/webrtc/voice_engine/voe_network_impl.h
@@ -15,30 +15,28 @@
 
 #include "webrtc/voice_engine/shared_data.h"
 
+namespace webrtc {
 
-namespace webrtc
-{
+class VoENetworkImpl : public VoENetwork {
+ public:
+  int RegisterExternalTransport(int channel, Transport& transport) override;
 
-class VoENetworkImpl: public VoENetwork
-{
-public:
- int RegisterExternalTransport(int channel, Transport& transport) override;
+  int DeRegisterExternalTransport(int channel) override;
 
- int DeRegisterExternalTransport(int channel) override;
+  int ReceivedRTPPacket(int channel, const void* data, size_t length) override;
+  int ReceivedRTPPacket(int channel,
+                        const void* data,
+                        size_t length,
+                        const PacketTime& packet_time) override;
 
- int ReceivedRTPPacket(int channel, const void* data, size_t length) override;
- int ReceivedRTPPacket(int channel,
-                       const void* data,
-                       size_t length,
-                       const PacketTime& packet_time) override;
+  int ReceivedRTCPPacket(int channel, const void* data, size_t length) override;
 
- int ReceivedRTCPPacket(int channel, const void* data, size_t length) override;
+ protected:
+  VoENetworkImpl(voe::SharedData* shared);
+  ~VoENetworkImpl() override;
 
-protected:
-    VoENetworkImpl(voe::SharedData* shared);
-    virtual ~VoENetworkImpl();
-private:
-    voe::SharedData* _shared;
+ private:
+  voe::SharedData* _shared;
 };
 
 }  // namespace webrtc
diff --git a/webrtc/voice_engine/voe_rtp_rtcp_impl.cc b/webrtc/voice_engine/voe_rtp_rtcp_impl.cc
index 7bac260..c4c7067 100644
--- a/webrtc/voice_engine/voe_rtp_rtcp_impl.cc
+++ b/webrtc/voice_engine/voe_rtp_rtcp_impl.cc
@@ -21,145 +21,132 @@
 
 namespace webrtc {
 
-VoERTP_RTCP* VoERTP_RTCP::GetInterface(VoiceEngine* voiceEngine)
-{
+VoERTP_RTCP* VoERTP_RTCP::GetInterface(VoiceEngine* voiceEngine) {
 #ifndef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
-    return NULL;
+  return NULL;
 #else
-    if (NULL == voiceEngine)
-    {
-        return NULL;
-    }
-    VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
-    s->AddRef();
-    return s;
+  if (NULL == voiceEngine) {
+    return NULL;
+  }
+  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+  s->AddRef();
+  return s;
 #endif
 }
 
 #ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
 
-VoERTP_RTCPImpl::VoERTP_RTCPImpl(voe::SharedData* shared) : _shared(shared)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoERTP_RTCPImpl::VoERTP_RTCPImpl() - ctor");
+VoERTP_RTCPImpl::VoERTP_RTCPImpl(voe::SharedData* shared) : _shared(shared) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoERTP_RTCPImpl::VoERTP_RTCPImpl() - ctor");
 }
 
-VoERTP_RTCPImpl::~VoERTP_RTCPImpl()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoERTP_RTCPImpl::~VoERTP_RTCPImpl() - dtor");
+VoERTP_RTCPImpl::~VoERTP_RTCPImpl() {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoERTP_RTCPImpl::~VoERTP_RTCPImpl() - dtor");
 }
 
-int VoERTP_RTCPImpl::SetLocalSSRC(int channel, unsigned int ssrc)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetLocalSSRC(channel=%d, %lu)", channel, ssrc);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetLocalSSRC() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetLocalSSRC(ssrc);
+int VoERTP_RTCPImpl::SetLocalSSRC(int channel, unsigned int ssrc) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetLocalSSRC(channel=%d, %lu)", channel, ssrc);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetLocalSSRC() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetLocalSSRC(ssrc);
 }
 
-int VoERTP_RTCPImpl::GetLocalSSRC(int channel, unsigned int& ssrc)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetLocalSSRC(channel=%d, ssrc=?)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetLocalSSRC() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetLocalSSRC(ssrc);
+int VoERTP_RTCPImpl::GetLocalSSRC(int channel, unsigned int& ssrc) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetLocalSSRC(channel=%d, ssrc=?)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetLocalSSRC() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetLocalSSRC(ssrc);
 }
 
-int VoERTP_RTCPImpl::GetRemoteSSRC(int channel, unsigned int& ssrc)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetRemoteSSRC(channel=%d, ssrc=?)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetRemoteSSRC() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetRemoteSSRC(ssrc);
+int VoERTP_RTCPImpl::GetRemoteSSRC(int channel, unsigned int& ssrc) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRemoteSSRC(channel=%d, ssrc=?)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetRemoteSSRC() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetRemoteSSRC(ssrc);
 }
 
 int VoERTP_RTCPImpl::SetSendAudioLevelIndicationStatus(int channel,
                                                        bool enable,
-                                                       unsigned char id)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetSendAudioLevelIndicationStatus(channel=%d, enable=%d,"
-                 " ID=%u)", channel, enable, id);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (enable && (id < kVoiceEngineMinRtpExtensionId ||
-                   id > kVoiceEngineMaxRtpExtensionId))
-    {
-        // [RFC5285] The 4-bit id is the local identifier of this element in
-        // the range 1-14 inclusive.
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetSendAudioLevelIndicationStatus() invalid ID parameter");
-        return -1;
-    }
+                                                       unsigned char id) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetSendAudioLevelIndicationStatus(channel=%d, enable=%d,"
+               " ID=%u)",
+               channel, enable, id);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (enable && (id < kVoiceEngineMinRtpExtensionId ||
+                 id > kVoiceEngineMaxRtpExtensionId)) {
+    // [RFC5285] The 4-bit id is the local identifier of this element in
+    // the range 1-14 inclusive.
+    _shared->SetLastError(
+        VE_INVALID_ARGUMENT, kTraceError,
+        "SetSendAudioLevelIndicationStatus() invalid ID parameter");
+    return -1;
+  }
 
-    // Set state and id for the specified channel.
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetSendAudioLevelIndicationStatus() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetSendAudioLevelIndicationStatus(enable, id);
+  // Set state and id for the specified channel.
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(
+        VE_CHANNEL_NOT_VALID, kTraceError,
+        "SetSendAudioLevelIndicationStatus() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetSendAudioLevelIndicationStatus(enable, id);
 }
 
 int VoERTP_RTCPImpl::SetReceiveAudioLevelIndicationStatus(int channel,
                                                           bool enable,
                                                           unsigned char id) {
-  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+  WEBRTC_TRACE(
+      kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
       "SetReceiveAudioLevelIndicationStatus(channel=%d, enable=%d, id=%u)",
       channel, enable, id);
   if (!_shared->statistics().Initialized()) {
     _shared->SetLastError(VE_NOT_INITED, kTraceError);
     return -1;
   }
-  if (enable &&
-      (id < kVoiceEngineMinRtpExtensionId ||
-       id > kVoiceEngineMaxRtpExtensionId)) {
+  if (enable && (id < kVoiceEngineMinRtpExtensionId ||
+                 id > kVoiceEngineMaxRtpExtensionId)) {
     // [RFC5285] The 4-bit id is the local identifier of this element in
     // the range 1-14 inclusive.
-    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+    _shared->SetLastError(
+        VE_INVALID_ARGUMENT, kTraceError,
         "SetReceiveAbsoluteSenderTimeStatus() invalid id parameter");
     return -1;
   }
@@ -167,7 +154,8 @@
   voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
   voe::Channel* channel_ptr = ch.channel();
   if (channel_ptr == NULL) {
-    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+    _shared->SetLastError(
+        VE_CHANNEL_NOT_VALID, kTraceError,
         "SetReceiveAudioLevelIndicationStatus() failed to locate channel");
     return -1;
   }
@@ -188,7 +176,8 @@
                  id > kVoiceEngineMaxRtpExtensionId)) {
     // [RFC5285] The 4-bit id is the local identifier of this element in
     // the range 1-14 inclusive.
-    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+    _shared->SetLastError(
+        VE_INVALID_ARGUMENT, kTraceError,
         "SetSendAbsoluteSenderTimeStatus() invalid id parameter");
     return -1;
   }
@@ -196,7 +185,8 @@
   voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
   voe::Channel* channelPtr = ch.channel();
   if (channelPtr == NULL) {
-    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+    _shared->SetLastError(
+        VE_CHANNEL_NOT_VALID, kTraceError,
         "SetSendAbsoluteSenderTimeStatus() failed to locate channel");
     return -1;
   }
@@ -206,7 +196,8 @@
 int VoERTP_RTCPImpl::SetReceiveAbsoluteSenderTimeStatus(int channel,
                                                         bool enable,
                                                         unsigned char id) {
-  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+  WEBRTC_TRACE(
+      kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
       "SetReceiveAbsoluteSenderTimeStatus(channel=%d, enable=%d, id=%u)",
       channel, enable, id);
   if (!_shared->statistics().Initialized()) {
@@ -217,7 +208,8 @@
                  id > kVoiceEngineMaxRtpExtensionId)) {
     // [RFC5285] The 4-bit id is the local identifier of this element in
     // the range 1-14 inclusive.
-    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+    _shared->SetLastError(
+        VE_INVALID_ARGUMENT, kTraceError,
         "SetReceiveAbsoluteSenderTimeStatus() invalid id parameter");
     return -1;
   }
@@ -225,169 +217,145 @@
   voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
   voe::Channel* channelPtr = ch.channel();
   if (channelPtr == NULL) {
-    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+    _shared->SetLastError(
+        VE_CHANNEL_NOT_VALID, kTraceError,
         "SetReceiveAbsoluteSenderTimeStatus() failed to locate channel");
     return -1;
   }
   return channelPtr->SetReceiveAbsoluteSenderTimeStatus(enable, id);
 }
 
-int VoERTP_RTCPImpl::SetRTCPStatus(int channel, bool enable)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetRTCPStatus(channel=%d, enable=%d)", channel, enable);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetRTCPStatus() failed to locate channel");
-        return -1;
-    }
-    channelPtr->SetRTCPStatus(enable);
-    return 0;
+int VoERTP_RTCPImpl::SetRTCPStatus(int channel, bool enable) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetRTCPStatus(channel=%d, enable=%d)", channel, enable);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetRTCPStatus() failed to locate channel");
+    return -1;
+  }
+  channelPtr->SetRTCPStatus(enable);
+  return 0;
 }
 
-int VoERTP_RTCPImpl::GetRTCPStatus(int channel, bool& enabled)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetRTCPStatus(channel=%d)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetRTCPStatus() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetRTCPStatus(enabled);
+int VoERTP_RTCPImpl::GetRTCPStatus(int channel, bool& enabled) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRTCPStatus(channel=%d)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetRTCPStatus() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetRTCPStatus(enabled);
 }
 
-int VoERTP_RTCPImpl::SetRTCP_CNAME(int channel, const char cName[256])
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetRTCP_CNAME(channel=%d, cName=%s)", channel, cName);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetRTCP_CNAME() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetRTCP_CNAME(cName);
+int VoERTP_RTCPImpl::SetRTCP_CNAME(int channel, const char cName[256]) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetRTCP_CNAME(channel=%d, cName=%s)", channel, cName);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetRTCP_CNAME() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetRTCP_CNAME(cName);
 }
 
-int VoERTP_RTCPImpl::GetRemoteRTCP_CNAME(int channel, char cName[256])
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetRemoteRTCP_CNAME(channel=%d, cName=?)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetRemoteRTCP_CNAME() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetRemoteRTCP_CNAME(cName);
+int VoERTP_RTCPImpl::GetRemoteRTCP_CNAME(int channel, char cName[256]) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRemoteRTCP_CNAME(channel=%d, cName=?)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetRemoteRTCP_CNAME() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetRemoteRTCP_CNAME(cName);
 }
 
 int VoERTP_RTCPImpl::GetRemoteRTCPData(
     int channel,
-    unsigned int& NTPHigh, // from sender info in SR
-    unsigned int& NTPLow, // from sender info in SR
-    unsigned int& timestamp, // from sender info in SR
-    unsigned int& playoutTimestamp, // derived locally
-    unsigned int* jitter, // from report block 1 in SR/RR
-    unsigned short* fractionLost) // from report block 1 in SR/RR
+    unsigned int& NTPHigh,           // from sender info in SR
+    unsigned int& NTPLow,            // from sender info in SR
+    unsigned int& timestamp,         // from sender info in SR
+    unsigned int& playoutTimestamp,  // derived locally
+    unsigned int* jitter,            // from report block 1 in SR/RR
+    unsigned short* fractionLost)    // from report block 1 in SR/RR
 {
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetRemoteRTCPData(channel=%d,...)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetRemoteRTCP_CNAME() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetRemoteRTCPData(NTPHigh,
-                                         NTPLow,
-                                         timestamp,
-                                         playoutTimestamp,
-                                         jitter,
-                                         fractionLost);
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRemoteRTCPData(channel=%d,...)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetRemoteRTCP_CNAME() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetRemoteRTCPData(NTPHigh, NTPLow, timestamp,
+                                       playoutTimestamp, jitter, fractionLost);
 }
 
 int VoERTP_RTCPImpl::GetRTPStatistics(int channel,
                                       unsigned int& averageJitterMs,
                                       unsigned int& maxJitterMs,
-                                      unsigned int& discardedPackets)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetRTPStatistics(channel=%d,....)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetRTPStatistics() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetRTPStatistics(averageJitterMs,
-                                        maxJitterMs,
-                                        discardedPackets);
+                                      unsigned int& discardedPackets) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRTPStatistics(channel=%d,....)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetRTPStatistics() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetRTPStatistics(averageJitterMs, maxJitterMs,
+                                      discardedPackets);
 }
 
-int VoERTP_RTCPImpl::GetRTCPStatistics(int channel, CallStatistics& stats)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetRTCPStatistics(channel=%d)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetRTPStatistics() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetRTPStatistics(stats);
+int VoERTP_RTCPImpl::GetRTCPStatistics(int channel, CallStatistics& stats) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRTCPStatistics(channel=%d)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetRTPStatistics() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetRTPStatistics(stats);
 }
 
 int VoERTP_RTCPImpl::GetRemoteRTCPReportBlocks(
@@ -401,159 +369,142 @@
   voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
   voe::Channel* channel_ptr = ch.channel();
   if (channel_ptr == NULL) {
-    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+    _shared->SetLastError(
+        VE_CHANNEL_NOT_VALID, kTraceError,
         "GetRemoteRTCPReportBlocks() failed to locate channel");
     return -1;
   }
   return channel_ptr->GetRemoteRTCPReportBlocks(report_blocks);
 }
 
-int VoERTP_RTCPImpl::SetREDStatus(int channel, bool enable, int redPayloadtype)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetREDStatus(channel=%d, enable=%d, redPayloadtype=%d)",
-                 channel, enable, redPayloadtype);
+int VoERTP_RTCPImpl::SetREDStatus(int channel,
+                                  bool enable,
+                                  int redPayloadtype) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetREDStatus(channel=%d, enable=%d, redPayloadtype=%d)",
+               channel, enable, redPayloadtype);
 #ifdef WEBRTC_CODEC_RED
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetREDStatus() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetREDStatus(enable, redPayloadtype);
-#else
-    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-        "SetREDStatus() RED is not supported");
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
     return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetREDStatus() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetREDStatus(enable, redPayloadtype);
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                        "SetREDStatus() RED is not supported");
+  return -1;
 #endif
 }
 
 int VoERTP_RTCPImpl::GetREDStatus(int channel,
                                   bool& enabled,
-                                  int& redPayloadtype)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetREDStatus(channel=%d, enabled=?, redPayloadtype=?)",
-                 channel);
+                                  int& redPayloadtype) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetREDStatus(channel=%d, enabled=?, redPayloadtype=?)",
+               channel);
 #ifdef WEBRTC_CODEC_RED
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetREDStatus() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetREDStatus(enabled, redPayloadtype);
-#else
-    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
-        "GetREDStatus() RED is not supported");
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
     return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetREDStatus() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetREDStatus(enabled, redPayloadtype);
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                        "GetREDStatus() RED is not supported");
+  return -1;
 #endif
 }
 
-int VoERTP_RTCPImpl::SetNACKStatus(int channel,
-                                   bool enable,
-                                   int maxNoPackets)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetNACKStatus(channel=%d, enable=%d, maxNoPackets=%d)",
-                 channel, enable, maxNoPackets);
+int VoERTP_RTCPImpl::SetNACKStatus(int channel, bool enable, int maxNoPackets) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetNACKStatus(channel=%d, enable=%d, maxNoPackets=%d)", channel,
+               enable, maxNoPackets);
 
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetNACKStatus() failed to locate channel");
-        return -1;
-    }
-    channelPtr->SetNACKStatus(enable, maxNoPackets);
-    return 0;
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetNACKStatus() failed to locate channel");
+    return -1;
+  }
+  channelPtr->SetNACKStatus(enable, maxNoPackets);
+  return 0;
 }
 
-
 int VoERTP_RTCPImpl::StartRTPDump(int channel,
                                   const char fileNameUTF8[1024],
-                                  RTPDirections direction)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StartRTPDump(channel=%d, fileNameUTF8=%s, direction=%d)",
-                 channel, fileNameUTF8, direction);
-    assert(1024 == FileWrapper::kMaxFileNameSize);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "StartRTPDump() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->StartRTPDump(fileNameUTF8, direction);
+                                  RTPDirections direction) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StartRTPDump(channel=%d, fileNameUTF8=%s, direction=%d)",
+               channel, fileNameUTF8, direction);
+  assert(1024 == FileWrapper::kMaxFileNameSize);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "StartRTPDump() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->StartRTPDump(fileNameUTF8, direction);
 }
 
-int VoERTP_RTCPImpl::StopRTPDump(int channel, RTPDirections direction)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "StopRTPDump(channel=%d, direction=%d)", channel, direction);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "StopRTPDump() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->StopRTPDump(direction);
+int VoERTP_RTCPImpl::StopRTPDump(int channel, RTPDirections direction) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "StopRTPDump(channel=%d, direction=%d)", channel, direction);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "StopRTPDump() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->StopRTPDump(direction);
 }
 
-int VoERTP_RTCPImpl::RTPDumpIsActive(int channel, RTPDirections direction)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "RTPDumpIsActive(channel=%d, direction=%d)",
-                 channel, direction);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "StopRTPDump() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->RTPDumpIsActive(direction);
+int VoERTP_RTCPImpl::RTPDumpIsActive(int channel, RTPDirections direction) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "RTPDumpIsActive(channel=%d, direction=%d)", channel, direction);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "StopRTPDump() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->RTPDumpIsActive(direction);
 }
 
 int VoERTP_RTCPImpl::SetVideoEngineBWETarget(int channel,
                                              ViENetwork* vie_network,
                                              int video_channel) {
-  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+  WEBRTC_TRACE(
+      kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
       "SetVideoEngineBWETarget(channel=%d, vie_network=?, video_channel=%d)",
       channel, vie_network, video_channel);
 
diff --git a/webrtc/voice_engine/voe_rtp_rtcp_impl.h b/webrtc/voice_engine/voe_rtp_rtcp_impl.h
index c2599a2..b83ab2d 100644
--- a/webrtc/voice_engine/voe_rtp_rtcp_impl.h
+++ b/webrtc/voice_engine/voe_rtp_rtcp_impl.h
@@ -17,93 +17,90 @@
 
 namespace webrtc {
 
-class VoERTP_RTCPImpl : public VoERTP_RTCP
-{
-public:
-    // RTCP
-    virtual int SetRTCPStatus(int channel, bool enable);
+class VoERTP_RTCPImpl : public VoERTP_RTCP {
+ public:
+  // RTCP
+  int SetRTCPStatus(int channel, bool enable) override;
 
-    virtual int GetRTCPStatus(int channel, bool& enabled);
+  int GetRTCPStatus(int channel, bool& enabled) override;
 
-    virtual int SetRTCP_CNAME(int channel, const char cName[256]);
+  int SetRTCP_CNAME(int channel, const char cName[256]) override;
 
-    virtual int GetRemoteRTCP_CNAME(int channel, char cName[256]);
+  int GetRemoteRTCP_CNAME(int channel, char cName[256]) override;
 
-    virtual int GetRemoteRTCPData(int channel,
-                                  unsigned int& NTPHigh,
-                                  unsigned int& NTPLow,
-                                  unsigned int& timestamp,
-                                  unsigned int& playoutTimestamp,
-                                  unsigned int* jitter = NULL,
-                                  unsigned short* fractionLost = NULL);
+  int GetRemoteRTCPData(int channel,
+                        unsigned int& NTPHigh,
+                        unsigned int& NTPLow,
+                        unsigned int& timestamp,
+                        unsigned int& playoutTimestamp,
+                        unsigned int* jitter = NULL,
+                        unsigned short* fractionLost = NULL) override;
 
-    // SSRC
-    virtual int SetLocalSSRC(int channel, unsigned int ssrc);
+  // SSRC
+  int SetLocalSSRC(int channel, unsigned int ssrc) override;
 
-    virtual int GetLocalSSRC(int channel, unsigned int& ssrc);
+  int GetLocalSSRC(int channel, unsigned int& ssrc) override;
 
-    virtual int GetRemoteSSRC(int channel, unsigned int& ssrc);
+  int GetRemoteSSRC(int channel, unsigned int& ssrc) override;
 
-    // RTP Header Extension for Client-to-Mixer Audio Level Indication
-    virtual int SetSendAudioLevelIndicationStatus(int channel,
-                                                  bool enable,
-                                                  unsigned char id);
-    virtual int SetReceiveAudioLevelIndicationStatus(int channel,
-                                                     bool enable,
-                                                     unsigned char id);
+  // RTP Header Extension for Client-to-Mixer Audio Level Indication
+  int SetSendAudioLevelIndicationStatus(int channel,
+                                        bool enable,
+                                        unsigned char id) override;
+  int SetReceiveAudioLevelIndicationStatus(int channel,
+                                           bool enable,
+                                           unsigned char id) override;
 
-    // RTP Header Extension for Absolute Sender Time
-    virtual int SetSendAbsoluteSenderTimeStatus(int channel,
-                                                bool enable,
-                                                unsigned char id);
-    virtual int SetReceiveAbsoluteSenderTimeStatus(int channel,
-                                                   bool enable,
-                                                   unsigned char id);
+  // RTP Header Extension for Absolute Sender Time
+  int SetSendAbsoluteSenderTimeStatus(int channel,
+                                      bool enable,
+                                      unsigned char id) override;
+  int SetReceiveAbsoluteSenderTimeStatus(int channel,
+                                         bool enable,
+                                         unsigned char id) override;
 
-    // Statistics
-    virtual int GetRTPStatistics(int channel,
-                                 unsigned int& averageJitterMs,
-                                 unsigned int& maxJitterMs,
-                                 unsigned int& discardedPackets);
+  // Statistics
+  int GetRTPStatistics(int channel,
+                       unsigned int& averageJitterMs,
+                       unsigned int& maxJitterMs,
+                       unsigned int& discardedPackets) override;
 
-    virtual int GetRTCPStatistics(int channel, CallStatistics& stats);
+  int GetRTCPStatistics(int channel, CallStatistics& stats) override;
 
-    virtual int GetRemoteRTCPReportBlocks(
-        int channel, std::vector<ReportBlock>* report_blocks);
+  int GetRemoteRTCPReportBlocks(
+      int channel,
+      std::vector<ReportBlock>* report_blocks) override;
 
-    // RED
-    virtual int SetREDStatus(int channel,
-                             bool enable,
-                             int redPayloadtype = -1);
+  // RED
+  int SetREDStatus(int channel, bool enable, int redPayloadtype = -1) override;
 
-    virtual int GetREDStatus(int channel, bool& enabled, int& redPayloadtype);
+  int GetREDStatus(int channel, bool& enabled, int& redPayloadtype) override;
 
-    //NACK
-    virtual int SetNACKStatus(int channel,
-                              bool enable,
-                              int maxNoPackets);
+  // NACK
+  int SetNACKStatus(int channel, bool enable, int maxNoPackets) override;
 
-    // Store RTP and RTCP packets and dump to file (compatible with rtpplay)
-    virtual int StartRTPDump(int channel,
-                             const char fileNameUTF8[1024],
-                             RTPDirections direction = kRtpIncoming);
+  // Store RTP and RTCP packets and dump to file (compatible with rtpplay)
+  int StartRTPDump(int channel,
+                   const char fileNameUTF8[1024],
+                   RTPDirections direction = kRtpIncoming) override;
 
-    virtual int StopRTPDump(int channel,
-                            RTPDirections direction = kRtpIncoming);
+  int StopRTPDump(int channel, RTPDirections direction = kRtpIncoming) override;
 
-    virtual int RTPDumpIsActive(int channel,
-                                RTPDirections direction = kRtpIncoming);
+  int RTPDumpIsActive(int channel,
+                      RTPDirections direction = kRtpIncoming) override;
 
-    virtual int SetVideoEngineBWETarget(int channel, ViENetwork* vie_network,
-                                        int video_channel);
-protected:
-    VoERTP_RTCPImpl(voe::SharedData* shared);
-    virtual ~VoERTP_RTCPImpl();
+  int SetVideoEngineBWETarget(int channel,
+                              ViENetwork* vie_network,
+                              int video_channel) override;
 
-private:
-    voe::SharedData* _shared;
+ protected:
+  VoERTP_RTCPImpl(voe::SharedData* shared);
+  ~VoERTP_RTCPImpl() override;
+
+ private:
+  voe::SharedData* _shared;
 };
 
 }  // namespace webrtc
 
-#endif    // WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H
+#endif  // WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H
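As context for the methods declared above, a minimal sketch of driving the public VoERTP_RTCP interface from application code. The include path, the CallStatistics/ReportBlock types being visible through that header, the way the channel id was obtained, and the Release() reference-count convention are assumptions based on the surrounding VoE headers, not something introduced by this CL; error handling is reduced to early returns.

#include <vector>

#include "webrtc/voice_engine/include/voe_rtp_rtcp.h"  // assumed path

// Illustrative only: query a few RTCP facts for one already-created channel.
void DumpRtcpStats(webrtc::VoiceEngine* voe, int channel) {
  webrtc::VoERTP_RTCP* rtp_rtcp = webrtc::VoERTP_RTCP::GetInterface(voe);
  if (rtp_rtcp == NULL)
    return;

  rtp_rtcp->SetRTCP_CNAME(channel, "example-cname");

  unsigned int local_ssrc = 0;
  unsigned int remote_ssrc = 0;
  rtp_rtcp->GetLocalSSRC(channel, local_ssrc);
  rtp_rtcp->GetRemoteSSRC(channel, remote_ssrc);

  webrtc::CallStatistics stats;
  if (rtp_rtcp->GetRTCPStatistics(channel, stats) == 0) {
    // Inspect stats here (round-trip time, packet counts, ...).
  }

  std::vector<webrtc::ReportBlock> blocks;
  rtp_rtcp->GetRemoteRTCPReportBlocks(channel, &blocks);

  rtp_rtcp->Release();  // Balance the reference taken by GetInterface().
}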
diff --git a/webrtc/voice_engine/voe_video_sync_impl.cc b/webrtc/voice_engine/voe_video_sync_impl.cc
index f4c5a6f..f1c7347 100644
--- a/webrtc/voice_engine/voe_video_sync_impl.cc
+++ b/webrtc/voice_engine/voe_video_sync_impl.cc
@@ -18,144 +18,124 @@
 
 namespace webrtc {
 
-VoEVideoSync* VoEVideoSync::GetInterface(VoiceEngine* voiceEngine)
-{
+VoEVideoSync* VoEVideoSync::GetInterface(VoiceEngine* voiceEngine) {
 #ifndef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
-    return NULL;
+  return NULL;
 #else
-    if (NULL == voiceEngine)
-    {
-        return NULL;
-    }
-    VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
-    s->AddRef();
-    return s;
+  if (NULL == voiceEngine) {
+    return NULL;
+  }
+  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+  s->AddRef();
+  return s;
 #endif
 }
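The pattern above (a static GetInterface() that NULL-checks the engine, AddRef()s it, and hands back the same object) is what callers are expected to balance. A minimal sketch, assuming the public voe_video_sync.h header path and the usual sub-API Release():

#include "webrtc/voice_engine/include/voe_video_sync.h"  // assumed path

// Returns true if the video-sync sub-API is available for this engine.
bool VideoSyncAvailable(webrtc::VoiceEngine* voice_engine) {
  webrtc::VoEVideoSync* sync =
      webrtc::VoEVideoSync::GetInterface(voice_engine);
  if (sync == NULL) {
    // Built without WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API, or NULL engine.
    return false;
  }
  sync->Release();  // Balance the reference taken by GetInterface().
  return true;
}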
 
 #ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
 
-VoEVideoSyncImpl::VoEVideoSyncImpl(voe::SharedData* shared) : _shared(shared)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoEVideoSyncImpl::VoEVideoSyncImpl() - ctor");
+VoEVideoSyncImpl::VoEVideoSyncImpl(voe::SharedData* shared) : _shared(shared) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoEVideoSyncImpl::VoEVideoSyncImpl() - ctor");
 }
 
-VoEVideoSyncImpl::~VoEVideoSyncImpl()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "VoEVideoSyncImpl::~VoEVideoSyncImpl() - dtor");
+VoEVideoSyncImpl::~VoEVideoSyncImpl() {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoEVideoSyncImpl::~VoEVideoSyncImpl() - dtor");
 }
 
-int VoEVideoSyncImpl::GetPlayoutTimestamp(int channel, unsigned int& timestamp)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetPlayoutTimestamp(channel=%d, timestamp=?)", channel);
+int VoEVideoSyncImpl::GetPlayoutTimestamp(int channel,
+                                          unsigned int& timestamp) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetPlayoutTimestamp(channel=%d, timestamp=?)", channel);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channel_ptr = ch.channel();
-    if (channel_ptr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetPlayoutTimestamp() failed to locate channel");
-        return -1;
-    }
-    return channel_ptr->GetPlayoutTimestamp(timestamp);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channel_ptr = ch.channel();
+  if (channel_ptr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetPlayoutTimestamp() failed to locate channel");
+    return -1;
+  }
+  return channel_ptr->GetPlayoutTimestamp(timestamp);
 }
 
-int VoEVideoSyncImpl::SetInitTimestamp(int channel,
-                                       unsigned int timestamp)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetInitTimestamp(channel=%d, timestamp=%lu)",
-                 channel, timestamp);
+int VoEVideoSyncImpl::SetInitTimestamp(int channel, unsigned int timestamp) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetInitTimestamp(channel=%d, timestamp=%lu)", channel,
+               timestamp);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetInitTimestamp() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetInitTimestamp(timestamp);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetInitTimestamp() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetInitTimestamp(timestamp);
 }
 
-int VoEVideoSyncImpl::SetInitSequenceNumber(int channel,
-                                            short sequenceNumber)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetInitSequenceNumber(channel=%d, sequenceNumber=%hd)",
-                 channel, sequenceNumber);
+int VoEVideoSyncImpl::SetInitSequenceNumber(int channel, short sequenceNumber) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetInitSequenceNumber(channel=%d, sequenceNumber=%hd)", channel,
+               sequenceNumber);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetInitSequenceNumber() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetInitSequenceNumber(sequenceNumber);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetInitSequenceNumber() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetInitSequenceNumber(sequenceNumber);
 }
 
-int VoEVideoSyncImpl::SetMinimumPlayoutDelay(int channel,int delayMs)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetMinimumPlayoutDelay(channel=%d, delayMs=%d)",
-                 channel, delayMs);
+int VoEVideoSyncImpl::SetMinimumPlayoutDelay(int channel, int delayMs) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetMinimumPlayoutDelay(channel=%d, delayMs=%d)", channel,
+               delayMs);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetMinimumPlayoutDelay() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetMinimumPlayoutDelay(delayMs);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetMinimumPlayoutDelay() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetMinimumPlayoutDelay(delayMs);
 }
 
-int VoEVideoSyncImpl::SetInitialPlayoutDelay(int channel, int delay_ms)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "SetInitialPlayoutDelay(channel=%d, delay_ms=%d)",
-                 channel, delay_ms);
+int VoEVideoSyncImpl::SetInitialPlayoutDelay(int channel, int delay_ms) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetInitialPlayoutDelay(channel=%d, delay_ms=%d)", channel,
+               delay_ms);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetInitialPlayoutDelay() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetInitialPlayoutDelay(delay_ms);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetInitialPlayoutDelay() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetInitialPlayoutDelay(delay_ms);
 }
 
 int VoEVideoSyncImpl::GetDelayEstimate(int channel,
@@ -182,52 +162,45 @@
   return 0;
 }
 
-int VoEVideoSyncImpl::GetPlayoutBufferSize(int& bufferMs)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+int VoEVideoSyncImpl::GetPlayoutBufferSize(int& bufferMs) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetPlayoutBufferSize(bufferMs=?)");
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    AudioDeviceModule::BufferType type
-        (AudioDeviceModule::kFixedBufferSize);
-    uint16_t sizeMS(0);
-    if (_shared->audio_device()->PlayoutBuffer(&type, &sizeMS) != 0)
-    {
-        _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
-            "GetPlayoutBufferSize() failed to read buffer size");
-        return -1;
-    }
-    bufferMs = sizeMS;
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "GetPlayoutBufferSize() => bufferMs=%d", bufferMs);
-    return 0;
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  AudioDeviceModule::BufferType type(AudioDeviceModule::kFixedBufferSize);
+  uint16_t sizeMS(0);
+  if (_shared->audio_device()->PlayoutBuffer(&type, &sizeMS) != 0) {
+    _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+                          "GetPlayoutBufferSize() failed to read buffer size");
+    return -1;
+  }
+  bufferMs = sizeMS;
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetPlayoutBufferSize() => bufferMs=%d", bufferMs);
+  return 0;
 }
 
-int VoEVideoSyncImpl::GetRtpRtcp(int channel, RtpRtcp** rtpRtcpModule,
-                                 RtpReceiver** rtp_receiver)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
-                 "GetRtpRtcp(channel=%i)", channel);
+int VoEVideoSyncImpl::GetRtpRtcp(int channel,
+                                 RtpRtcp** rtpRtcpModule,
+                                 RtpReceiver** rtp_receiver) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRtpRtcp(channel=%i)", channel);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetPlayoutTimestamp() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetRtpRtcp(rtpRtcpModule, rtp_receiver);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetRtpRtcp() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetRtpRtcp(rtpRtcpModule, rtp_receiver);
 }
 
 int VoEVideoSyncImpl::GetLeastRequiredDelayMs(int channel) const {
diff --git a/webrtc/voice_engine/voe_video_sync_impl.h b/webrtc/voice_engine/voe_video_sync_impl.h
index 8c516fb..aac575c 100644
--- a/webrtc/voice_engine/voe_video_sync_impl.h
+++ b/webrtc/voice_engine/voe_video_sync_impl.h
@@ -17,38 +17,38 @@
 
 namespace webrtc {
 
-class VoEVideoSyncImpl : public VoEVideoSync
-{
-public:
-    virtual int GetPlayoutBufferSize(int& bufferMs);
+class VoEVideoSyncImpl : public VoEVideoSync {
+ public:
+  int GetPlayoutBufferSize(int& bufferMs) override;
 
-    virtual int SetMinimumPlayoutDelay(int channel, int delayMs);
+  int SetMinimumPlayoutDelay(int channel, int delayMs) override;
 
-    virtual int SetInitialPlayoutDelay(int channel, int delay_ms);
+  int SetInitialPlayoutDelay(int channel, int delay_ms) override;
 
-    virtual int GetDelayEstimate(int channel,
-                                 int* jitter_buffer_delay_ms,
-                                 int* playout_buffer_delay_ms);
+  int GetDelayEstimate(int channel,
+                       int* jitter_buffer_delay_ms,
+                       int* playout_buffer_delay_ms) override;
 
-    virtual int GetLeastRequiredDelayMs(int channel) const;
+  int GetLeastRequiredDelayMs(int channel) const override;
 
-    virtual int SetInitTimestamp(int channel, unsigned int timestamp);
+  int SetInitTimestamp(int channel, unsigned int timestamp) override;
 
-    virtual int SetInitSequenceNumber(int channel, short sequenceNumber);
+  int SetInitSequenceNumber(int channel, short sequenceNumber) override;
 
-    virtual int GetPlayoutTimestamp(int channel, unsigned int& timestamp);
+  int GetPlayoutTimestamp(int channel, unsigned int& timestamp) override;
 
-    virtual int GetRtpRtcp(int channel, RtpRtcp** rtpRtcpModule,
-                           RtpReceiver** rtp_receiver);
+  int GetRtpRtcp(int channel,
+                 RtpRtcp** rtpRtcpModule,
+                 RtpReceiver** rtp_receiver) override;
 
-protected:
-    VoEVideoSyncImpl(voe::SharedData* shared);
-    virtual ~VoEVideoSyncImpl();
+ protected:
+  VoEVideoSyncImpl(voe::SharedData* shared);
+  ~VoEVideoSyncImpl() override;
 
-private:
-    voe::SharedData* _shared;
+ private:
+  voe::SharedData* _shared;
 };
 
 }  // namespace webrtc
 
-#endif    // WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H
+#endif  // WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H
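A short usage sketch of the interface declared above, e.g. for lining audio playout up with an external video renderer. The header path and the delay value are illustrative assumptions, and error handling is omitted.

#include "webrtc/voice_engine/include/voe_video_sync.h"  // assumed path

// Reads the current playout timestamp and delay estimate, then asks the
// channel to hold audio back a little so video can catch up.
void TuneAudioForVideoSync(webrtc::VoEVideoSync* sync, int channel) {
  unsigned int playout_timestamp = 0;
  sync->GetPlayoutTimestamp(channel, playout_timestamp);

  int jitter_buffer_delay_ms = 0;
  int playout_buffer_delay_ms = 0;
  sync->GetDelayEstimate(channel, &jitter_buffer_delay_ms,
                         &playout_buffer_delay_ms);

  sync->SetMinimumPlayoutDelay(channel, 120);  // 120 ms is an example value.
}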
diff --git a/webrtc/voice_engine/voe_volume_control_impl.cc b/webrtc/voice_engine/voe_volume_control_impl.cc
index f27c4ff..3d8a7a8 100644
--- a/webrtc/voice_engine/voe_volume_control_impl.cc
+++ b/webrtc/voice_engine/voe_volume_control_impl.cc
@@ -20,507 +20,435 @@
 
 namespace webrtc {
 
-VoEVolumeControl* VoEVolumeControl::GetInterface(VoiceEngine* voiceEngine)
-{
+VoEVolumeControl* VoEVolumeControl::GetInterface(VoiceEngine* voiceEngine) {
 #ifndef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
-    return NULL;
+  return NULL;
 #else
-    if (NULL == voiceEngine)
-    {
-        return NULL;
-    }
-    VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
-    s->AddRef();
-    return s;
+  if (NULL == voiceEngine) {
+    return NULL;
+  }
+  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+  s->AddRef();
+  return s;
 #endif
 }
 
 #ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
 
 VoEVolumeControlImpl::VoEVolumeControlImpl(voe::SharedData* shared)
-    : _shared(shared)
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+    : _shared(shared) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "VoEVolumeControlImpl::VoEVolumeControlImpl() - ctor");
 }
 
-VoEVolumeControlImpl::~VoEVolumeControlImpl()
-{
-    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+VoEVolumeControlImpl::~VoEVolumeControlImpl() {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "VoEVolumeControlImpl::~VoEVolumeControlImpl() - dtor");
 }
 
-int VoEVolumeControlImpl::SetSpeakerVolume(unsigned int volume)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+int VoEVolumeControlImpl::SetSpeakerVolume(unsigned int volume) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "SetSpeakerVolume(volume=%u)", volume);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (volume > kMaxVolumeLevel)
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetSpeakerVolume() invalid argument");
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (volume > kMaxVolumeLevel) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "SetSpeakerVolume() invalid argument");
+    return -1;
+  }
 
-    uint32_t maxVol(0);
-    uint32_t spkrVol(0);
+  uint32_t maxVol(0);
+  uint32_t spkrVol(0);
 
-    // scale: [0,kMaxVolumeLevel] -> [0,MaxSpeakerVolume]
-    if (_shared->audio_device()->MaxSpeakerVolume(&maxVol) != 0)
-    {
-        _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
-            "SetSpeakerVolume() failed to get max volume");
-        return -1;
-    }
-    // Round the value and avoid floating computation.
-    spkrVol = (uint32_t)((volume * maxVol +
-        (int)(kMaxVolumeLevel / 2)) / (kMaxVolumeLevel));
+  // scale: [0,kMaxVolumeLevel] -> [0,MaxSpeakerVolume]
+  if (_shared->audio_device()->MaxSpeakerVolume(&maxVol) != 0) {
+    _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
+                          "SetSpeakerVolume() failed to get max volume");
+    return -1;
+  }
+  // Round the value and avoid floating-point computation.
+  spkrVol = (uint32_t)((volume * maxVol + (int)(kMaxVolumeLevel / 2)) /
+                       (kMaxVolumeLevel));
 
-    // set the actual volume using the audio mixer
-    if (_shared->audio_device()->SetSpeakerVolume(spkrVol) != 0)
-    {
-        _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
-            "SetSpeakerVolume() failed to set speaker volume");
-        return -1;
-    }
-    return 0;
+  // set the actual volume using the audio mixer
+  if (_shared->audio_device()->SetSpeakerVolume(spkrVol) != 0) {
+    _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
+                          "SetSpeakerVolume() failed to set speaker volume");
+    return -1;
+  }
+  return 0;
 }
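To make the integer rounding above concrete, assume kMaxVolumeLevel == 255 (its value in voice_engine_defines.h, not shown in this CL's hunks) and a hypothetical device whose MaxSpeakerVolume() reports 100. A request of volume = 130 then maps to

  spkrVol = (130 * 100 + 255 / 2) / 255 = (13000 + 127) / 255 = 51

while plain truncation would have produced 50; the kMaxVolumeLevel / 2 term turns the integer division into round-to-nearest.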
 
-int VoEVolumeControlImpl::GetSpeakerVolume(unsigned int& volume)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+int VoEVolumeControlImpl::GetSpeakerVolume(unsigned int& volume) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetSpeakerVolume()");
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
 
-    uint32_t spkrVol(0);
-    uint32_t maxVol(0);
+  uint32_t spkrVol(0);
+  uint32_t maxVol(0);
 
-    if (_shared->audio_device()->SpeakerVolume(&spkrVol) != 0)
-    {
-        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
-            "GetSpeakerVolume() unable to get speaker volume");
-        return -1;
-    }
+  if (_shared->audio_device()->SpeakerVolume(&spkrVol) != 0) {
+    _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+                          "GetSpeakerVolume() unable to get speaker volume");
+    return -1;
+  }
 
-    // scale: [0, MaxSpeakerVolume] -> [0, kMaxVolumeLevel]
-    if (_shared->audio_device()->MaxSpeakerVolume(&maxVol) != 0)
-    {
-        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
-            "GetSpeakerVolume() unable to get max speaker volume");
-        return -1;
-    }
-    // Round the value and avoid floating computation.
-    volume = (uint32_t) ((spkrVol * kMaxVolumeLevel +
-        (int)(maxVol / 2)) / (maxVol));
+  // scale: [0, MaxSpeakerVolume] -> [0, kMaxVolumeLevel]
+  if (_shared->audio_device()->MaxSpeakerVolume(&maxVol) != 0) {
+    _shared->SetLastError(
+        VE_GET_MIC_VOL_ERROR, kTraceError,
+        "GetSpeakerVolume() unable to get max speaker volume");
+    return -1;
+  }
+  // Round the value and avoid floating-point computation.
+  volume =
+      (uint32_t)((spkrVol * kMaxVolumeLevel + (int)(maxVol / 2)) / (maxVol));
 
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "GetSpeakerVolume() => volume=%d", volume);
-    return 0;
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetSpeakerVolume() => volume=%d", volume);
+  return 0;
 }
 
-int VoEVolumeControlImpl::SetMicVolume(unsigned int volume)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+int VoEVolumeControlImpl::SetMicVolume(unsigned int volume) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "SetMicVolume(volume=%u)", volume);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (volume > kMaxVolumeLevel)
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetMicVolume() invalid argument");
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (volume > kMaxVolumeLevel) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "SetMicVolume() invalid argument");
+    return -1;
+  }
 
-    uint32_t maxVol(0);
-    uint32_t micVol(0);
+  uint32_t maxVol(0);
+  uint32_t micVol(0);
 
-    // scale: [0, kMaxVolumeLevel] -> [0,MaxMicrophoneVolume]
-    if (_shared->audio_device()->MaxMicrophoneVolume(&maxVol) != 0)
-    {
-        _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
-            "SetMicVolume() failed to get max volume");
-        return -1;
+  // scale: [0, kMaxVolumeLevel] -> [0,MaxMicrophoneVolume]
+  if (_shared->audio_device()->MaxMicrophoneVolume(&maxVol) != 0) {
+    _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
+                          "SetMicVolume() failed to get max volume");
+    return -1;
+  }
+
+  if (volume == kMaxVolumeLevel) {
+    // On Linux running pulse, users are able to set the volume above 100%
+    // through the volume control panel, where the +100% range is digital
+    // scaling. WebRTC does not support setting the volume above 100%, and
+    // simply ignores changing the volume if the user tries to set it to
+    // |kMaxVolumeLevel| while the current volume is higher than |maxVol|.
+    if (_shared->audio_device()->MicrophoneVolume(&micVol) != 0) {
+      _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+                            "SetMicVolume() unable to get microphone volume");
+      return -1;
     }
+    if (micVol >= maxVol)
+      return 0;
+  }
 
-    if (volume == kMaxVolumeLevel) {
-      // On Linux running pulse, users are able to set the volume above 100%
-      // through the volume control panel, where the +100% range is digital
-      // scaling. WebRTC does not support setting the volume above 100%, and
-      // simply ignores changing the volume if the user tries to set it to
-      // |kMaxVolumeLevel| while the current volume is higher than |maxVol|.
-      if (_shared->audio_device()->MicrophoneVolume(&micVol) != 0) {
-        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
-            "SetMicVolume() unable to get microphone volume");
-        return -1;
-      }
-      if (micVol >= maxVol)
-        return 0;
-    }
+  // Round the value and avoid floating point computation.
+  micVol = (uint32_t)((volume * maxVol + (int)(kMaxVolumeLevel / 2)) /
+                      (kMaxVolumeLevel));
 
-    // Round the value and avoid floating point computation.
-    micVol = (uint32_t) ((volume * maxVol +
-        (int)(kMaxVolumeLevel / 2)) / (kMaxVolumeLevel));
-
-    // set the actual volume using the audio mixer
-    if (_shared->audio_device()->SetMicrophoneVolume(micVol) != 0)
-    {
-        _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
-            "SetMicVolume() failed to set mic volume");
-        return -1;
-    }
-    return 0;
+  // set the actual volume using the audio mixer
+  if (_shared->audio_device()->SetMicrophoneVolume(micVol) != 0) {
+    _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
+                          "SetMicVolume() failed to set mic volume");
+    return -1;
+  }
+  return 0;
 }
 
-int VoEVolumeControlImpl::GetMicVolume(unsigned int& volume)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+int VoEVolumeControlImpl::GetMicVolume(unsigned int& volume) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetMicVolume()");
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
 
-    uint32_t micVol(0);
-    uint32_t maxVol(0);
+  uint32_t micVol(0);
+  uint32_t maxVol(0);
 
-    if (_shared->audio_device()->MicrophoneVolume(&micVol) != 0)
-    {
-        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
-            "GetMicVolume() unable to get microphone volume");
-        return -1;
-    }
+  if (_shared->audio_device()->MicrophoneVolume(&micVol) != 0) {
+    _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+                          "GetMicVolume() unable to get microphone volume");
+    return -1;
+  }
 
-    // scale: [0, MaxMicrophoneVolume] -> [0, kMaxVolumeLevel]
-    if (_shared->audio_device()->MaxMicrophoneVolume(&maxVol) != 0)
-    {
-        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
-            "GetMicVolume() unable to get max microphone volume");
-        return -1;
-    }
-    if (micVol < maxVol) {
-      // Round the value and avoid floating point calculation.
-      volume = (uint32_t) ((micVol * kMaxVolumeLevel +
-          (int)(maxVol / 2)) / (maxVol));
-    } else {
-      // Truncate the value to the kMaxVolumeLevel.
-      volume = kMaxVolumeLevel;
-    }
+  // scale: [0, MaxMicrophoneVolume] -> [0, kMaxVolumeLevel]
+  if (_shared->audio_device()->MaxMicrophoneVolume(&maxVol) != 0) {
+    _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+                          "GetMicVolume() unable to get max microphone volume");
+    return -1;
+  }
+  if (micVol < maxVol) {
+    // Round the value and avoid floating point calculation.
+    volume =
+        (uint32_t)((micVol * kMaxVolumeLevel + (int)(maxVol / 2)) / (maxVol));
+  } else {
+    // Clamp the value to kMaxVolumeLevel.
+    volume = kMaxVolumeLevel;
+  }
 
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "GetMicVolume() => volume=%d", volume);
-    return 0;
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetMicVolume() => volume=%d", volume);
+  return 0;
 }
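A small round trip through the two methods above via the public VoEVolumeControl interface (header path assumed). Note the asymmetry spelled out in the code: a PulseAudio level boosted past 100% reads back as kMaxVolumeLevel, and requesting kMaxVolumeLevel in that state is deliberately a no-op.

#include "webrtc/voice_engine/include/voe_volume_control.h"  // assumed path

// Illustrative only: lower the microphone by one step on the
// 0..kMaxVolumeLevel scale used by this sub-API.
void NudgeMicDown(webrtc::VoEVolumeControl* volume_control) {
  unsigned int level = 0;
  if (volume_control->GetMicVolume(level) != 0)
    return;
  if (level > 0)
    volume_control->SetMicVolume(level - 1);
}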
 
-int VoEVolumeControlImpl::SetInputMute(int channel, bool enable)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+int VoEVolumeControlImpl::SetInputMute(int channel, bool enable) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "SetInputMute(channel=%d, enable=%d)", channel, enable);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (channel == -1)
-    {
-        // Mute before demultiplexing <=> affects all channels
-        return _shared->transmit_mixer()->SetMute(enable);
-    }
-    // Mute after demultiplexing <=> affects one channel only
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetInputMute() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetMute(enable);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (channel == -1) {
+    // Mute before demultiplexing <=> affects all channels
+    return _shared->transmit_mixer()->SetMute(enable);
+  }
+  // Mute after demultiplexing <=> affects one channel only
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetInputMute() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetMute(enable);
 }
 
-int VoEVolumeControlImpl::GetInputMute(int channel, bool& enabled)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+int VoEVolumeControlImpl::GetInputMute(int channel, bool& enabled) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetInputMute(channel=%d)", channel);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (channel == -1) {
+    enabled = _shared->transmit_mixer()->Mute();
+  } else {
+    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+    voe::Channel* channelPtr = ch.channel();
+    if (channelPtr == NULL) {
+      _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                            "GetInputMute() failed to locate channel");
+      return -1;
     }
-    if (channel == -1)
-    {
-        enabled = _shared->transmit_mixer()->Mute();
-    }
-    else
-    {
-        voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-        voe::Channel* channelPtr = ch.channel();
-        if (channelPtr == NULL)
-        {
-            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-                "SetInputMute() failed to locate channel");
-            return -1;
-        }
-        enabled = channelPtr->Mute();
-    }
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "GetInputMute() => enabled = %d", (int)enabled);
-    return 0;
+    enabled = channelPtr->Mute();
+  }
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetInputMute() => enabled = %d", (int)enabled);
+  return 0;
 }
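The channel == -1 convention above, mute in the transmit mixer before demultiplexing versus per-channel mute after it, looks like this from the caller's side (hypothetical helper, header path assumed):

#include "webrtc/voice_engine/include/voe_volume_control.h"  // assumed path

// Remembers whether one channel was individually muted, then mutes the
// whole transmit path (all channels at once) via channel == -1.
bool MuteAllRememberChannel(webrtc::VoEVolumeControl* volume_control,
                            int channel) {
  bool channel_was_muted = false;
  volume_control->GetInputMute(channel, channel_was_muted);  // after demux
  volume_control->SetInputMute(-1, true);                    // before demux
  return channel_was_muted;
}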
 
-int VoEVolumeControlImpl::GetSpeechInputLevel(unsigned int& level)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+int VoEVolumeControlImpl::GetSpeechInputLevel(unsigned int& level) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetSpeechInputLevel()");
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    int8_t currentLevel = _shared->transmit_mixer()->AudioLevel();
-    level = static_cast<unsigned int> (currentLevel);
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "GetSpeechInputLevel() => %d", level);
-    return 0;
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  int8_t currentLevel = _shared->transmit_mixer()->AudioLevel();
+  level = static_cast<unsigned int>(currentLevel);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetSpeechInputLevel() => %d", level);
+  return 0;
 }
 
 int VoEVolumeControlImpl::GetSpeechOutputLevel(int channel,
-                                               unsigned int& level)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                                               unsigned int& level) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetSpeechOutputLevel(channel=%d, level=?)", channel);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (channel == -1) {
+    return _shared->output_mixer()->GetSpeechOutputLevel((uint32_t&)level);
+  } else {
+    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+    voe::Channel* channelPtr = ch.channel();
+    if (channelPtr == NULL) {
+      _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                            "GetSpeechOutputLevel() failed to locate channel");
+      return -1;
     }
-    if (channel == -1)
-    {
-        return _shared->output_mixer()->GetSpeechOutputLevel(
-            (uint32_t&)level);
-    }
-    else
-    {
-        voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-        voe::Channel* channelPtr = ch.channel();
-        if (channelPtr == NULL)
-        {
-            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-                "GetSpeechOutputLevel() failed to locate channel");
-            return -1;
-        }
-        channelPtr->GetSpeechOutputLevel((uint32_t&)level);
-    }
-    return 0;
+    channelPtr->GetSpeechOutputLevel((uint32_t&)level);
+  }
+  return 0;
 }
 
-int VoEVolumeControlImpl::GetSpeechInputLevelFullRange(unsigned int& level)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+int VoEVolumeControlImpl::GetSpeechInputLevelFullRange(unsigned int& level) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetSpeechInputLevelFullRange(level=?)");
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    int16_t currentLevel = _shared->transmit_mixer()->
-        AudioLevelFullRange();
-    level = static_cast<unsigned int> (currentLevel);
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
-        VoEId(_shared->instance_id(), -1),
-        "GetSpeechInputLevelFullRange() => %d", level);
-    return 0;
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  int16_t currentLevel = _shared->transmit_mixer()->AudioLevelFullRange();
+  level = static_cast<unsigned int>(currentLevel);
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetSpeechInputLevelFullRange() => %d", level);
+  return 0;
 }
 
 int VoEVolumeControlImpl::GetSpeechOutputLevelFullRange(int channel,
-                                                        unsigned int& level)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                                                        unsigned int& level) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetSpeechOutputLevelFullRange(channel=%d, level=?)", channel);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (channel == -1) {
+    return _shared->output_mixer()->GetSpeechOutputLevelFullRange(
+        (uint32_t&)level);
+  } else {
+    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+    voe::Channel* channelPtr = ch.channel();
+    if (channelPtr == NULL) {
+      _shared->SetLastError(
+          VE_CHANNEL_NOT_VALID, kTraceError,
+          "GetSpeechOutputLevelFullRange() failed to locate channel");
+      return -1;
     }
-    if (channel == -1)
-    {
-        return _shared->output_mixer()->GetSpeechOutputLevelFullRange(
-            (uint32_t&)level);
-    }
-    else
-    {
-        voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-        voe::Channel* channelPtr = ch.channel();
-        if (channelPtr == NULL)
-        {
-            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-                "GetSpeechOutputLevelFullRange() failed to locate channel");
-            return -1;
-        }
-        channelPtr->GetSpeechOutputLevelFullRange((uint32_t&)level);
-    }
-    return 0;
+    channelPtr->GetSpeechOutputLevelFullRange((uint32_t&)level);
+  }
+  return 0;
 }
 
 int VoEVolumeControlImpl::SetChannelOutputVolumeScaling(int channel,
-                                                        float scaling)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                                                        float scaling) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "SetChannelOutputVolumeScaling(channel=%d, scaling=%3.2f)",
                channel, scaling);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if (scaling < kMinOutputVolumeScaling ||
-        scaling > kMaxOutputVolumeScaling)
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetChannelOutputVolumeScaling() invalid parameter");
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetChannelOutputVolumeScaling() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetChannelOutputVolumeScaling(scaling);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (scaling < kMinOutputVolumeScaling || scaling > kMaxOutputVolumeScaling) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "SetChannelOutputVolumeScaling() invalid parameter");
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(
+        VE_CHANNEL_NOT_VALID, kTraceError,
+        "SetChannelOutputVolumeScaling() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetChannelOutputVolumeScaling(scaling);
 }
 
 int VoEVolumeControlImpl::GetChannelOutputVolumeScaling(int channel,
-                                                        float& scaling)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                                                        float& scaling) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetChannelOutputVolumeScaling(channel=%d, scaling=?)", channel);
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetChannelOutputVolumeScaling() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetChannelOutputVolumeScaling(scaling);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(
+        VE_CHANNEL_NOT_VALID, kTraceError,
+        "GetChannelOutputVolumeScaling() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetChannelOutputVolumeScaling(scaling);
 }
 
 int VoEVolumeControlImpl::SetOutputVolumePan(int channel,
                                              float left,
-                                             float right)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                                             float right) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "SetOutputVolumePan(channel=%d, left=%2.1f, right=%2.1f)",
                channel, left, right);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
 
-    bool available(false);
-    _shared->audio_device()->StereoPlayoutIsAvailable(&available);
-    if (!available)
-    {
-        _shared->SetLastError(VE_FUNC_NO_STEREO, kTraceError,
-            "SetOutputVolumePan() stereo playout not supported");
-        return -1;
-    }
-    if ((left < kMinOutputVolumePanning)  ||
-        (left > kMaxOutputVolumePanning)  ||
-        (right < kMinOutputVolumePanning) ||
-        (right > kMaxOutputVolumePanning))
-    {
-        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetOutputVolumePan() invalid parameter");
-        return -1;
-    }
+  bool available(false);
+  _shared->audio_device()->StereoPlayoutIsAvailable(&available);
+  if (!available) {
+    _shared->SetLastError(VE_FUNC_NO_STEREO, kTraceError,
+                          "SetOutputVolumePan() stereo playout not supported");
+    return -1;
+  }
+  if ((left < kMinOutputVolumePanning) || (left > kMaxOutputVolumePanning) ||
+      (right < kMinOutputVolumePanning) || (right > kMaxOutputVolumePanning)) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                          "SetOutputVolumePan() invalid parameter");
+    return -1;
+  }
 
-    if (channel == -1)
-    {
-        // Master balance (affectes the signal after output mixing)
-        return _shared->output_mixer()->SetOutputVolumePan(left, right);
-    }
-    // Per-channel balance (affects the signal before output mixing)
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "SetOutputVolumePan() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->SetOutputVolumePan(left, right);
+  if (channel == -1) {
+    // Master balance (affects the signal after output mixing)
+    return _shared->output_mixer()->SetOutputVolumePan(left, right);
+  }
+  // Per-channel balance (affects the signal before output mixing)
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "SetOutputVolumePan() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetOutputVolumePan(left, right);
 }
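A sketch of the master/per-channel split above: channel == -1 pans the mixed output, anything else pans a single channel before mixing. The header path and the assumption that kMinOutputVolumePanning is 0.0f (only the 1.0f maximum is visible in this patch) are illustrative, not part of the CL.

#include "webrtc/voice_engine/include/voe_volume_control.h"  // assumed path

// Pans the master output hard left. Fails with VE_FUNC_NO_STEREO when the
// device cannot do stereo playout, per the StereoPlayoutIsAvailable() check.
int PanMasterHardLeft(webrtc::VoEVolumeControl* volume_control) {
  const float left = 1.0f;   // kMaxOutputVolumePanning
  const float right = 0.0f;  // assumed kMinOutputVolumePanning
  return volume_control->SetOutputVolumePan(-1, left, right);
}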
 
 int VoEVolumeControlImpl::GetOutputVolumePan(int channel,
                                              float& left,
-                                             float& right)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                                             float& right) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                "GetOutputVolumePan(channel=%d, left=?, right=?)", channel);
 
-    if (!_shared->statistics().Initialized())
-    {
-        _shared->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
 
-    bool available(false);
-    _shared->audio_device()->StereoPlayoutIsAvailable(&available);
-    if (!available)
-    {
-        _shared->SetLastError(VE_FUNC_NO_STEREO, kTraceError,
-            "GetOutputVolumePan() stereo playout not supported");
-        return -1;
-    }
+  bool available(false);
+  _shared->audio_device()->StereoPlayoutIsAvailable(&available);
+  if (!available) {
+    _shared->SetLastError(VE_FUNC_NO_STEREO, kTraceError,
+                          "GetOutputVolumePan() stereo playout not supported");
+    return -1;
+  }
 
-    if (channel == -1)
-    {
-        return _shared->output_mixer()->GetOutputVolumePan(left, right);
-    }
-    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
-    voe::Channel* channelPtr = ch.channel();
-    if (channelPtr == NULL)
-    {
-        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
-            "GetOutputVolumePan() failed to locate channel");
-        return -1;
-    }
-    return channelPtr->GetOutputVolumePan(left, right);
+  if (channel == -1) {
+    return _shared->output_mixer()->GetOutputVolumePan(left, right);
+  }
+  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
+  voe::Channel* channelPtr = ch.channel();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                          "GetOutputVolumePan() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetOutputVolumePan(left, right);
 }
 
 #endif  // #ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
diff --git a/webrtc/voice_engine/voe_volume_control_impl.h b/webrtc/voice_engine/voe_volume_control_impl.h
index b5e3b1b..16c9c7d 100644
--- a/webrtc/voice_engine/voe_volume_control_impl.h
+++ b/webrtc/voice_engine/voe_volume_control_impl.h
@@ -17,47 +17,44 @@
 
 namespace webrtc {
 
-class VoEVolumeControlImpl : public VoEVolumeControl
-{
-public:
-    virtual int SetSpeakerVolume(unsigned int volume);
+class VoEVolumeControlImpl : public VoEVolumeControl {
+ public:
+  int SetSpeakerVolume(unsigned int volume) override;
 
-    virtual int GetSpeakerVolume(unsigned int& volume);
+  int GetSpeakerVolume(unsigned int& volume) override;
 
-    virtual int SetMicVolume(unsigned int volume);
+  int SetMicVolume(unsigned int volume) override;
 
-    virtual int GetMicVolume(unsigned int& volume);
+  int GetMicVolume(unsigned int& volume) override;
 
-    virtual int SetInputMute(int channel, bool enable);
+  int SetInputMute(int channel, bool enable) override;
 
-    virtual int GetInputMute(int channel, bool& enabled);
+  int GetInputMute(int channel, bool& enabled) override;
 
-    virtual int GetSpeechInputLevel(unsigned int& level);
+  int GetSpeechInputLevel(unsigned int& level) override;
 
-    virtual int GetSpeechOutputLevel(int channel, unsigned int& level);
+  int GetSpeechOutputLevel(int channel, unsigned int& level) override;
 
-    virtual int GetSpeechInputLevelFullRange(unsigned int& level);
+  int GetSpeechInputLevelFullRange(unsigned int& level) override;
 
-    virtual int GetSpeechOutputLevelFullRange(int channel,
-                                              unsigned int& level);
+  int GetSpeechOutputLevelFullRange(int channel, unsigned int& level) override;
 
-    virtual int SetChannelOutputVolumeScaling(int channel, float scaling);
+  int SetChannelOutputVolumeScaling(int channel, float scaling) override;
 
-    virtual int GetChannelOutputVolumeScaling(int channel, float& scaling);
+  int GetChannelOutputVolumeScaling(int channel, float& scaling) override;
 
-    virtual int SetOutputVolumePan(int channel, float left, float right);
+  int SetOutputVolumePan(int channel, float left, float right) override;
 
-    virtual int GetOutputVolumePan(int channel, float& left, float& right);
+  int GetOutputVolumePan(int channel, float& left, float& right) override;
 
+ protected:
+  VoEVolumeControlImpl(voe::SharedData* shared);
+  ~VoEVolumeControlImpl() override;
 
-protected:
-    VoEVolumeControlImpl(voe::SharedData* shared);
-    virtual ~VoEVolumeControlImpl();
-
-private:
-    voe::SharedData* _shared;
+ private:
+  voe::SharedData* _shared;
 };
 
 }  // namespace webrtc
 
-#endif    // WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H
+#endif  // WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H
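Tying the header above together, a hypothetical helper that combines the device-level control with the per-channel software scaling (kMaxVolumeLevel == 255 and 0.25f being inside the allowed scaling range are assumptions):

#include "webrtc/voice_engine/include/voe_volume_control.h"  // assumed path

// Raise the speaker to full scale, then attenuate one channel in software
// so the remaining channels stand out in the mix.
void EmphasizeOtherChannels(webrtc::VoEVolumeControl* volume_control,
                            int quiet_channel) {
  volume_control->SetSpeakerVolume(255);  // assumed kMaxVolumeLevel
  volume_control->SetChannelOutputVolumeScaling(quiet_channel, 0.25f);
}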
diff --git a/webrtc/voice_engine/voice_engine_defines.h b/webrtc/voice_engine/voice_engine_defines.h
index cde9470..5fd8120 100644
--- a/webrtc/voice_engine/voice_engine_defines.h
+++ b/webrtc/voice_engine/voice_engine_defines.h
@@ -44,17 +44,17 @@
 const float kMaxOutputVolumePanning = 1.0f;
 
 // DTMF
-enum { kMinDtmfEventCode = 0 };                 // DTMF digit "0"
-enum { kMaxDtmfEventCode = 15 };                // DTMF digit "D"
-enum { kMinTelephoneEventCode = 0 };            // RFC4733 (Section 2.3.1)
-enum { kMaxTelephoneEventCode = 255 };          // RFC4733 (Section 2.3.1)
+enum { kMinDtmfEventCode = 0 };         // DTMF digit "0"
+enum { kMaxDtmfEventCode = 15 };        // DTMF digit "D"
+enum { kMinTelephoneEventCode = 0 };    // RFC4733 (Section 2.3.1)
+enum { kMaxTelephoneEventCode = 255 };  // RFC4733 (Section 2.3.1)
 enum { kMinTelephoneEventDuration = 100 };
-enum { kMaxTelephoneEventDuration = 60000 };    // Actual limit is 2^16
-enum { kMinTelephoneEventAttenuation = 0 };     // 0 dBm0
-enum { kMaxTelephoneEventAttenuation = 36 };    // -36 dBm0
-enum { kMinTelephoneEventSeparationMs = 100 };  // Min delta time between two
-                                                // telephone events
-enum { kVoiceEngineMaxIpPacketSizeBytes = 1500 };       // assumes Ethernet
+enum { kMaxTelephoneEventDuration = 60000 };       // Actual limit is 2^16
+enum { kMinTelephoneEventAttenuation = 0 };        // 0 dBm0
+enum { kMaxTelephoneEventAttenuation = 36 };       // -36 dBm0
+enum { kMinTelephoneEventSeparationMs = 100 };     // Min delta time between two
+                                                   // telephone events
+enum { kVoiceEngineMaxIpPacketSizeBytes = 1500 };  // assumes Ethernet
 
 enum { kVoiceEngineMaxModuleVersionSize = 960 };
 
@@ -65,15 +65,15 @@
 const NoiseSuppression::Level kDefaultNsMode = NoiseSuppression::kModerate;
 const GainControl::Mode kDefaultAgcMode =
 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
-  GainControl::kAdaptiveDigital;
+    GainControl::kAdaptiveDigital;
 #else
-  GainControl::kAdaptiveAnalog;
+    GainControl::kAdaptiveAnalog;
 #endif
 const bool kDefaultAgcState =
 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
-  false;
+    false;
 #else
-  true;
+    true;
 #endif
 const GainControl::Mode kDefaultRxAgcMode = GainControl::kAdaptiveDigital;
 
@@ -131,53 +131,50 @@
 //  Macros
 // ----------------------------------------------------------------------------
 
-#define NOT_SUPPORTED(stat)                  \
-  LOG_F(LS_ERROR) << "not supported";        \
-  stat.SetLastError(VE_FUNC_NOT_SUPPORTED);  \
+#define NOT_SUPPORTED(stat)                 \
+  LOG_F(LS_ERROR) << "not supported";       \
+  stat.SetLastError(VE_FUNC_NOT_SUPPORTED); \
   return -1;
 
 #if (defined(_DEBUG) && defined(_WIN32) && (_MSC_VER >= 1400))
-  #include <windows.h>
-  #include <stdio.h>
-  #define DEBUG_PRINT(...)      \
-  {                             \
-    char msg[256];              \
-    sprintf(msg, __VA_ARGS__);  \
-    OutputDebugStringA(msg);    \
+#include <windows.h>
+#include <stdio.h>
+#define DEBUG_PRINT(...)       \
+  {                            \
+    char msg[256];             \
+    sprintf(msg, __VA_ARGS__); \
+    OutputDebugStringA(msg);   \
   }
 #else
-  // special fix for visual 2003
-  #define DEBUG_PRINT(exp)      ((void)0)
+// Special fix for Visual Studio 2003.
+#define DEBUG_PRINT(exp) ((void)0)
 #endif  // defined(_DEBUG) && defined(_WIN32)
 
-#define CHECK_CHANNEL(channel)  if (CheckChannel(channel) == -1) return -1;
+#define CHECK_CHANNEL(channel)     \
+  if (CheckChannel(channel) == -1) \
+    return -1;
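CHECK_CHANNEL() is meant to expand inside sub-API implementations that have a CheckChannel(int) helper in scope; the class below is a made-up stand-in for illustration only.

// Hypothetical expansion site for the macro; VoEExampleImpl and its
// CheckChannel() are not real VoE classes.
class VoEExampleImpl {
 public:
  int DoSomethingOnChannel(int channel) {
    CHECK_CHANNEL(channel);  // if (CheckChannel(channel) == -1) return -1;
    return 0;                // channel id was accepted
  }

 private:
  int CheckChannel(int channel) { return channel >= 0 ? 0 : -1; }
};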
 
 // ----------------------------------------------------------------------------
 //  Inline functions
 // ----------------------------------------------------------------------------
 
-namespace webrtc
-{
+namespace webrtc {
 
-inline int VoEId(int veId, int chId)
-{
-    if (chId == -1)
-    {
-        const int dummyChannel(99);
-        return (int) ((veId << 16) + dummyChannel);
-    }
-    return (int) ((veId << 16) + chId);
+inline int VoEId(int veId, int chId) {
+  if (chId == -1) {
+    const int dummyChannel(99);
+    return (int)((veId << 16) + dummyChannel);
+  }
+  return (int)((veId << 16) + chId);
 }
 
-inline int VoEModuleId(int veId, int chId)
-{
-    return (int) ((veId << 16) + chId);
+inline int VoEModuleId(int veId, int chId) {
+  return (int)((veId << 16) + chId);
 }
 
 // Convert module ID to internal VoE channel ID
-inline int VoEChannelId(int moduleId)
-{
-    return (int) (moduleId & 0xffff);
+inline int VoEChannelId(int moduleId) {
+  return (int)(moduleId & 0xffff);
 }
 
 }  // namespace webrtc
@@ -190,21 +187,21 @@
 
 #if defined(_WIN32)
 
-  #include <windows.h>
+#include <windows.h>
 
-  #pragma comment( lib, "winmm.lib" )
+#pragma comment(lib, "winmm.lib")
 
-  #ifndef WEBRTC_EXTERNAL_TRANSPORT
-    #pragma comment( lib, "ws2_32.lib" )
-  #endif
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+#pragma comment(lib, "ws2_32.lib")
+#endif
 
 // ----------------------------------------------------------------------------
 //  Defines
 // ----------------------------------------------------------------------------
 
 // Default device for Windows PC
-  #define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE \
-    AudioDeviceModule::kDefaultCommunicationDevice
+#define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE \
+  AudioDeviceModule::kDefaultCommunicationDevice
 
 #endif  // #if (defined(_WIN32)
 
@@ -218,11 +215,11 @@
 #include <sys/socket.h>
 #include <sys/types.h>
 #ifndef QNX
-  #include <linux/net.h>
+#include <linux/net.h>
 #ifndef ANDROID
-  #include <sys/soundcard.h>
-#endif // ANDROID
-#endif // QNX
+#include <sys/soundcard.h>
+#endif  // ANDROID
+#endif  // QNX
 #include <errno.h>
 #include <fcntl.h>
 #include <sched.h>
@@ -250,8 +247,8 @@
 #endif
 #define GetLastError() errno
 #define WSAGetLastError() errno
-#define LPCTSTR const char*
-#define LPCSTR const char*
+#define LPCTSTR const char *
+#define LPCSTR const char *
 #define wsprintf sprintf
 #define TEXT(a) a
 #define _ftprintf fprintf
@@ -287,11 +284,11 @@
 #include <time.h>
 #include <unistd.h>
 #if !defined(WEBRTC_IOS)
-  #include <CoreServices/CoreServices.h>
-  #include <CoreAudio/CoreAudio.h>
-  #include <AudioToolbox/DefaultAudioOutput.h>
-  #include <AudioToolbox/AudioConverter.h>
-  #include <CoreAudio/HostTime.h>
+#include <CoreServices/CoreServices.h>
+#include <CoreAudio/CoreAudio.h>
+#include <AudioToolbox/DefaultAudioOutput.h>
+#include <AudioToolbox/AudioConverter.h>
+#include <CoreAudio/HostTime.h>
 #endif
 
 #define DWORD unsigned long int
@@ -306,7 +303,7 @@
 #define _stricmp strcasecmp
 #define GetLastError() errno
 #define WSAGetLastError() errno
-#define LPCTSTR const char*
+#define LPCTSTR const char *
 #define wsprintf sprintf
 #define TEXT(a) a
 #define _ftprintf fprintf
@@ -314,11 +311,11 @@
 #define FAR
 #define __cdecl
 #define LPSOCKADDR struct sockaddr *
-#define LPCSTR const char*
+#define LPCSTR const char *
 #define ULONG unsigned long
 
 // Default device for Mac and iPhone
 #define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE 0
 #endif  // #ifdef WEBRTC_MAC
 
-#endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H
+#endif  // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H
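
Note: the VoEId/VoEModuleId/VoEChannelId helpers reformatted above only pack a VoiceEngine instance ID and a channel ID into one integer and unpack it again. A minimal sketch of that round trip, mirroring the inline functions in voice_engine_defines.h (the values 7 and 3 are arbitrary illustration inputs, not anything from the diff):

// Sketch only: same bit layout as VoEModuleId()/VoEChannelId() above.
#include <cassert>

int main() {
  const int veId = 7;  // hypothetical engine instance ID
  const int chId = 3;  // hypothetical channel ID
  const int moduleId = (veId << 16) + chId;  // VoEModuleId(veId, chId)
  assert((moduleId & 0xffff) == chId);       // VoEChannelId(moduleId)
  assert((moduleId >> 16) == veId);          // upper 16 bits hold the engine ID
  return 0;
}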
diff --git a/webrtc/voice_engine/voice_engine_impl.cc b/webrtc/voice_engine/voice_engine_impl.cc
index cb49a47..cea5113 100644
--- a/webrtc/voice_engine/voice_engine_impl.cc
+++ b/webrtc/voice_engine/voice_engine_impl.cc
@@ -22,8 +22,7 @@
 #include "webrtc/system_wrappers/interface/trace.h"
 #include "webrtc/voice_engine/voice_engine_impl.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
 // Counter to be ensure that we can add a correct ID in all static trace
 // methods. It is not the nicest solution, especially not since we already
@@ -31,15 +30,14 @@
 // improvement here.
 static int32_t gVoiceEngineInstanceCounter = 0;
 
-VoiceEngine* GetVoiceEngine(const Config* config, bool owns_config)
-{
+VoiceEngine* GetVoiceEngine(const Config* config, bool owns_config) {
 #if (defined _WIN32)
   HMODULE hmod = LoadLibrary(TEXT("VoiceEngineTestingDynamic.dll"));
 
   if (hmod) {
     typedef VoiceEngine* (*PfnGetVoiceEngine)(void);
-    PfnGetVoiceEngine pfn = (PfnGetVoiceEngine)GetProcAddress(
-        hmod,"GetVoiceEngine");
+    PfnGetVoiceEngine pfn =
+        (PfnGetVoiceEngine)GetProcAddress(hmod, "GetVoiceEngine");
     if (pfn) {
       VoiceEngine* self = pfn();
       if (owns_config) {
@@ -50,13 +48,12 @@
   }
 #endif
 
-    VoiceEngineImpl* self = new VoiceEngineImpl(config, owns_config);
-    if (self != NULL)
-    {
-        self->AddRef();  // First reference.  Released in VoiceEngine::Delete.
-        gVoiceEngineInstanceCounter++;
-    }
-    return self;
+  VoiceEngineImpl* self = new VoiceEngineImpl(config, owns_config);
+  if (self != NULL) {
+    self->AddRef();  // First reference.  Released in VoiceEngine::Delete.
+    gVoiceEngineInstanceCounter++;
+  }
+  return self;
 }
 
 int VoiceEngineImpl::AddRef() {
@@ -69,8 +66,7 @@
   assert(new_ref >= 0);
   if (new_ref == 0) {
     WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1,
-                 "VoiceEngineImpl self deleting (voiceEngine=0x%p)",
-                 this);
+                 "VoiceEngineImpl self deleting (voiceEngine=0x%p)", this);
 
     // Clear any pointers before starting destruction. Otherwise worker-
     // threads will still have pointers to a partially destructed object.
@@ -93,67 +89,62 @@
   return GetVoiceEngine(&config, false);
 }
 
-int VoiceEngine::SetTraceFilter(unsigned int filter)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
-                 VoEId(gVoiceEngineInstanceCounter, -1),
-                 "SetTraceFilter(filter=0x%x)", filter);
+int VoiceEngine::SetTraceFilter(unsigned int filter) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+               VoEId(gVoiceEngineInstanceCounter, -1),
+               "SetTraceFilter(filter=0x%x)", filter);
 
-    // Remember old filter
-    uint32_t oldFilter = Trace::level_filter();
-    Trace::set_level_filter(filter);
+  // Remember old filter
+  uint32_t oldFilter = Trace::level_filter();
+  Trace::set_level_filter(filter);
 
-    // If previous log was ignored, log again after changing filter
-    if (kTraceNone == oldFilter)
-    {
-        WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1,
-                     "SetTraceFilter(filter=0x%x)", filter);
-    }
+  // If previous log was ignored, log again after changing filter
+  if (kTraceNone == oldFilter) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1, "SetTraceFilter(filter=0x%x)",
+                 filter);
+  }
 
-    return 0;
+  return 0;
 }
 
-int VoiceEngine::SetTraceFile(const char* fileNameUTF8,
-                              bool addFileCounter)
-{
-    int ret = Trace::SetTraceFile(fileNameUTF8, addFileCounter);
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
-                 VoEId(gVoiceEngineInstanceCounter, -1),
-                 "SetTraceFile(fileNameUTF8=%s, addFileCounter=%d)",
-                 fileNameUTF8, addFileCounter);
-    return (ret);
+int VoiceEngine::SetTraceFile(const char* fileNameUTF8, bool addFileCounter) {
+  int ret = Trace::SetTraceFile(fileNameUTF8, addFileCounter);
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+               VoEId(gVoiceEngineInstanceCounter, -1),
+               "SetTraceFile(fileNameUTF8=%s, addFileCounter=%d)", fileNameUTF8,
+               addFileCounter);
+  return (ret);
 }
 
-int VoiceEngine::SetTraceCallback(TraceCallback* callback)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
-                 VoEId(gVoiceEngineInstanceCounter, -1),
-                 "SetTraceCallback(callback=0x%x)", callback);
-    return (Trace::SetTraceCallback(callback));
+int VoiceEngine::SetTraceCallback(TraceCallback* callback) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+               VoEId(gVoiceEngineInstanceCounter, -1),
+               "SetTraceCallback(callback=0x%x)", callback);
+  return (Trace::SetTraceCallback(callback));
 }
 
-bool VoiceEngine::Delete(VoiceEngine*& voiceEngine)
-{
-    if (voiceEngine == NULL)
-        return false;
+bool VoiceEngine::Delete(VoiceEngine*& voiceEngine) {
+  if (voiceEngine == NULL)
+    return false;
 
-    VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
-    // Release the reference that was added in GetVoiceEngine.
-    int ref = s->Release();
-    voiceEngine = NULL;
+  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
+  // Release the reference that was added in GetVoiceEngine.
+  int ref = s->Release();
+  voiceEngine = NULL;
 
-    if (ref != 0) {
-        WEBRTC_TRACE(kTraceWarning, kTraceVoice, -1,
-            "VoiceEngine::Delete did not release the very last reference.  "
-            "%d references remain.", ref);
-    }
+  if (ref != 0) {
+    WEBRTC_TRACE(
+        kTraceWarning, kTraceVoice, -1,
+        "VoiceEngine::Delete did not release the very last reference.  "
+        "%d references remain.",
+        ref);
+  }
 
-    return true;
+  return true;
 }
 
 #if !defined(WEBRTC_CHROMIUM_BUILD)
-int VoiceEngine::SetAndroidObjects(void* javaVM, void* context)
-{
+int VoiceEngine::SetAndroidObjects(void* javaVM, void* context) {
 #ifdef WEBRTC_ANDROID
 #ifdef WEBRTC_ANDROID_OPENSLES
   typedef AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>
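
Note: the reference counting reformatted above works as follows: GetVoiceEngine() adds the first reference, each sub-API GetInterface() adds another, and VoiceEngine::Delete() releases the last one and warns if any sub-API references remain. A minimal usage sketch under that model (CreateUseDelete is a hypothetical caller; it assumes the public voe_base.h header and omits error handling):

// Sketch only: the create/use/delete cycle implied by the counting above.
#include "webrtc/voice_engine/include/voe_base.h"

void CreateUseDelete() {
  webrtc::VoiceEngine* voe = webrtc::VoiceEngine::Create();    // first ref, added in GetVoiceEngine()
  webrtc::VoEBase* base = webrtc::VoEBase::GetInterface(voe);  // second ref
  // ... create channels, start streams, etc. ...
  base->Release();                   // drop the sub-API reference
  webrtc::VoiceEngine::Delete(voe);  // releases the last reference and NULLs the pointer
}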
diff --git a/webrtc/voice_engine/voice_engine_impl.h b/webrtc/voice_engine/voice_engine_impl.h
index 992bc2d..07f29c3 100644
--- a/webrtc/voice_engine/voice_engine_impl.h
+++ b/webrtc/voice_engine/voice_engine_impl.h
@@ -47,8 +47,7 @@
 #include "webrtc/voice_engine/voe_volume_control_impl.h"
 #endif
 
-namespace webrtc
-{
+namespace webrtc {
 
 class VoiceEngineImpl : public voe::SharedData,  // Must be the first base class
                         public VoiceEngine,
@@ -83,11 +82,10 @@
 #ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
                         public VoEVolumeControlImpl,
 #endif
-                        public VoEBaseImpl
-{
-public:
-    VoiceEngineImpl(const Config* config, bool owns_config) :
-        SharedData(*config),
+                        public VoEBaseImpl {
+ public:
+  VoiceEngineImpl(const Config* config, bool owns_config)
+      : SharedData(*config),
 #ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
         VoEAudioProcessingImpl(this),
 #endif
@@ -121,24 +119,20 @@
 #endif
         VoEBaseImpl(this),
         _ref_count(0),
-        own_config_(owns_config ? config : NULL)
-    {
-    }
-    virtual ~VoiceEngineImpl()
-    {
-        assert(_ref_count.Value() == 0);
-    }
+        own_config_(owns_config ? config : NULL) {
+  }
+  ~VoiceEngineImpl() override { assert(_ref_count.Value() == 0); }
 
-    int AddRef();
+  int AddRef();
 
-    // This implements the Release() method for all the inherited interfaces.
-    virtual int Release();
+  // This implements the Release() method for all the inherited interfaces.
+  int Release() override;
 
-private:
-    Atomic32 _ref_count;
-    rtc::scoped_ptr<const Config> own_config_;
+ private:
+  Atomic32 _ref_count;
+  rtc::scoped_ptr<const Config> own_config_;
 };
 
 }  // namespace webrtc
 
-#endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H
+#endif  // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H
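
Note: the virtual-to-override change visible in VoiceEngineImpl (e.g. "~VoiceEngineImpl() override" and "int Release() override") lets the compiler verify that a method really overrides a base-class virtual. A minimal illustration, not taken from the patched files:

// Sketch only: why `override` replaces `virtual` in the derived classes.
struct Base {
  virtual int Release() = 0;
  virtual ~Base() {}
};

struct Derived : Base {
  int Release() override { return 0; }  // compiler checks this matches Base::Release
  // int Release(int) override;         // would not compile: no such virtual in Base
};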