Lint fix for webrtc/modules/video_coding PART 2!

Submitting all the changes at once proved impossible because there were
too many changes in too many files. The changes to PRESUBMIT.py
will be uploaded in the last CL.
(original CL: https://codereview.webrtc.org/1528503003/)

BUG=webrtc:5309
TBR=mflodman@webrtc.org

Review URL: https://codereview.webrtc.org/1543503002

Cr-Commit-Position: refs/heads/master@{#11102}
diff --git a/webrtc/modules/video_coding/content_metrics_processing.cc b/webrtc/modules/video_coding/content_metrics_processing.cc
index 3b94098..0c3a6db 100644
--- a/webrtc/modules/video_coding/content_metrics_processing.cc
+++ b/webrtc/modules/video_coding/content_metrics_processing.cc
@@ -38,15 +38,15 @@
   recursive_avg_->Reset();
   uniform_avg_->Reset();
   frame_cnt_uniform_avg_ = 0;
-  avg_motion_level_  = 0.0f;
+  avg_motion_level_ = 0.0f;
   avg_spatial_level_ = 0.0f;
   return VCM_OK;
 }
 
 void VCMContentMetricsProcessing::UpdateFrameRate(uint32_t frameRate) {
   // Update factor for recursive averaging.
-  recursive_avg_factor_ = static_cast<float> (1000.0f) /
-      static_cast<float>(frameRate *  kQmMinIntervalMs);
+  recursive_avg_factor_ = static_cast<float>(1000.0f) /
+                          static_cast<float>(frameRate * kQmMinIntervalMs);
 }
 
 VideoContentMetrics* VCMContentMetricsProcessing::LongTermAvgData() {
@@ -58,10 +58,10 @@
     return NULL;
   }
   // Two metrics are used: motion and spatial level.
-  uniform_avg_->motion_magnitude = avg_motion_level_ /
-      static_cast<float>(frame_cnt_uniform_avg_);
-  uniform_avg_->spatial_pred_err = avg_spatial_level_ /
-      static_cast<float>(frame_cnt_uniform_avg_);
+  uniform_avg_->motion_magnitude =
+      avg_motion_level_ / static_cast<float>(frame_cnt_uniform_avg_);
+  uniform_avg_->spatial_pred_err =
+      avg_spatial_level_ / static_cast<float>(frame_cnt_uniform_avg_);
   return uniform_avg_;
 }
 
@@ -73,7 +73,7 @@
 }
 
 int VCMContentMetricsProcessing::UpdateContentData(
-    const VideoContentMetrics *contentMetrics) {
+    const VideoContentMetrics* contentMetrics) {
   if (contentMetrics == NULL) {
     return VCM_OK;
   }
@@ -81,7 +81,7 @@
 }
 
 int VCMContentMetricsProcessing::ProcessContent(
-    const VideoContentMetrics *contentMetrics) {
+    const VideoContentMetrics* contentMetrics) {
   // Update the recursive averaged metrics: average is over longer window
   // of time: over QmMinIntervalMs ms.
   UpdateRecursiveAvg(contentMetrics);
@@ -92,34 +92,33 @@
 }
 
 void VCMContentMetricsProcessing::UpdateUniformAvg(
-    const VideoContentMetrics *contentMetrics) {
+    const VideoContentMetrics* contentMetrics) {
   // Update frame counter.
   frame_cnt_uniform_avg_ += 1;
   // Update averaged metrics: motion and spatial level are used.
   avg_motion_level_ += contentMetrics->motion_magnitude;
-  avg_spatial_level_ +=  contentMetrics->spatial_pred_err;
+  avg_spatial_level_ += contentMetrics->spatial_pred_err;
   return;
 }
 
 void VCMContentMetricsProcessing::UpdateRecursiveAvg(
-    const VideoContentMetrics *contentMetrics) {
-
+    const VideoContentMetrics* contentMetrics) {
   // Spatial metrics: 2x2, 1x2(H), 2x1(V).
-  recursive_avg_->spatial_pred_err = (1 - recursive_avg_factor_) *
-      recursive_avg_->spatial_pred_err +
+  recursive_avg_->spatial_pred_err =
+      (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err +
       recursive_avg_factor_ * contentMetrics->spatial_pred_err;
 
-  recursive_avg_->spatial_pred_err_h = (1 - recursive_avg_factor_) *
-      recursive_avg_->spatial_pred_err_h +
+  recursive_avg_->spatial_pred_err_h =
+      (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err_h +
       recursive_avg_factor_ * contentMetrics->spatial_pred_err_h;
 
-  recursive_avg_->spatial_pred_err_v = (1 - recursive_avg_factor_) *
-      recursive_avg_->spatial_pred_err_v +
+  recursive_avg_->spatial_pred_err_v =
+      (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err_v +
       recursive_avg_factor_ * contentMetrics->spatial_pred_err_v;
 
   // Motion metric: Derived from NFD (normalized frame difference).
-  recursive_avg_->motion_magnitude = (1 - recursive_avg_factor_) *
-      recursive_avg_->motion_magnitude +
+  recursive_avg_->motion_magnitude =
+      (1 - recursive_avg_factor_) * recursive_avg_->motion_magnitude +
       recursive_avg_factor_ * contentMetrics->motion_magnitude;
 }
-}  // namespace
+}  // namespace webrtc
diff --git a/webrtc/modules/video_coding/content_metrics_processing.h b/webrtc/modules/video_coding/content_metrics_processing.h
index 3517f75..3f67ec1 100644
--- a/webrtc/modules/video_coding/content_metrics_processing.h
+++ b/webrtc/modules/video_coding/content_metrics_processing.h
@@ -18,14 +18,10 @@
 struct VideoContentMetrics;
 
 // QM interval time (in ms)
-enum {
-  kQmMinIntervalMs = 10000
-};
+enum { kQmMinIntervalMs = 10000 };
 
 // Flag for NFD metric vs motion metric
-enum {
-  kNfdMetric = 1
-};
+enum { kNfdMetric = 1 };
 
 /**********************************/
 /* Content Metrics Processing     */
@@ -36,7 +32,7 @@
   ~VCMContentMetricsProcessing();
 
   // Update class with latest metrics.
-  int UpdateContentData(const VideoContentMetrics *contentMetrics);
+  int UpdateContentData(const VideoContentMetrics* contentMetrics);
 
   // Reset the short-term averaged content data.
   void ResetShortTermAvgData();
@@ -57,13 +53,13 @@
 
  private:
   // Compute working average.
-  int ProcessContent(const VideoContentMetrics *contentMetrics);
+  int ProcessContent(const VideoContentMetrics* contentMetrics);
 
   // Update the recursive averaged metrics: longer time average (~5/10 secs).
-  void UpdateRecursiveAvg(const VideoContentMetrics *contentMetrics);
+  void UpdateRecursiveAvg(const VideoContentMetrics* contentMetrics);
 
   // Update the uniform averaged metrics: shorter time average (~RTCP report).
-  void UpdateUniformAvg(const VideoContentMetrics *contentMetrics);
+  void UpdateUniformAvg(const VideoContentMetrics* contentMetrics);
 
   VideoContentMetrics* recursive_avg_;
   VideoContentMetrics* uniform_avg_;
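
For context on the expressions reflowed above: UpdateRecursiveAvg() computes a
standard exponentially weighted moving average, with the smoothing factor
derived in UpdateFrameRate() as 1000 / (frameRate * kQmMinIntervalMs). A
minimal standalone sketch of the same update rule, assuming an illustrative
class named Ema that is not part of this CL:

// Sketch of the recursive (exponentially weighted) average reformatted above.
// Class and method names here are illustrative, not from the CL.
#include <cstdint>

class Ema {
 public:
  // Mirrors recursive_avg_factor_: 1000 ms over (fps * window in ms).
  void SetWindow(uint32_t frame_rate_fps, uint32_t window_ms) {
    factor_ = 1000.0f / static_cast<float>(frame_rate_fps * window_ms);
  }
  // Mirrors the per-metric updates in UpdateRecursiveAvg().
  void Update(float sample) {
    value_ = (1 - factor_) * value_ + factor_ * sample;
  }
  float value() const { return value_; }

 private:
  float factor_ = 0.0f;
  float value_ = 0.0f;
};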
diff --git a/webrtc/modules/video_coding/decoding_state.cc b/webrtc/modules/video_coding/decoding_state.cc
index 0e021ad..89be9b6 100644
--- a/webrtc/modules/video_coding/decoding_state.cc
+++ b/webrtc/modules/video_coding/decoding_state.cc
@@ -166,8 +166,8 @@
         full_sync_ = ContinuousPictureId(frame->PictureId());
       }
     } else {
-      full_sync_ = ContinuousSeqNum(static_cast<uint16_t>(
-          frame->GetLowSeqNum()));
+      full_sync_ =
+          ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum()));
     }
   }
 }
@@ -229,8 +229,7 @@
   return seq_num == static_cast<uint16_t>(sequence_num_ + 1);
 }
 
-bool VCMDecodingState::ContinuousLayer(int temporal_id,
-                                       int tl0_pic_id) const {
+bool VCMDecodingState::ContinuousLayer(int temporal_id, int tl0_pic_id) const {
   // First, check if applicable.
   if (temporal_id == kNoTemporalIdx || tl0_pic_id == kNoTl0PicIdx)
     return false;
diff --git a/webrtc/modules/video_coding/decoding_state.h b/webrtc/modules/video_coding/decoding_state.h
index fe40b24..f4ea8ae 100644
--- a/webrtc/modules/video_coding/decoding_state.h
+++ b/webrtc/modules/video_coding/decoding_state.h
@@ -64,13 +64,13 @@
 
   // Keep state of last decoded frame.
   // TODO(mikhal/stefan): create designated classes to handle these types.
-  uint16_t    sequence_num_;
-  uint32_t    time_stamp_;
-  int         picture_id_;
-  int         temporal_id_;
-  int         tl0_pic_id_;
-  bool        full_sync_;  // Sync flag when temporal layers are used.
-  bool        in_initial_state_;
+  uint16_t sequence_num_;
+  uint32_t time_stamp_;
+  int picture_id_;
+  int temporal_id_;
+  int tl0_pic_id_;
+  bool full_sync_;  // Sync flag when temporal layers are used.
+  bool in_initial_state_;
 
   // Used to check references in flexible mode.
   bool frame_decoded_[kFrameDecodedLength];
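
As context for the logic surrounding the reformatted ContinuousLayer()
signature above, the check in ContinuousSeqNum() relies on a uint16_t cast to
stay correct across the RTP sequence-number wrap. A hedged sketch of that
check as a free function (the name IsNextSeqNum is illustrative only):

// Wrap-around-safe continuity check, as in VCMDecodingState::ContinuousSeqNum().
// The free-function form and its name are illustrative, not from the CL.
#include <cstdint>

bool IsNextSeqNum(uint16_t last_decoded, uint16_t candidate) {
  // The cast keeps the comparison valid across the 0xFFFF -> 0x0000 wrap.
  return candidate == static_cast<uint16_t>(last_decoded + 1);
}

// For example, IsNextSeqNum(0xFFFF, 0x0000) evaluates to true.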
diff --git a/webrtc/modules/video_coding/encoded_frame.cc b/webrtc/modules/video_coding/encoded_frame.cc
index 611007a..261074a 100644
--- a/webrtc/modules/video_coding/encoded_frame.cc
+++ b/webrtc/modules/video_coding/encoded_frame.cc
@@ -24,7 +24,7 @@
       _fragmentation(),
       _rotation(kVideoRotation_0),
       _rotation_set(false) {
-    _codecSpecificInfo.codecType = kVideoCodecUnknown;
+  _codecSpecificInfo.codecType = kVideoCodecUnknown;
 }
 
 VCMEncodedFrame::VCMEncodedFrame(const webrtc::EncodedImage& rhs)
@@ -36,15 +36,14 @@
       _fragmentation(),
       _rotation(kVideoRotation_0),
       _rotation_set(false) {
-    _codecSpecificInfo.codecType = kVideoCodecUnknown;
-    _buffer = NULL;
-    _size = 0;
-    _length = 0;
-    if (rhs._buffer != NULL)
-    {
-        VerifyAndAllocate(rhs._length);
-        memcpy(_buffer, rhs._buffer, rhs._length);
-    }
+  _codecSpecificInfo.codecType = kVideoCodecUnknown;
+  _buffer = NULL;
+  _size = 0;
+  _length = 0;
+  if (rhs._buffer != NULL) {
+    VerifyAndAllocate(rhs._length);
+    memcpy(_buffer, rhs._buffer, rhs._length);
+  }
 }
 
 VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame& rhs)
@@ -60,49 +59,43 @@
   _buffer = NULL;
   _size = 0;
   _length = 0;
-  if (rhs._buffer != NULL)
-  {
-      VerifyAndAllocate(rhs._length);
-      memcpy(_buffer, rhs._buffer, rhs._length);
-      _length = rhs._length;
+  if (rhs._buffer != NULL) {
+    VerifyAndAllocate(rhs._length);
+    memcpy(_buffer, rhs._buffer, rhs._length);
+    _length = rhs._length;
   }
   _fragmentation.CopyFrom(rhs._fragmentation);
 }
 
-VCMEncodedFrame::~VCMEncodedFrame()
-{
-    Free();
+VCMEncodedFrame::~VCMEncodedFrame() {
+  Free();
 }
 
-void VCMEncodedFrame::Free()
-{
-    Reset();
-    if (_buffer != NULL)
-    {
-        delete [] _buffer;
-        _buffer = NULL;
-    }
+void VCMEncodedFrame::Free() {
+  Reset();
+  if (_buffer != NULL) {
+    delete[] _buffer;
+    _buffer = NULL;
+  }
 }
 
-void VCMEncodedFrame::Reset()
-{
-    _renderTimeMs = -1;
-    _timeStamp = 0;
-    _payloadType = 0;
-    _frameType = kVideoFrameDelta;
-    _encodedWidth = 0;
-    _encodedHeight = 0;
-    _completeFrame = false;
-    _missingFrame = false;
-    _length = 0;
-    _codecSpecificInfo.codecType = kVideoCodecUnknown;
-    _codec = kVideoCodecUnknown;
-    _rotation = kVideoRotation_0;
-    _rotation_set = false;
+void VCMEncodedFrame::Reset() {
+  _renderTimeMs = -1;
+  _timeStamp = 0;
+  _payloadType = 0;
+  _frameType = kVideoFrameDelta;
+  _encodedWidth = 0;
+  _encodedHeight = 0;
+  _completeFrame = false;
+  _missingFrame = false;
+  _length = 0;
+  _codecSpecificInfo.codecType = kVideoCodecUnknown;
+  _codec = kVideoCodecUnknown;
+  _rotation = kVideoRotation_0;
+  _rotation_set = false;
 }
 
-void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
-{
+void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
   if (header) {
     switch (header->codec) {
       case kRtpVideoVp8: {
@@ -215,21 +208,18 @@
   return &_fragmentation;
 }
 
-void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize)
-{
-    if(minimumSize > _size)
-    {
-        // create buffer of sufficient size
-        uint8_t* newBuffer = new uint8_t[minimumSize];
-        if(_buffer)
-        {
-            // copy old data
-            memcpy(newBuffer, _buffer, _size);
-            delete [] _buffer;
-        }
-        _buffer = newBuffer;
-        _size = minimumSize;
+void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize) {
+  if (minimumSize > _size) {
+    // create buffer of sufficient size
+    uint8_t* newBuffer = new uint8_t[minimumSize];
+    if (_buffer) {
+      // copy old data
+      memcpy(newBuffer, _buffer, _size);
+      delete[] _buffer;
     }
+    _buffer = newBuffer;
+    _size = minimumSize;
+  }
 }
 
 }  // namespace webrtc
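
The reindented VerifyAndAllocate() above is a grow-only buffer: it reallocates
only when the requested size exceeds the current capacity and copies the
existing contents into the new allocation. A minimal sketch of that pattern,
assuming an illustrative class named GrowOnlyBuffer:

// Grow-only buffer mirroring VCMEncodedFrame::VerifyAndAllocate().
// The class name is illustrative, not part of the CL.
#include <cstddef>
#include <cstdint>
#include <cstring>

class GrowOnlyBuffer {
 public:
  ~GrowOnlyBuffer() { delete[] data_; }
  void VerifyAndAllocate(size_t minimum_size) {
    if (minimum_size > size_) {
      uint8_t* new_buffer = new uint8_t[minimum_size];
      if (data_) {
        memcpy(new_buffer, data_, size_);  // Preserve existing contents.
        delete[] data_;
      }
      data_ = new_buffer;
      size_ = minimum_size;
    }
  }

 private:
  uint8_t* data_ = nullptr;
  size_t size_ = 0;
};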
diff --git a/webrtc/modules/video_coding/encoded_frame.h b/webrtc/modules/video_coding/encoded_frame.h
index f311db2..9034200 100644
--- a/webrtc/modules/video_coding/encoded_frame.h
+++ b/webrtc/modules/video_coding/encoded_frame.h
@@ -19,109 +19,114 @@
 #include "webrtc/modules/video_coding/include/video_codec_interface.h"
 #include "webrtc/modules/video_coding/include/video_coding_defines.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
-class VCMEncodedFrame : protected EncodedImage
-{
-public:
-    VCMEncodedFrame();
-    VCMEncodedFrame(const webrtc::EncodedImage& rhs);
-    VCMEncodedFrame(const VCMEncodedFrame& rhs);
+class VCMEncodedFrame : protected EncodedImage {
+ public:
+  VCMEncodedFrame();
+  explicit VCMEncodedFrame(const webrtc::EncodedImage& rhs);
+  VCMEncodedFrame(const VCMEncodedFrame& rhs);
 
-    ~VCMEncodedFrame();
-    /**
-    *   Delete VideoFrame and resets members to zero
-    */
-    void Free();
-    /**
-    *   Set render time in milliseconds
-    */
-    void SetRenderTime(const int64_t renderTimeMs) {_renderTimeMs = renderTimeMs;}
+  ~VCMEncodedFrame();
+  /**
+  *   Delete VideoFrame and resets members to zero
+  */
+  void Free();
+  /**
+  *   Set render time in milliseconds
+  */
+  void SetRenderTime(const int64_t renderTimeMs) {
+    _renderTimeMs = renderTimeMs;
+  }
 
-    /**
-    *   Set the encoded frame size
-    */
-    void SetEncodedSize(uint32_t width, uint32_t height)
-                       { _encodedWidth  = width; _encodedHeight = height; }
-    /**
-    *   Get the encoded image
-    */
-    const webrtc::EncodedImage& EncodedImage() const
-                       { return static_cast<const webrtc::EncodedImage&>(*this); }
-    /**
-    *   Get pointer to frame buffer
-    */
-    const uint8_t* Buffer() const {return _buffer;}
-    /**
-    *   Get frame length
-    */
-    size_t Length() const {return _length;}
-    /**
-    *   Get frame timestamp (90kHz)
-    */
-    uint32_t TimeStamp() const {return _timeStamp;}
-    /**
-    *   Get render time in milliseconds
-    */
-    int64_t RenderTimeMs() const {return _renderTimeMs;}
-    /**
-    *   Get frame type
-    */
-    webrtc::FrameType FrameType() const { return _frameType; }
-    /**
-    *   Get frame rotation
-    */
-    VideoRotation rotation() const { return _rotation; }
-    /**
-    *   True if this frame is complete, false otherwise
-    */
-    bool Complete() const { return _completeFrame; }
-    /**
-    *   True if there's a frame missing before this frame
-    */
-    bool MissingFrame() const { return _missingFrame; }
-    /**
-    *   Payload type of the encoded payload
-    */
-    uint8_t PayloadType() const { return _payloadType; }
-    /**
-    *   Get codec specific info.
-    *   The returned pointer is only valid as long as the VCMEncodedFrame
-    *   is valid. Also, VCMEncodedFrame owns the pointer and will delete
-    *   the object.
-    */
-    const CodecSpecificInfo* CodecSpecific() const {return &_codecSpecificInfo;}
+  /**
+  *   Set the encoded frame size
+  */
+  void SetEncodedSize(uint32_t width, uint32_t height) {
+    _encodedWidth = width;
+    _encodedHeight = height;
+  }
+  /**
+  *   Get the encoded image
+  */
+  const webrtc::EncodedImage& EncodedImage() const {
+    return static_cast<const webrtc::EncodedImage&>(*this);
+  }
+  /**
+  *   Get pointer to frame buffer
+  */
+  const uint8_t* Buffer() const { return _buffer; }
+  /**
+  *   Get frame length
+  */
+  size_t Length() const { return _length; }
+  /**
+  *   Get frame timestamp (90kHz)
+  */
+  uint32_t TimeStamp() const { return _timeStamp; }
+  /**
+  *   Get render time in milliseconds
+  */
+  int64_t RenderTimeMs() const { return _renderTimeMs; }
+  /**
+  *   Get frame type
+  */
+  webrtc::FrameType FrameType() const { return _frameType; }
+  /**
+  *   Get frame rotation
+  */
+  VideoRotation rotation() const { return _rotation; }
+  /**
+  *   True if this frame is complete, false otherwise
+  */
+  bool Complete() const { return _completeFrame; }
+  /**
+  *   True if there's a frame missing before this frame
+  */
+  bool MissingFrame() const { return _missingFrame; }
+  /**
+  *   Payload type of the encoded payload
+  */
+  uint8_t PayloadType() const { return _payloadType; }
+  /**
+  *   Get codec specific info.
+  *   The returned pointer is only valid as long as the VCMEncodedFrame
+  *   is valid. Also, VCMEncodedFrame owns the pointer and will delete
+  *   the object.
+  */
+  const CodecSpecificInfo* CodecSpecific() const { return &_codecSpecificInfo; }
 
-    const RTPFragmentationHeader* FragmentationHeader() const;
+  const RTPFragmentationHeader* FragmentationHeader() const;
 
-protected:
-    /**
-    * Verifies that current allocated buffer size is larger than or equal to the input size.
-    * If the current buffer size is smaller, a new allocation is made and the old buffer data
-    * is copied to the new buffer.
-    * Buffer size is updated to minimumSize.
-    */
-    void VerifyAndAllocate(size_t minimumSize);
+ protected:
+  /**
+  * Verifies that current allocated buffer size is larger than or equal to the
+  * input size.
+  * If the current buffer size is smaller, a new allocation is made and the old
+  * buffer data
+  * is copied to the new buffer.
+  * Buffer size is updated to minimumSize.
+  */
+  void VerifyAndAllocate(size_t minimumSize);
 
-    void Reset();
+  void Reset();
 
-    void CopyCodecSpecific(const RTPVideoHeader* header);
+  void CopyCodecSpecific(const RTPVideoHeader* header);
 
-    int64_t                 _renderTimeMs;
-    uint8_t                 _payloadType;
-    bool                          _missingFrame;
-    CodecSpecificInfo             _codecSpecificInfo;
-    webrtc::VideoCodecType        _codec;
-    RTPFragmentationHeader        _fragmentation;
-    VideoRotation                 _rotation;
+  int64_t _renderTimeMs;
+  uint8_t _payloadType;
+  bool _missingFrame;
+  CodecSpecificInfo _codecSpecificInfo;
+  webrtc::VideoCodecType _codec;
+  RTPFragmentationHeader _fragmentation;
+  VideoRotation _rotation;
 
-    // Video rotation is only set along with the last packet for each frame
-    // (same as marker bit). This |_rotation_set| is only for debugging purpose
-    // to ensure we don't set it twice for a frame.
-    bool                          _rotation_set;
+  // Video rotation is only set along with the last packet for each frame
+  // (same as marker bit). This |_rotation_set| is only for debugging purpose
+  // to ensure we don't set it twice for a frame.
+  bool _rotation_set;
 };
 
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
diff --git a/webrtc/modules/video_coding/fec_tables_xor.h b/webrtc/modules/video_coding/fec_tables_xor.h
index 8121a35..fa5bd7b 100644
--- a/webrtc/modules/video_coding/fec_tables_xor.h
+++ b/webrtc/modules/video_coding/fec_tables_xor.h
@@ -22,6460 +22,438 @@
 // loss_j = 0,1,..128, and rate_i varies over some range.
 static const int kSizeCodeRateXORTable = 6450;
 static const unsigned char kCodeRateXORTable[kSizeCodeRateXORTable] = {
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-56,
-56,
-56,
-56,
-56,
-56,
-56,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-78,
-78,
-78,
-78,
-78,
-78,
-78,
-78,
-78,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-6,
-6,
-6,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-44,
-44,
-44,
-44,
-44,
-44,
-50,
-50,
-50,
-50,
-50,
-50,
-50,
-50,
-50,
-68,
-68,
-68,
-68,
-68,
-68,
-68,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-88,
-88,
-88,
-88,
-88,
-88,
-88,
-88,
-88,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-5,
-5,
-5,
-5,
-5,
-5,
-19,
-19,
-19,
-36,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-55,
-55,
-55,
-55,
-55,
-55,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-75,
-75,
-80,
-80,
-80,
-80,
-80,
-97,
-97,
-97,
-97,
-97,
-97,
-97,
-97,
-97,
-97,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-4,
-16,
-16,
-16,
-16,
-16,
-16,
-30,
-35,
-35,
-47,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-63,
-63,
-63,
-63,
-63,
-63,
-77,
-77,
-77,
-77,
-77,
-77,
-77,
-82,
-82,
-82,
-82,
-94,
-94,
-94,
-94,
-94,
-105,
-105,
-105,
-105,
-110,
-110,
-110,
-110,
-110,
-110,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-4,
-14,
-27,
-27,
-27,
-27,
-27,
-31,
-41,
-52,
-52,
-56,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-79,
-79,
-79,
-79,
-83,
-83,
-83,
-94,
-94,
-94,
-94,
-106,
-106,
-106,
-106,
-106,
-115,
-115,
-115,
-115,
-125,
-125,
-125,
-125,
-125,
-125,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-0,
-3,
-3,
-3,
-17,
-28,
-38,
-38,
-38,
-38,
-38,
-47,
-51,
-63,
-63,
-63,
-72,
-72,
-72,
-72,
-72,
-72,
-72,
-76,
-76,
-76,
-76,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-84,
-84,
-84,
-84,
-93,
-93,
-93,
-105,
-105,
-105,
-105,
-114,
-114,
-114,
-114,
-114,
-124,
-124,
-124,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-0,
-12,
-12,
-12,
-35,
-43,
-47,
-47,
-47,
-47,
-47,
-58,
-58,
-66,
-66,
-66,
-70,
-70,
-70,
-70,
-70,
-73,
-73,
-82,
-82,
-82,
-86,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-105,
-105,
-105,
-114,
-114,
-114,
-114,
-117,
-117,
-117,
-117,
-117,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-0,
-24,
-24,
-24,
-49,
-53,
-53,
-53,
-53,
-53,
-53,
-61,
-61,
-64,
-64,
-64,
-64,
-70,
-70,
-70,
-70,
-78,
-78,
-88,
-88,
-88,
-96,
-106,
-106,
-106,
-106,
-106,
-106,
-106,
-106,
-106,
-106,
-112,
-112,
-112,
-120,
-120,
-120,
-124,
-124,
-124,
-124,
-124,
-124,
-124,
-124,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-5,
-36,
-36,
-36,
-55,
-55,
-55,
-55,
-55,
-55,
-55,
-58,
-58,
-58,
-58,
-58,
-64,
-78,
-78,
-78,
-78,
-87,
-87,
-94,
-94,
-94,
-103,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-116,
-116,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-18,
-43,
-43,
-43,
-53,
-53,
-53,
-53,
-53,
-53,
-53,
-53,
-58,
-58,
-58,
-58,
-71,
-87,
-87,
-87,
-87,
-94,
-94,
-97,
-97,
-97,
-109,
-111,
-111,
-111,
-111,
-111,
-111,
-111,
-111,
-125,
-125,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-31,
-46,
-46,
-46,
-48,
-48,
-48,
-48,
-48,
-48,
-48,
-48,
-66,
-66,
-66,
-66,
-80,
-93,
-93,
-93,
-93,
-95,
-95,
-95,
-95,
-100,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-4,
-40,
-45,
-45,
-45,
-45,
-45,
-45,
-45,
-45,
-49,
-49,
-49,
-74,
-74,
-74,
-74,
-86,
-90,
-90,
-90,
-90,
-95,
-95,
-95,
-95,
-106,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-14,
-42,
-42,
-42,
-42,
-42,
-42,
-42,
-42,
-46,
-56,
-56,
-56,
-80,
-80,
-80,
-80,
-84,
-84,
-84,
-84,
-88,
-99,
-99,
-99,
-99,
-111,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-26,
-40,
-40,
-40,
-40,
-40,
-40,
-40,
-40,
-54,
-66,
-66,
-66,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-84,
-94,
-106,
-106,
-106,
-106,
-116,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-3,
-34,
-38,
-38,
-38,
-38,
-38,
-42,
-42,
-42,
-63,
-72,
-72,
-76,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-89,
-101,
-114,
-114,
-114,
-114,
-118,
-118,
-118,
-118,
-118,
-118,
-118,
-118,
-118,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-12,
-36,
-36,
-36,
-36,
-36,
-36,
-49,
-49,
-49,
-69,
-73,
-76,
-86,
-86,
-86,
-86,
-86,
-86,
-86,
-86,
-97,
-109,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-22,
-34,
-34,
-34,
-34,
-38,
-38,
-57,
-57,
-57,
-69,
-73,
-82,
-92,
-92,
-92,
-92,
-92,
-92,
-96,
-96,
-104,
-117,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-29,
-33,
-33,
-33,
-33,
-44,
-44,
-62,
-62,
-62,
-69,
-77,
-87,
-95,
-95,
-95,
-95,
-95,
-95,
-107,
-107,
-110,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-31,
-31,
-31,
-31,
-31,
-51,
-51,
-62,
-65,
-65,
-73,
-83,
-91,
-94,
-94,
-94,
-94,
-97,
-97,
-114,
-114,
-114,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-29,
-29,
-29,
-29,
-29,
-56,
-56,
-59,
-70,
-70,
-79,
-86,
-89,
-89,
-89,
-89,
-89,
-100,
-100,
-116,
-116,
-116,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-28,
-28,
-28,
-28,
-28,
-57,
-57,
-57,
-76,
-76,
-83,
-86,
-86,
-86,
-86,
-86,
-89,
-104,
-104,
-114,
-114,
-114,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-27,
-27,
-27,
-27,
-30,
-55,
-55,
-55,
-80,
-80,
-83,
-86,
-86,
-86,
-86,
-86,
-93,
-108,
-108,
-111,
-111,
-111,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-26,
-26,
-26,
-26,
-36,
-53,
-53,
-53,
-80,
-80,
-80,
-90,
-90,
-90,
-90,
-90,
-98,
-107,
-107,
-107,
-107,
-107,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-26,
-26,
-26,
-28,
-42,
-52,
-54,
-54,
-78,
-78,
-78,
-95,
-95,
-95,
-97,
-97,
-104,
-106,
-106,
-106,
-106,
-106,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-24,
-24,
-24,
-33,
-47,
-49,
-58,
-58,
-74,
-74,
-74,
-97,
-97,
-97,
-106,
-106,
-108,
-108,
-108,
-108,
-108,
-108,
-124,
-124,
-124,
-124,
-124,
-124,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-24,
-24,
-24,
-39,
-48,
-50,
-63,
-63,
-72,
-74,
-74,
-96,
-96,
-96,
-109,
-111,
-111,
-111,
-111,
-111,
-111,
-111,
-119,
-119,
-122,
-122,
-122,
-122,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-23,
-23,
-23,
-43,
-46,
-54,
-66,
-66,
-69,
-77,
-77,
-92,
-92,
-92,
-105,
-113,
-113,
-113,
-113,
-113,
-113,
-113,
-115,
-117,
-123,
-123,
-123,
-123,
-123,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-22,
-22,
-22,
-44,
-44,
-59,
-67,
-67,
-67,
-81,
-81,
-89,
-89,
-89,
-97,
-112,
-112,
-112,
-112,
-112,
-112,
-112,
-112,
-119,
-126,
-126,
-126,
-126,
-126,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-21,
-21,
-24,
-43,
-45,
-63,
-65,
-65,
-67,
-85,
-85,
-87,
-87,
-87,
-91,
-109,
-109,
-109,
-111,
-111,
-111,
-111,
-111,
-123,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-21,
-21,
-28,
-42,
-50,
-63,
-63,
-66,
-71,
-85,
-85,
-85,
-85,
-87,
-92,
-106,
-106,
-108,
-114,
-114,
-114,
-114,
-114,
-125,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-20,
-20,
-34,
-41,
-54,
-62,
-62,
-69,
-75,
-82,
-82,
-82,
-82,
-92,
-98,
-105,
-105,
-110,
-117,
-117,
-117,
-117,
-117,
-124,
-124,
-126,
-126,
-126,
-126,
-126,
-126,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-20,
-20,
-38,
-40,
-58,
-60,
-60,
-73,
-78,
-80,
-80,
-80,
-80,
-100,
-105,
-107,
-107,
-113,
-118,
-118,
-118,
-118,
-118,
-120,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-19,
-21,
-38,
-40,
-58,
-58,
-60,
-75,
-77,
-77,
-77,
-81,
-81,
-107,
-109,
-109,
-109,
-114,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-18,
-25,
-37,
-44,
-56,
-56,
-63,
-75,
-75,
-75,
-75,
-88,
-88,
-111,
-111,
-111,
-111,
-112,
-112,
-112,
-112,
-112,
-112,
-112,
-114,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-18,
-30,
-36,
-48,
-55,
-55,
-67,
-73,
-73,
-73,
-73,
-97,
-97,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-116,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-18,
-34,
-36,
-52,
-55,
-55,
-70,
-72,
-73,
-73,
-73,
-102,
-104,
-108,
-108,
-108,
-108,
-109,
-109,
-109,
-109,
-109,
-109,
-109,
-119,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-17,
-35,
-35,
-52,
-59,
-59,
-70,
-70,
-76,
-76,
-76,
-99,
-105,
-105,
-105,
-105,
-105,
-111,
-111,
-111,
-111,
-111,
-111,
-111,
-121,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-17,
-34,
-36,
-51,
-61,
-62,
-70,
-70,
-80,
-80,
-80,
-93,
-103,
-103,
-103,
-103,
-103,
-112,
-112,
-112,
-112,
-112,
-116,
-118,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-16,
-33,
-39,
-50,
-59,
-65,
-72,
-72,
-82,
-82,
-82,
-91,
-100,
-100,
-100,
-100,
-100,
-109,
-109,
-109,
-109,
-109,
-121,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-16,
-32,
-43,
-48,
-54,
-66,
-75,
-75,
-81,
-83,
-83,
-92,
-97,
-97,
-97,
-99,
-99,
-105,
-105,
-105,
-105,
-105,
-123,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-15,
-31,
-46,
-47,
-49,
-69,
-77,
-77,
-81,
-85,
-85,
-93,
-95,
-95,
-95,
-100,
-100,
-102,
-102,
-102,
-102,
-102,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-15,
-30,
-46,
-48,
-48,
-70,
-75,
-79,
-82,
-87,
-87,
-92,
-94,
-94,
-94,
-103,
-103,
-103,
-103,
-103,
-104,
-104,
-115,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-15,
-30,
-45,
-50,
-50,
-68,
-70,
-80,
-85,
-89,
-89,
-90,
-95,
-95,
-95,
-104,
-104,
-104,
-104,
-104,
-109,
-109,
-112,
-114,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-14,
-29,
-44,
-54,
-54,
-64,
-64,
-83,
-87,
-88,
-88,
-88,
-98,
-98,
-98,
-103,
-103,
-103,
-103,
-103,
-113,
-113,
-113,
-113,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-14,
-29,
-43,
-56,
-56,
-61,
-61,
-84,
-85,
-88,
-88,
-88,
-100,
-100,
-100,
-102,
-102,
-102,
-102,
-102,
-113,
-116,
-116,
-116,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-14,
-28,
-42,
-57,
-57,
-62,
-62,
-80,
-80,
-91,
-91,
-91,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-109,
-119,
-119,
-119,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-14,
-28,
-42,
-56,
-56,
-65,
-66,
-76,
-76,
-92,
-92,
-92,
-97,
-97,
-97,
-101,
-101,
-101,
-101,
-101,
-106,
-121,
-121,
-121,
-126,
-126,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-13,
-27,
-41,
-55,
-55,
-67,
-72,
-74,
-74,
-90,
-90,
-90,
-91,
-91,
-91,
-105,
-105,
-105,
-105,
-105,
-107,
-122,
-122,
-122,
-123,
-123,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-13,
-27,
-40,
-54,
-54,
-67,
-76,
-76,
-76,
-85,
-85,
-85,
-85,
-85,
-85,
-112,
-112,
-112,
-112,
-112,
-112,
-121,
-121,
-121,
-121,
-121,
-126,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-
-
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,   0,   0,   11,  11,  11,  11,  11,  11,  11,  11,  11,  11,  11,  11,
+    11,  11,  11,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,
+    39,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,
+    39,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,  39,
+    39,  39,  39,  39,  39,  39,  51,  51,  51,  51,  51,  51,  51,  51,  51,
+    51,  51,  51,  51,  51,  51,  51,  51,  51,  51,  51,  51,  51,  51,  51,
+    51,  51,  51,  51,  51,  51,  51,  51,  51,  0,   0,   0,   0,   0,   0,
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   8,   8,   8,
+    8,   8,   8,   8,   8,   8,   8,   8,   8,   8,   8,   8,   30,  30,  30,
+    30,  30,  30,  30,  30,  30,  30,  30,  30,  30,  30,  30,  56,  56,  56,
+    56,  56,  56,  56,  65,  65,  65,  65,  65,  65,  65,  65,  65,  65,  65,
+    65,  65,  65,  65,  65,  65,  65,  65,  65,  65,  65,  65,  65,  65,  65,
+    65,  65,  65,  65,  65,  65,  65,  65,  65,  65,  65,  65,  65,  65,  65,
+    87,  87,  87,  87,  87,  87,  87,  87,  87,  87,  87,  87,  87,  87,  87,
+    87,  87,  87,  87,  87,  87,  87,  87,  87,  78,  78,  78,  78,  78,  78,
+    78,  78,  78,  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+    0,   0,   0,   6,   6,   6,   23,  23,  23,  23,  23,  23,  23,  23,  23,
+    23,  23,  23,  23,  23,  23,  44,  44,  44,  44,  44,  44,  50,  50,  50,
+    50,  50,  50,  50,  50,  50,  68,  68,  68,  68,  68,  68,  68,  85,  85,
+    85,  85,  85,  85,  85,  85,  85,  85,  85,  85,  85,  85,  85,  85,  85,
+    85,  85,  85,  85,  85,  85,  85,  85,  85,  85,  85,  85,  85,  85,  85,
+    85,  85,  85,  85,  85,  85,  85,  85,  85,  105, 105, 105, 105, 105, 105,
+    105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105,
+    105, 105, 105, 88,  88,  88,  88,  88,  88,  88,  88,  88,  0,   0,   0,
+    0,   0,   0,   0,   0,   0,   5,   5,   5,   5,   5,   5,   19,  19,  19,
+    36,  41,  41,  41,  41,  41,  41,  41,  41,  41,  41,  41,  41,  41,  41,
+    55,  55,  55,  55,  55,  55,  69,  69,  69,  69,  69,  69,  69,  69,  69,
+    75,  75,  80,  80,  80,  80,  80,  97,  97,  97,  97,  97,  97,  97,  97,
+    97,  97,  102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
+    102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
+    102, 102, 102, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116,
+    116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 100, 100, 100,
+    100, 100, 100, 100, 100, 100, 0,   0,   0,   0,   0,   0,   0,   0,   4,
+    16,  16,  16,  16,  16,  16,  30,  35,  35,  47,  58,  58,  58,  58,  58,
+    58,  58,  58,  58,  58,  58,  58,  58,  58,  63,  63,  63,  63,  63,  63,
+    77,  77,  77,  77,  77,  77,  77,  82,  82,  82,  82,  94,  94,  94,  94,
+    94,  105, 105, 105, 105, 110, 110, 110, 110, 110, 110, 122, 122, 122, 122,
+    122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122,
+    122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 115, 115, 115, 115, 115, 115, 115, 115, 115,
+    0,   0,   0,   0,   0,   0,   0,   4,   14,  27,  27,  27,  27,  27,  31,
+    41,  52,  52,  56,  69,  69,  69,  69,  69,  69,  69,  69,  69,  69,  69,
+    69,  69,  69,  69,  69,  69,  69,  69,  69,  79,  79,  79,  79,  83,  83,
+    83,  94,  94,  94,  94,  106, 106, 106, 106, 106, 115, 115, 115, 115, 125,
+    125, 125, 125, 125, 125, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   0,   0,   0,   3,   3,
+    3,   17,  28,  38,  38,  38,  38,  38,  47,  51,  63,  63,  63,  72,  72,
+    72,  72,  72,  72,  72,  76,  76,  76,  76,  80,  80,  80,  80,  80,  80,
+    80,  80,  80,  84,  84,  84,  84,  93,  93,  93,  105, 105, 105, 105, 114,
+    114, 114, 114, 114, 124, 124, 124, 124, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 0,   0,   0,   0,   12,  12,  12,  35,  43,  47,  47,  47,
+    47,  47,  58,  58,  66,  66,  66,  70,  70,  70,  70,  70,  73,  73,  82,
+    82,  82,  86,  94,  94,  94,  94,  94,  94,  94,  94,  94,  94,  94,  94,
+    94,  105, 105, 105, 114, 114, 114, 114, 117, 117, 117, 117, 117, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   0,   0,
+    0,   24,  24,  24,  49,  53,  53,  53,  53,  53,  53,  61,  61,  64,  64,
+    64,  64,  70,  70,  70,  70,  78,  78,  88,  88,  88,  96,  106, 106, 106,
+    106, 106, 106, 106, 106, 106, 106, 112, 112, 112, 120, 120, 120, 124, 124,
+    124, 124, 124, 124, 124, 124, 124, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 0,   0,   0,   5,   36,  36,  36,  55,  55,
+    55,  55,  55,  55,  55,  58,  58,  58,  58,  58,  64,  78,  78,  78,  78,
+    87,  87,  94,  94,  94,  103, 110, 110, 110, 110, 110, 110, 110, 110, 116,
+    116, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    0,   0,   0,   18,  43,  43,  43,  53,  53,  53,  53,  53,  53,  53,  53,
+    58,  58,  58,  58,  71,  87,  87,  87,  87,  94,  94,  97,  97,  97,  109,
+    111, 111, 111, 111, 111, 111, 111, 111, 125, 125, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   0,   0,   31,  46,  46,
+    46,  48,  48,  48,  48,  48,  48,  48,  48,  66,  66,  66,  66,  80,  93,
+    93,  93,  93,  95,  95,  95,  95,  100, 115, 115, 115, 115, 115, 115, 115,
+    115, 115, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 0,   0,   4,   40,  45,  45,  45,  45,  45,  45,  45,  45,
+    49,  49,  49,  74,  74,  74,  74,  86,  90,  90,  90,  90,  95,  95,  95,
+    95,  106, 120, 120, 120, 120, 120, 120, 120, 120, 120, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   0,   14,
+    42,  42,  42,  42,  42,  42,  42,  42,  46,  56,  56,  56,  80,  80,  80,
+    80,  84,  84,  84,  84,  88,  99,  99,  99,  99,  111, 122, 122, 122, 122,
+    122, 122, 122, 122, 122, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 0,   0,   26,  40,  40,  40,  40,  40,  40,
+    40,  40,  54,  66,  66,  66,  80,  80,  80,  80,  80,  80,  80,  84,  94,
+    106, 106, 106, 106, 116, 120, 120, 120, 120, 120, 120, 120, 120, 124, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    0,   3,   34,  38,  38,  38,  38,  38,  42,  42,  42,  63,  72,  72,  76,
+    80,  80,  80,  80,  80,  80,  80,  89,  101, 114, 114, 114, 114, 118, 118,
+    118, 118, 118, 118, 118, 118, 118, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   12,  36,  36,  36,  36,
+    36,  36,  49,  49,  49,  69,  73,  76,  86,  86,  86,  86,  86,  86,  86,
+    86,  97,  109, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122,
+    122, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 0,   22,  34,  34,  34,  34,  38,  38,  57,  57,  57,  69,
+    73,  82,  92,  92,  92,  92,  92,  92,  96,  96,  104, 117, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   29,  33,
+    33,  33,  33,  44,  44,  62,  62,  62,  69,  77,  87,  95,  95,  95,  95,
+    95,  95,  107, 107, 110, 120, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 0,   31,  31,  31,  31,  31,  51,  51,  62,
+    65,  65,  73,  83,  91,  94,  94,  94,  94,  97,  97,  114, 114, 114, 122,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    0,   29,  29,  29,  29,  29,  56,  56,  59,  70,  70,  79,  86,  89,  89,
+    89,  89,  89,  100, 100, 116, 116, 116, 122, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   28,  28,  28,  28,  28,
+    57,  57,  57,  76,  76,  83,  86,  86,  86,  86,  86,  89,  104, 104, 114,
+    114, 114, 124, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 0,   27,  27,  27,  27,  30,  55,  55,  55,  80,  80,  83,
+    86,  86,  86,  86,  86,  93,  108, 108, 111, 111, 111, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   26,  26,
+    26,  26,  36,  53,  53,  53,  80,  80,  80,  90,  90,  90,  90,  90,  98,
+    107, 107, 107, 107, 107, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 0,   26,  26,  26,  28,  42,  52,  54,  54,
+    78,  78,  78,  95,  95,  95,  97,  97,  104, 106, 106, 106, 106, 106, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    0,   24,  24,  24,  33,  47,  49,  58,  58,  74,  74,  74,  97,  97,  97,
+    106, 106, 108, 108, 108, 108, 108, 108, 124, 124, 124, 124, 124, 124, 124,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   24,  24,  24,  39,  48,
+    50,  63,  63,  72,  74,  74,  96,  96,  96,  109, 111, 111, 111, 111, 111,
+    111, 111, 119, 119, 122, 122, 122, 122, 122, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 0,   23,  23,  23,  43,  46,  54,  66,  66,  69,  77,  77,
+    92,  92,  92,  105, 113, 113, 113, 113, 113, 113, 113, 115, 117, 123, 123,
+    123, 123, 123, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   22,  22,
+    22,  44,  44,  59,  67,  67,  67,  81,  81,  89,  89,  89,  97,  112, 112,
+    112, 112, 112, 112, 112, 112, 119, 126, 126, 126, 126, 126, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 0,   21,  21,  24,  43,  45,  63,  65,  65,
+    67,  85,  85,  87,  87,  87,  91,  109, 109, 109, 111, 111, 111, 111, 111,
+    123, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    0,   21,  21,  28,  42,  50,  63,  63,  66,  71,  85,  85,  85,  85,  87,
+    92,  106, 106, 108, 114, 114, 114, 114, 114, 125, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   20,  20,  34,  41,  54,
+    62,  62,  69,  75,  82,  82,  82,  82,  92,  98,  105, 105, 110, 117, 117,
+    117, 117, 117, 124, 124, 126, 126, 126, 126, 126, 126, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 0,   20,  20,  38,  40,  58,  60,  60,  73,  78,  80,  80,
+    80,  80,  100, 105, 107, 107, 113, 118, 118, 118, 118, 118, 120, 120, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   19,  21,
+    38,  40,  58,  58,  60,  75,  77,  77,  77,  81,  81,  107, 109, 109, 109,
+    114, 116, 116, 116, 116, 116, 116, 116, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 0,   18,  25,  37,  44,  56,  56,  63,  75,
+    75,  75,  75,  88,  88,  111, 111, 111, 111, 112, 112, 112, 112, 112, 112,
+    112, 114, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    0,   18,  30,  36,  48,  55,  55,  67,  73,  73,  73,  73,  97,  97,  110,
+    110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 116, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   18,  34,  36,  52,  55,
+    55,  70,  72,  73,  73,  73,  102, 104, 108, 108, 108, 108, 109, 109, 109,
+    109, 109, 109, 109, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 0,   17,  35,  35,  52,  59,  59,  70,  70,  76,  76,  76,
+    99,  105, 105, 105, 105, 105, 111, 111, 111, 111, 111, 111, 111, 121, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   17,  34,
+    36,  51,  61,  62,  70,  70,  80,  80,  80,  93,  103, 103, 103, 103, 103,
+    112, 112, 112, 112, 112, 116, 118, 124, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 0,   16,  33,  39,  50,  59,  65,  72,  72,
+    82,  82,  82,  91,  100, 100, 100, 100, 100, 109, 109, 109, 109, 109, 121,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    0,   16,  32,  43,  48,  54,  66,  75,  75,  81,  83,  83,  92,  97,  97,
+    97,  99,  99,  105, 105, 105, 105, 105, 123, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   15,  31,  46,  47,  49,
+    69,  77,  77,  81,  85,  85,  93,  95,  95,  95,  100, 100, 102, 102, 102,
+    102, 102, 120, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 0,   15,  30,  46,  48,  48,  70,  75,  79,  82,  87,  87,
+    92,  94,  94,  94,  103, 103, 103, 103, 103, 104, 104, 115, 120, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   15,  30,
+    45,  50,  50,  68,  70,  80,  85,  89,  89,  90,  95,  95,  95,  104, 104,
+    104, 104, 104, 109, 109, 112, 114, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 0,   14,  29,  44,  54,  54,  64,  64,  83,
+    87,  88,  88,  88,  98,  98,  98,  103, 103, 103, 103, 103, 113, 113, 113,
+    113, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    0,   14,  29,  43,  56,  56,  61,  61,  84,  85,  88,  88,  88,  100, 100,
+    100, 102, 102, 102, 102, 102, 113, 116, 116, 116, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   14,  28,  42,  57,  57,
+    62,  62,  80,  80,  91,  91,  91,  100, 100, 100, 100, 100, 100, 100, 100,
+    109, 119, 119, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 0,   14,  28,  42,  56,  56,  65,  66,  76,  76,  92,  92,
+    92,  97,  97,  97,  101, 101, 101, 101, 101, 106, 121, 121, 121, 126, 126,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0,   13,  27,
+    41,  55,  55,  67,  72,  74,  74,  90,  90,  90,  91,  91,  91,  105, 105,
+    105, 105, 105, 107, 122, 122, 122, 123, 123, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 0,   13,  27,  40,  54,  54,  67,  76,  76,
+    76,  85,  85,  85,  85,  85,  85,  112, 112, 112, 112, 112, 112, 121, 121,
+    121, 121, 121, 126, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+    127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
 };
 
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_FEC_TABLES_XOR_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_FEC_TABLES_XOR_H_
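
The hunk above only reflows webrtc/modules/video_coding/fec_tables_xor.h from one value per line into clang-format's packed layout; the table entries themselves (all in the 0-127 range) are unchanged. As a rough illustration of how a flat protection table like this can be consulted, here is a minimal sketch with clamped indices. The row width, the index names, and the LookupProtectionFactor helper are assumptions made for the example; the real indexing logic lives elsewhere in the module and is not shown in this diff.

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Illustrative only; assumes a non-empty table laid out as num_rows rows of
// row_width entries each.
uint8_t LookupProtectionFactor(const uint8_t* table,
                               size_t num_rows,
                               size_t row_width,
                               size_t rate_index,
                               size_t loss_index) {
  rate_index = std::min(rate_index, num_rows - 1);   // Clamp to the last row.
  loss_index = std::min(loss_index, row_width - 1);  // Clamp to the last column.
  return table[rate_index * row_width + loss_index];
}
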
diff --git a/webrtc/modules/video_coding/frame_buffer.cc b/webrtc/modules/video_coding/frame_buffer.cc
index 012a8ac..b6ddeda 100644
--- a/webrtc/modules/video_coding/frame_buffer.cc
+++ b/webrtc/modules/video_coding/frame_buffer.cc
@@ -20,39 +20,30 @@
 namespace webrtc {
 
 VCMFrameBuffer::VCMFrameBuffer()
-  :
-    _state(kStateEmpty),
-    _nackCount(0),
-    _latestPacketTimeMs(-1) {
-}
+    : _state(kStateEmpty), _nackCount(0), _latestPacketTimeMs(-1) {}
 
-VCMFrameBuffer::~VCMFrameBuffer() {
-}
+VCMFrameBuffer::~VCMFrameBuffer() {}
 
 VCMFrameBuffer::VCMFrameBuffer(const VCMFrameBuffer& rhs)
-:
-VCMEncodedFrame(rhs),
-_state(rhs._state),
-_sessionInfo(),
-_nackCount(rhs._nackCount),
-_latestPacketTimeMs(rhs._latestPacketTimeMs) {
-    _sessionInfo = rhs._sessionInfo;
-    _sessionInfo.UpdateDataPointers(rhs._buffer, _buffer);
+    : VCMEncodedFrame(rhs),
+      _state(rhs._state),
+      _sessionInfo(),
+      _nackCount(rhs._nackCount),
+      _latestPacketTimeMs(rhs._latestPacketTimeMs) {
+  _sessionInfo = rhs._sessionInfo;
+  _sessionInfo.UpdateDataPointers(rhs._buffer, _buffer);
 }
 
-webrtc::FrameType
-VCMFrameBuffer::FrameType() const {
-    return _sessionInfo.FrameType();
+webrtc::FrameType VCMFrameBuffer::FrameType() const {
+  return _sessionInfo.FrameType();
 }
 
-int32_t
-VCMFrameBuffer::GetLowSeqNum() const {
-    return _sessionInfo.LowSequenceNumber();
+int32_t VCMFrameBuffer::GetLowSeqNum() const {
+  return _sessionInfo.LowSequenceNumber();
 }
 
-int32_t
-VCMFrameBuffer::GetHighSeqNum() const {
-    return _sessionInfo.HighSequenceNumber();
+int32_t VCMFrameBuffer::GetHighSeqNum() const {
+  return _sessionInfo.HighSequenceNumber();
 }
 
 int VCMFrameBuffer::PictureId() const {
@@ -84,214 +75,196 @@
       gof_info.temporal_up_switch[idx];
 }
 
-bool
-VCMFrameBuffer::IsSessionComplete() const {
-    return _sessionInfo.complete();
+bool VCMFrameBuffer::IsSessionComplete() const {
+  return _sessionInfo.complete();
 }
 
 // Insert packet
-VCMFrameBufferEnum
-VCMFrameBuffer::InsertPacket(const VCMPacket& packet,
-                             int64_t timeInMs,
-                             VCMDecodeErrorMode decode_error_mode,
-                             const FrameData& frame_data) {
-    assert(!(NULL == packet.dataPtr && packet.sizeBytes > 0));
-    if (packet.dataPtr != NULL) {
-        _payloadType = packet.payloadType;
+VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
+    const VCMPacket& packet,
+    int64_t timeInMs,
+    VCMDecodeErrorMode decode_error_mode,
+    const FrameData& frame_data) {
+  assert(!(NULL == packet.dataPtr && packet.sizeBytes > 0));
+  if (packet.dataPtr != NULL) {
+    _payloadType = packet.payloadType;
+  }
+
+  if (kStateEmpty == _state) {
+    // First packet (empty and/or media) inserted into this frame.
+    // store some info and set some initial values.
+    _timeStamp = packet.timestamp;
+    // We only take the ntp timestamp of the first packet of a frame.
+    ntp_time_ms_ = packet.ntp_time_ms_;
+    _codec = packet.codec;
+    if (packet.frameType != kEmptyFrame) {
+      // first media packet
+      SetState(kStateIncomplete);
     }
+  }
 
-    if (kStateEmpty == _state) {
-        // First packet (empty and/or media) inserted into this frame.
-        // store some info and set some initial values.
-        _timeStamp = packet.timestamp;
-        // We only take the ntp timestamp of the first packet of a frame.
-        ntp_time_ms_ = packet.ntp_time_ms_;
-        _codec = packet.codec;
-        if (packet.frameType != kEmptyFrame) {
-            // first media packet
-            SetState(kStateIncomplete);
-        }
+  uint32_t requiredSizeBytes =
+      Length() + packet.sizeBytes +
+      (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
+  if (requiredSizeBytes >= _size) {
+    const uint8_t* prevBuffer = _buffer;
+    const uint32_t increments =
+        requiredSizeBytes / kBufferIncStepSizeBytes +
+        (requiredSizeBytes % kBufferIncStepSizeBytes > 0);
+    const uint32_t newSize = _size + increments * kBufferIncStepSizeBytes;
+    if (newSize > kMaxJBFrameSizeBytes) {
+      LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
+                       "big.";
+      return kSizeError;
     }
+    VerifyAndAllocate(newSize);
+    _sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
+  }
 
-    uint32_t requiredSizeBytes = Length() + packet.sizeBytes +
-                   (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
-    if (requiredSizeBytes >= _size) {
-        const uint8_t* prevBuffer = _buffer;
-        const uint32_t increments = requiredSizeBytes /
-                                          kBufferIncStepSizeBytes +
-                                        (requiredSizeBytes %
-                                         kBufferIncStepSizeBytes > 0);
-        const uint32_t newSize = _size +
-                                       increments * kBufferIncStepSizeBytes;
-        if (newSize > kMaxJBFrameSizeBytes) {
-            LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
-                             "big.";
-            return kSizeError;
-        }
-        VerifyAndAllocate(newSize);
-        _sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
-    }
+  if (packet.width > 0 && packet.height > 0) {
+    _encodedWidth = packet.width;
+    _encodedHeight = packet.height;
+  }
 
-    if (packet.width > 0 && packet.height > 0) {
-      _encodedWidth = packet.width;
-      _encodedHeight = packet.height;
-    }
+    // Don't copy payload specific data for empty packets (e.g. padding packets).
+  if (packet.sizeBytes > 0)
+    CopyCodecSpecific(&packet.codecSpecificHeader);
 
-    // Don't copy payload specific data for empty packets (e.g padding packets).
-    if (packet.sizeBytes > 0)
-      CopyCodecSpecific(&packet.codecSpecificHeader);
+  int retVal =
+      _sessionInfo.InsertPacket(packet, _buffer, decode_error_mode, frame_data);
+  if (retVal == -1) {
+    return kSizeError;
+  } else if (retVal == -2) {
+    return kDuplicatePacket;
+  } else if (retVal == -3) {
+    return kOutOfBoundsPacket;
+  }
+  // update length
+  _length = Length() + static_cast<uint32_t>(retVal);
 
-    int retVal = _sessionInfo.InsertPacket(packet, _buffer,
-                                           decode_error_mode,
-                                           frame_data);
-    if (retVal == -1) {
-        return kSizeError;
-    } else if (retVal == -2) {
-        return kDuplicatePacket;
-    } else if (retVal == -3) {
-        return kOutOfBoundsPacket;
-    }
-    // update length
-    _length = Length() + static_cast<uint32_t>(retVal);
+  _latestPacketTimeMs = timeInMs;
 
-    _latestPacketTimeMs = timeInMs;
+  // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
+  // ts_126114v120700p.pdf Section 7.4.5.
+  // The MTSI client shall add the payload bytes as defined in this clause
+  // onto the last RTP packet in each group of packets which make up a key
+  // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
+  // (HEVC)).
+  if (packet.markerBit) {
+    RTC_DCHECK(!_rotation_set);
+    _rotation = packet.codecSpecificHeader.rotation;
+    _rotation_set = true;
+  }
 
-    // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
-    // ts_126114v120700p.pdf Section 7.4.5.
-    // The MTSI client shall add the payload bytes as defined in this clause
-    // onto the last RTP packet in each group of packets which make up a key
-    // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
-    // (HEVC)).
-    if (packet.markerBit) {
-      RTC_DCHECK(!_rotation_set);
-      _rotation = packet.codecSpecificHeader.rotation;
-      _rotation_set = true;
-    }
-
-    if (_sessionInfo.complete()) {
-      SetState(kStateComplete);
-      return kCompleteSession;
-    } else if (_sessionInfo.decodable()) {
-      SetState(kStateDecodable);
-      return kDecodableSession;
-    }
-    return kIncomplete;
+  if (_sessionInfo.complete()) {
+    SetState(kStateComplete);
+    return kCompleteSession;
+  } else if (_sessionInfo.decodable()) {
+    SetState(kStateDecodable);
+    return kDecodableSession;
+  }
+  return kIncomplete;
 }
 
-int64_t
-VCMFrameBuffer::LatestPacketTimeMs() const {
-    return _latestPacketTimeMs;
+int64_t VCMFrameBuffer::LatestPacketTimeMs() const {
+  return _latestPacketTimeMs;
 }
 
-void
-VCMFrameBuffer::IncrementNackCount() {
-    _nackCount++;
+void VCMFrameBuffer::IncrementNackCount() {
+  _nackCount++;
 }
 
-int16_t
-VCMFrameBuffer::GetNackCount() const {
-    return _nackCount;
+int16_t VCMFrameBuffer::GetNackCount() const {
+  return _nackCount;
 }
 
-bool
-VCMFrameBuffer::HaveFirstPacket() const {
-    return _sessionInfo.HaveFirstPacket();
+bool VCMFrameBuffer::HaveFirstPacket() const {
+  return _sessionInfo.HaveFirstPacket();
 }
 
-bool
-VCMFrameBuffer::HaveLastPacket() const {
-    return _sessionInfo.HaveLastPacket();
+bool VCMFrameBuffer::HaveLastPacket() const {
+  return _sessionInfo.HaveLastPacket();
 }
 
-int
-VCMFrameBuffer::NumPackets() const {
-    return _sessionInfo.NumPackets();
+int VCMFrameBuffer::NumPackets() const {
+  return _sessionInfo.NumPackets();
 }
 
-void
-VCMFrameBuffer::Reset() {
-    _length = 0;
-    _timeStamp = 0;
-    _sessionInfo.Reset();
-    _payloadType = 0;
-    _nackCount = 0;
-    _latestPacketTimeMs = -1;
-    _state = kStateEmpty;
-    VCMEncodedFrame::Reset();
+void VCMFrameBuffer::Reset() {
+  _length = 0;
+  _timeStamp = 0;
+  _sessionInfo.Reset();
+  _payloadType = 0;
+  _nackCount = 0;
+  _latestPacketTimeMs = -1;
+  _state = kStateEmpty;
+  VCMEncodedFrame::Reset();
 }
 
 // Set state of frame
-void
-VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
-    if (_state == state) {
-        return;
-    }
-    switch (state) {
+void VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
+  if (_state == state) {
+    return;
+  }
+  switch (state) {
     case kStateIncomplete:
-        // we can go to this state from state kStateEmpty
-        assert(_state == kStateEmpty);
+      // we can go to this state from state kStateEmpty
+      assert(_state == kStateEmpty);
 
-        // Do nothing, we received a packet
-        break;
+      // Do nothing, we received a packet
+      break;
 
     case kStateComplete:
-        assert(_state == kStateEmpty ||
-               _state == kStateIncomplete ||
-               _state == kStateDecodable);
+      assert(_state == kStateEmpty || _state == kStateIncomplete ||
+             _state == kStateDecodable);
 
-        break;
+      break;
 
     case kStateEmpty:
-        // Should only be set to empty through Reset().
-        assert(false);
-        break;
+      // Should only be set to empty through Reset().
+      assert(false);
+      break;
 
     case kStateDecodable:
-        assert(_state == kStateEmpty ||
-               _state == kStateIncomplete);
-        break;
-    }
-    _state = state;
+      assert(_state == kStateEmpty || _state == kStateIncomplete);
+      break;
+  }
+  _state = state;
 }
 
 // Get current state of frame
-VCMFrameBufferStateEnum
-VCMFrameBuffer::GetState() const {
-    return _state;
+VCMFrameBufferStateEnum VCMFrameBuffer::GetState() const {
+  return _state;
 }
 
 // Get current state of frame
-VCMFrameBufferStateEnum
-VCMFrameBuffer::GetState(uint32_t& timeStamp) const {
-    timeStamp = TimeStamp();
-    return GetState();
+VCMFrameBufferStateEnum VCMFrameBuffer::GetState(uint32_t& timeStamp) const {
+  timeStamp = TimeStamp();
+  return GetState();
 }
 
-bool
-VCMFrameBuffer::IsRetransmitted() const {
-    return _sessionInfo.session_nack();
+bool VCMFrameBuffer::IsRetransmitted() const {
+  return _sessionInfo.session_nack();
 }
 
-void
-VCMFrameBuffer::PrepareForDecode(bool continuous) {
+void VCMFrameBuffer::PrepareForDecode(bool continuous) {
 #ifdef INDEPENDENT_PARTITIONS
-    if (_codec == kVideoCodecVP8) {
-        _length =
-            _sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
-                                                     &_fragmentation);
-    } else {
-        size_t bytes_removed = _sessionInfo.MakeDecodable();
-        _length -= bytes_removed;
-    }
-#else
+  if (_codec == kVideoCodecVP8) {
+    _length = _sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
+                                                       &_fragmentation);
+  } else {
     size_t bytes_removed = _sessionInfo.MakeDecodable();
     _length -= bytes_removed;
+  }
+#else
+  size_t bytes_removed = _sessionInfo.MakeDecodable();
+  _length -= bytes_removed;
 #endif
-    // Transfer frame information to EncodedFrame and create any codec
-    // specific information.
-    _frameType = _sessionInfo.FrameType();
-    _completeFrame = _sessionInfo.complete();
-    _missingFrame = !continuous;
+  // Transfer frame information to EncodedFrame and create any codec
+  // specific information.
+  _frameType = _sessionInfo.FrameType();
+  _completeFrame = _sessionInfo.complete();
+  _missingFrame = !continuous;
 }
 
 }  // namespace webrtc
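
The reformatted VCMFrameBuffer::InsertPacket above keeps the original growth strategy: the backing buffer is enlarged by whole multiples of kBufferIncStepSizeBytes rather than to the exact byte count, and the insert fails with kSizeError once kMaxJBFrameSizeBytes would be exceeded. The following is a minimal, self-contained sketch of that rounding arithmetic; the constant values and variable names are invented for the example, and only the ceiling expression mirrors the code above.

#include <cstdint>
#include <cstdio>

int main() {
  // Stand-ins for kBufferIncStepSizeBytes and kMaxJBFrameSizeBytes; the real
  // constants are defined elsewhere in the module and may differ.
  const uint32_t kStepBytes = 3000;
  const uint32_t kMaxFrameBytes = 4000000;

  uint32_t current_size = 6000;  // Current allocation (_size).
  uint32_t required = 7500;      // Length() + packet.sizeBytes (+ start code).

  // Same rounding expression as InsertPacket: ceil(required / step) increments.
  const uint32_t increments =
      required / kStepBytes + (required % kStepBytes > 0);          // 2 + 1 = 3
  const uint32_t new_size = current_size + increments * kStepBytes;  // 15000

  if (new_size > kMaxFrameBytes) {
    std::printf("too big: would return kSizeError\n");
  } else {
    std::printf("grow buffer from %u to %u bytes\n",
                static_cast<unsigned>(current_size),
                static_cast<unsigned>(new_size));
  }
  return 0;
}
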
diff --git a/webrtc/modules/video_coding/generic_decoder.cc b/webrtc/modules/video_coding/generic_decoder.cc
index 3c72509..5cbe0f5 100644
--- a/webrtc/modules/video_coding/generic_decoder.cc
+++ b/webrtc/modules/video_coding/generic_decoder.cc
@@ -17,7 +17,7 @@
 
 namespace webrtc {
 
-VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming& timing,
+VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming* timing,
                                                  Clock* clock)
     : _critSect(CriticalSectionWrapper::CreateCriticalSection()),
       _clock(clock),
@@ -26,22 +26,19 @@
       _timestampMap(kDecoderFrameMemoryLength),
       _lastReceivedPictureID(0) {}
 
-VCMDecodedFrameCallback::~VCMDecodedFrameCallback()
-{
-    delete _critSect;
+VCMDecodedFrameCallback::~VCMDecodedFrameCallback() {
+  delete _critSect;
 }
 
 void VCMDecodedFrameCallback::SetUserReceiveCallback(
-    VCMReceiveCallback* receiveCallback)
-{
-    CriticalSectionScoped cs(_critSect);
-    _receiveCallback = receiveCallback;
+    VCMReceiveCallback* receiveCallback) {
+  CriticalSectionScoped cs(_critSect);
+  _receiveCallback = receiveCallback;
 }
 
-VCMReceiveCallback* VCMDecodedFrameCallback::UserReceiveCallback()
-{
-    CriticalSectionScoped cs(_critSect);
-    return _receiveCallback;
+VCMReceiveCallback* VCMDecodedFrameCallback::UserReceiveCallback() {
+  CriticalSectionScoped cs(_critSect);
+  return _receiveCallback;
 }
 
 int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage) {
@@ -50,66 +47,57 @@
 
 int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
                                          int64_t decode_time_ms) {
-    TRACE_EVENT_INSTANT1("webrtc", "VCMDecodedFrameCallback::Decoded",
-                         "timestamp", decodedImage.timestamp());
-    // TODO(holmer): We should improve this so that we can handle multiple
-    // callbacks from one call to Decode().
-    VCMFrameInformation* frameInfo;
-    VCMReceiveCallback* callback;
-    {
-        CriticalSectionScoped cs(_critSect);
-        frameInfo = _timestampMap.Pop(decodedImage.timestamp());
-        callback = _receiveCallback;
-    }
-
-    if (frameInfo == NULL) {
-      LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
-                         "this one.";
-      return WEBRTC_VIDEO_CODEC_OK;
-    }
-
-    const int64_t now_ms = _clock->TimeInMilliseconds();
-    if (decode_time_ms < 0) {
-      decode_time_ms =
-          static_cast<int32_t>(now_ms - frameInfo->decodeStartTimeMs);
-    }
-    _timing.StopDecodeTimer(
-        decodedImage.timestamp(),
-        decode_time_ms,
-        now_ms,
-        frameInfo->renderTimeMs);
-
-    if (callback != NULL)
-    {
-        decodedImage.set_render_time_ms(frameInfo->renderTimeMs);
-        decodedImage.set_rotation(frameInfo->rotation);
-        callback->FrameToRender(decodedImage);
-    }
-    return WEBRTC_VIDEO_CODEC_OK;
-}
-
-int32_t
-VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(
-    const uint64_t pictureId)
-{
+  TRACE_EVENT_INSTANT1("webrtc", "VCMDecodedFrameCallback::Decoded",
+                       "timestamp", decodedImage.timestamp());
+  // TODO(holmer): We should improve this so that we can handle multiple
+  // callbacks from one call to Decode().
+  VCMFrameInformation* frameInfo;
+  VCMReceiveCallback* callback;
+  {
     CriticalSectionScoped cs(_critSect);
-    if (_receiveCallback != NULL)
-    {
-        return _receiveCallback->ReceivedDecodedReferenceFrame(pictureId);
-    }
-    return -1;
+    frameInfo = _timestampMap.Pop(decodedImage.timestamp());
+    callback = _receiveCallback;
+  }
+
+  if (frameInfo == NULL) {
+    LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
+                       "this one.";
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  const int64_t now_ms = _clock->TimeInMilliseconds();
+  if (decode_time_ms < 0) {
+    decode_time_ms =
+        static_cast<int32_t>(now_ms - frameInfo->decodeStartTimeMs);
+  }
+  _timing->StopDecodeTimer(decodedImage.timestamp(), decode_time_ms, now_ms,
+                           frameInfo->renderTimeMs);
+
+  if (callback != NULL) {
+    decodedImage.set_render_time_ms(frameInfo->renderTimeMs);
+    decodedImage.set_rotation(frameInfo->rotation);
+    callback->FrameToRender(decodedImage);
+  }
+  return WEBRTC_VIDEO_CODEC_OK;
 }
 
-int32_t
-VCMDecodedFrameCallback::ReceivedDecodedFrame(const uint64_t pictureId)
-{
-    _lastReceivedPictureID = pictureId;
-    return 0;
+int32_t VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(
+    const uint64_t pictureId) {
+  CriticalSectionScoped cs(_critSect);
+  if (_receiveCallback != NULL) {
+    return _receiveCallback->ReceivedDecodedReferenceFrame(pictureId);
+  }
+  return -1;
 }
 
-uint64_t VCMDecodedFrameCallback::LastReceivedPictureID() const
-{
-    return _lastReceivedPictureID;
+int32_t VCMDecodedFrameCallback::ReceivedDecodedFrame(
+    const uint64_t pictureId) {
+  _lastReceivedPictureID = pictureId;
+  return 0;
+}
+
+uint64_t VCMDecodedFrameCallback::LastReceivedPictureID() const {
+  return _lastReceivedPictureID;
 }
 
 void VCMDecodedFrameCallback::OnDecoderImplementationName(
@@ -125,14 +113,12 @@
   _timestampMap.Add(timestamp, frameInfo);
 }
 
-int32_t VCMDecodedFrameCallback::Pop(uint32_t timestamp)
-{
-    CriticalSectionScoped cs(_critSect);
-    if (_timestampMap.Pop(timestamp) == NULL)
-    {
-        return VCM_GENERAL_ERROR;
-    }
-    return VCM_OK;
+int32_t VCMDecodedFrameCallback::Pop(uint32_t timestamp) {
+  CriticalSectionScoped cs(_critSect);
+  if (_timestampMap.Pop(timestamp) == NULL) {
+    return VCM_GENERAL_ERROR;
+  }
+  return VCM_OK;
 }
 
 VCMGenericDecoder::VCMGenericDecoder(VideoDecoder* decoder, bool isExternal)
@@ -147,12 +133,11 @@
 VCMGenericDecoder::~VCMGenericDecoder() {}
 
 int32_t VCMGenericDecoder::InitDecode(const VideoCodec* settings,
-                                      int32_t numberOfCores)
-{
-    TRACE_EVENT0("webrtc", "VCMGenericDecoder::InitDecode");
-    _codecType = settings->codecType;
+                                      int32_t numberOfCores) {
+  TRACE_EVENT0("webrtc", "VCMGenericDecoder::InitDecode");
+  _codecType = settings->codecType;
 
-    return _decoder->InitDecode(settings, numberOfCores);
+  return _decoder->InitDecode(settings, numberOfCores);
 }
 
 int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
@@ -169,16 +154,13 @@
                                    frame.CodecSpecific(), frame.RenderTimeMs());
 
     _callback->OnDecoderImplementationName(_decoder->ImplementationName());
-    if (ret < WEBRTC_VIDEO_CODEC_OK)
-    {
+    if (ret < WEBRTC_VIDEO_CODEC_OK) {
         LOG(LS_WARNING) << "Failed to decode frame with timestamp "
                         << frame.TimeStamp() << ", error code: " << ret;
         _callback->Pop(frame.TimeStamp());
         return ret;
-    }
-    else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
-             ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI)
-    {
+    } else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
+               ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI) {
         // No output
         _callback->Pop(frame.TimeStamp());
     }
@@ -207,4 +189,4 @@
   return _decoder->PrefersLateDecoding();
 }
 
-}  // namespace
+}  // namespace webrtc
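
Besides formatting, generic_decoder.cc now reaches VCMTiming through a pointer (_timing->StopDecodeTimer) instead of a reference. The decode-time handling in Decoded() is otherwise unchanged: if the decoder did not report its own decode time, the callback derives it from the recorded decode start time. A small sketch of that fallback, using a hypothetical helper name, is shown below.

#include <cstdint>

// Hypothetical helper; it only restates the branch in Decoded() above.
int64_t EffectiveDecodeTimeMs(int64_t reported_decode_time_ms,
                              int64_t now_ms,
                              int64_t decode_start_time_ms) {
  if (reported_decode_time_ms >= 0) {
    return reported_decode_time_ms;  // The decoder supplied its own measurement.
  }
  // Fall back to wall-clock time elapsed since the decode started.
  return now_ms - decode_start_time_ms;
}
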
diff --git a/webrtc/modules/video_coding/generic_decoder.h b/webrtc/modules/video_coding/generic_decoder.h
index b23462f..67ceabf 100644
--- a/webrtc/modules/video_coding/generic_decoder.h
+++ b/webrtc/modules/video_coding/generic_decoder.h
@@ -17,31 +17,29 @@
 #include "webrtc/modules/video_coding/timestamp_map.h"
 #include "webrtc/modules/video_coding/timing.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
 class VCMReceiveCallback;
 
 enum { kDecoderFrameMemoryLength = 10 };
 
-struct VCMFrameInformation
-{
-    int64_t     renderTimeMs;
-    int64_t     decodeStartTimeMs;
-    void*             userData;
-    VideoRotation rotation;
+struct VCMFrameInformation {
+  int64_t renderTimeMs;
+  int64_t decodeStartTimeMs;
+  void* userData;
+  VideoRotation rotation;
 };
 
-class VCMDecodedFrameCallback : public DecodedImageCallback
-{
-public:
-    VCMDecodedFrameCallback(VCMTiming& timing, Clock* clock);
+class VCMDecodedFrameCallback : public DecodedImageCallback {
+ public:
+  VCMDecodedFrameCallback(VCMTiming* timing, Clock* clock);
     virtual ~VCMDecodedFrameCallback();
     void SetUserReceiveCallback(VCMReceiveCallback* receiveCallback);
     VCMReceiveCallback* UserReceiveCallback();
 
-    virtual int32_t Decoded(VideoFrame& decodedImage);
-    virtual int32_t Decoded(VideoFrame& decodedImage, int64_t decode_time_ms);
+    virtual int32_t Decoded(VideoFrame& decodedImage);  // NOLINT
+    virtual int32_t Decoded(VideoFrame& decodedImage,   // NOLINT
+                            int64_t decode_time_ms);
     virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId);
     virtual int32_t ReceivedDecodedFrame(const uint64_t pictureId);
 
@@ -51,65 +49,63 @@
     void Map(uint32_t timestamp, VCMFrameInformation* frameInfo);
     int32_t Pop(uint32_t timestamp);
 
-private:
+ private:
     // Protect |_receiveCallback| and |_timestampMap|.
     CriticalSectionWrapper* _critSect;
     Clock* _clock;
     VCMReceiveCallback* _receiveCallback GUARDED_BY(_critSect);
-    VCMTiming& _timing;
+    VCMTiming* _timing;
     VCMTimestampMap _timestampMap GUARDED_BY(_critSect);
     uint64_t _lastReceivedPictureID;
 };
 
+class VCMGenericDecoder {
+  friend class VCMCodecDataBase;
 
-class VCMGenericDecoder
-{
-    friend class VCMCodecDataBase;
-public:
-    VCMGenericDecoder(VideoDecoder* decoder, bool isExternal = false);
-    ~VCMGenericDecoder();
+ public:
+  explicit VCMGenericDecoder(VideoDecoder* decoder, bool isExternal = false);
+  ~VCMGenericDecoder();
 
-    /**
-    * Initialize the decoder with the information from the VideoCodec
-    */
-    int32_t InitDecode(const VideoCodec* settings,
-                             int32_t numberOfCores);
+  /**
+  * Initialize the decoder with the information from the VideoCodec
+  */
+  int32_t InitDecode(const VideoCodec* settings, int32_t numberOfCores);
 
-    /**
-    * Decode to a raw I420 frame,
-    *
-    * inputVideoBuffer reference to encoded video frame
-    */
-    int32_t Decode(const VCMEncodedFrame& inputFrame, int64_t nowMs);
+  /**
+  * Decode to a raw I420 frame,
+  *
+  * inputVideoBuffer reference to encoded video frame
+  */
+  int32_t Decode(const VCMEncodedFrame& inputFrame, int64_t nowMs);
 
-    /**
-    * Free the decoder memory
-    */
-    int32_t Release();
+  /**
+  * Free the decoder memory
+  */
+  int32_t Release();
 
-    /**
-    * Reset the decoder state, prepare for a new call
-    */
-    int32_t Reset();
+  /**
+  * Reset the decoder state, prepare for a new call
+  */
+  int32_t Reset();
 
-    /**
-    * Set decode callback. Deregistering while decoding is illegal.
-    */
-    int32_t RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);
+  /**
+  * Set decode callback. Deregistering while decoding is illegal.
+  */
+  int32_t RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);
 
-    bool External() const;
-    bool PrefersLateDecoding() const;
+  bool External() const;
+  bool PrefersLateDecoding() const;
 
-private:
-    VCMDecodedFrameCallback*    _callback;
-    VCMFrameInformation         _frameInfos[kDecoderFrameMemoryLength];
-    uint32_t                    _nextFrameInfoIdx;
-    VideoDecoder* const         _decoder;
-    VideoCodecType              _codecType;
-    bool                        _isExternal;
-    bool                        _keyFrameDecoded;
+ private:
+  VCMDecodedFrameCallback* _callback;
+  VCMFrameInformation _frameInfos[kDecoderFrameMemoryLength];
+  uint32_t _nextFrameInfoIdx;
+  VideoDecoder* const _decoder;
+  VideoCodecType _codecType;
+  bool _isExternal;
+  bool _keyFrameDecoded;
 };
 
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
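
// Illustrative sketch (not part of this CL): roughly how the lint-cleaned
// decoder classes above are wired together. |decoder|, |timing|, |clock|,
// |settings|, |receive_callback| and |encoded_frame| are assumed to be
// provided by the caller; error handling is omitted.
VCMDecodedFrameCallback decoded_callback(&timing, clock);  // Timing now passed by pointer.
decoded_callback.SetUserReceiveCallback(receive_callback);

VCMGenericDecoder generic_decoder(decoder);  // |isExternal| defaults to false.
generic_decoder.InitDecode(&settings, 1 /* numberOfCores */);
generic_decoder.RegisterDecodeCompleteCallback(&decoded_callback);
generic_decoder.Decode(encoded_frame, clock->TimeInMilliseconds());
generic_decoder.Release();
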
diff --git a/webrtc/modules/video_coding/generic_encoder.cc b/webrtc/modules/video_coding/generic_encoder.cc
index ae5284b..c7444ce 100644
--- a/webrtc/modules/video_coding/generic_encoder.cc
+++ b/webrtc/modules/video_coding/generic_encoder.cc
@@ -8,12 +8,15 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include "webrtc/modules/video_coding/generic_encoder.h"
+
+#include <vector>
+
 #include "webrtc/base/checks.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/base/trace_event.h"
 #include "webrtc/engine_configurations.h"
 #include "webrtc/modules/video_coding/encoded_frame.h"
-#include "webrtc/modules/video_coding/generic_encoder.h"
 #include "webrtc/modules/video_coding/media_optimization.h"
 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 
@@ -28,8 +31,7 @@
       rtp->codec = kRtpVideoVp8;
       rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
       rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
-      rtp->codecHeader.VP8.nonReference =
-          info->codecSpecific.VP8.nonReference;
+      rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
       rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
       rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
       rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
@@ -89,7 +91,7 @@
 }
 }  // namespace
 
-//#define DEBUG_ENCODER_BIT_STREAM
+// #define DEBUG_ENCODER_BIT_STREAM
 
 VCMGenericEncoder::VCMGenericEncoder(
     VideoEncoder* encoder,
@@ -195,10 +197,8 @@
   return encoder_params_;
 }
 
-int32_t
-VCMGenericEncoder::SetPeriodicKeyFrames(bool enable)
-{
-    return encoder_->SetPeriodicKeyFrames(enable);
+int32_t VCMGenericEncoder::SetPeriodicKeyFrames(bool enable) {
+  return encoder_->SetPeriodicKeyFrames(enable);
 }
 
 int32_t VCMGenericEncoder::RequestFrame(
@@ -207,10 +207,8 @@
   return encoder_->Encode(image, NULL, &frame_types);
 }
 
-bool
-VCMGenericEncoder::InternalSource() const
-{
-    return internal_source_;
+bool VCMGenericEncoder::InternalSource() const {
+  return internal_source_;
 }
 
 void VCMGenericEncoder::OnDroppedFrame() {
@@ -225,9 +223,9 @@
   return encoder_->GetTargetFramerate();
 }
 
- /***************************
-  * Callback Implementation
-  ***************************/
+/***************************
+ * Callback Implementation
+ ***************************/
 VCMEncodedFrameCallback::VCMEncodedFrameCallback(
     EncodedImageCallback* post_encode_callback)
     : send_callback_(),
@@ -242,22 +240,20 @@
 #endif
 {
 #ifdef DEBUG_ENCODER_BIT_STREAM
-    _bitStreamAfterEncoder = fopen("encoderBitStream.bit", "wb");
+  _bitStreamAfterEncoder = fopen("encoderBitStream.bit", "wb");
 #endif
 }
 
-VCMEncodedFrameCallback::~VCMEncodedFrameCallback()
-{
+VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {
 #ifdef DEBUG_ENCODER_BIT_STREAM
-    fclose(_bitStreamAfterEncoder);
+  fclose(_bitStreamAfterEncoder);
 #endif
 }
 
-int32_t
-VCMEncodedFrameCallback::SetTransportCallback(VCMPacketizationCallback* transport)
-{
-    send_callback_ = transport;
-    return VCM_OK;
+int32_t VCMEncodedFrameCallback::SetTransportCallback(
+    VCMPacketizationCallback* transport) {
+  send_callback_ = transport;
+  return VCM_OK;
 }
 
 int32_t VCMEncodedFrameCallback::Encoded(
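
// Illustrative sketch (not part of this CL): how the reformatted
// VCMEncodedFrameCallback setters are typically called when wiring up the
// send side. |post_encode_callback|, |packetization_callback| and |media_opt|
// are assumed to exist in the caller.
VCMEncodedFrameCallback encoded_callback(post_encode_callback);  // Ctor is now explicit.
encoded_callback.SetTransportCallback(packetization_callback);
encoded_callback.SetMediaOpt(media_opt);
encoded_callback.SetPayloadType(96);  // Example dynamic RTP payload type.
encoded_callback.SetInternalSource(false);
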
diff --git a/webrtc/modules/video_coding/generic_encoder.h b/webrtc/modules/video_coding/generic_encoder.h
index 5346b63..f739edb 100644
--- a/webrtc/modules/video_coding/generic_encoder.h
+++ b/webrtc/modules/video_coding/generic_encoder.h
@@ -11,11 +11,12 @@
 #ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
 #define WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
 
+#include <stdio.h>
+#include <vector>
+
 #include "webrtc/modules/video_coding/include/video_codec_interface.h"
 #include "webrtc/modules/video_coding/include/video_coding_defines.h"
 
-#include <stdio.h>
-
 #include "webrtc/base/criticalsection.h"
 #include "webrtc/base/scoped_ptr.h"
 
@@ -36,10 +37,10 @@
 /*************************************/
 /* VCMEncodeFrameCallback class     */
 /***********************************/
-class VCMEncodedFrameCallback : public EncodedImageCallback
-{
-public:
-    VCMEncodedFrameCallback(EncodedImageCallback* post_encode_callback);
+class VCMEncodedFrameCallback : public EncodedImageCallback {
+ public:
+    explicit VCMEncodedFrameCallback(
+      EncodedImageCallback* post_encode_callback);
     virtual ~VCMEncodedFrameCallback();
 
     /*
@@ -56,16 +57,21 @@
     /**
     * Set media Optimization
     */
-    void SetMediaOpt (media_optimization::MediaOptimization* mediaOpt);
+    void SetMediaOpt(media_optimization::MediaOptimization* mediaOpt);
 
-    void SetPayloadType(uint8_t payloadType) { _payloadType = payloadType; };
-    void SetInternalSource(bool internalSource) { _internalSource = internalSource; };
+    void SetPayloadType(uint8_t payloadType) {
+      _payloadType = payloadType;
+    }
+
+    void SetInternalSource(bool internalSource) {
+      _internalSource = internalSource;
+    }
 
     void SetRotation(VideoRotation rotation) { _rotation = rotation; }
     void SignalLastEncoderImplementationUsed(
         const char* encoder_implementation_name);
 
-private:
+ private:
     VCMPacketizationCallback* send_callback_;
     media_optimization::MediaOptimization* _mediaOpt;
     uint8_t _payloadType;
@@ -77,68 +83,67 @@
 #ifdef DEBUG_ENCODER_BIT_STREAM
     FILE* _bitStreamAfterEncoder;
 #endif
-};// end of VCMEncodeFrameCallback class
-
+};  // end of VCMEncodeFrameCallback class
 
 /******************************/
 /* VCMGenericEncoder class    */
 /******************************/
-class VCMGenericEncoder
-{
-    friend class VCMCodecDataBase;
-public:
-    VCMGenericEncoder(VideoEncoder* encoder,
-                      VideoEncoderRateObserver* rate_observer,
-                      VCMEncodedFrameCallback* encoded_frame_callback,
-                      bool internalSource);
-    ~VCMGenericEncoder();
-    /**
-    * Free encoder memory
-    */
-    int32_t Release();
-    /**
-    * Initialize the encoder with the information from the VideoCodec
-    */
-    int32_t InitEncode(const VideoCodec* settings,
-                       int32_t numberOfCores,
-                       size_t maxPayloadSize);
-    /**
-    * Encode raw image
-    * inputFrame        : Frame containing raw image
-    * codecSpecificInfo : Specific codec data
-    * cameraFrameRate   : Request or information from the remote side
-    * frameType         : The requested frame type to encode
-    */
-    int32_t Encode(const VideoFrame& inputFrame,
-                   const CodecSpecificInfo* codecSpecificInfo,
-                   const std::vector<FrameType>& frameTypes);
+class VCMGenericEncoder {
+  friend class VCMCodecDataBase;
 
-    void SetEncoderParameters(const EncoderParameters& params);
-    EncoderParameters GetEncoderParameters() const;
+ public:
+  VCMGenericEncoder(VideoEncoder* encoder,
+                    VideoEncoderRateObserver* rate_observer,
+                    VCMEncodedFrameCallback* encoded_frame_callback,
+                    bool internalSource);
+  ~VCMGenericEncoder();
+  /**
+  * Free encoder memory
+  */
+  int32_t Release();
+  /**
+  * Initialize the encoder with the information from the VideoCodec
+  */
+  int32_t InitEncode(const VideoCodec* settings,
+                     int32_t numberOfCores,
+                     size_t maxPayloadSize);
+  /**
+  * Encode raw image
+  * inputFrame        : Frame containing raw image
+  * codecSpecificInfo : Specific codec data
+  * cameraFrameRate   : Request or information from the remote side
+  * frameType         : The requested frame type to encode
+  */
+  int32_t Encode(const VideoFrame& inputFrame,
+                 const CodecSpecificInfo* codecSpecificInfo,
+                 const std::vector<FrameType>& frameTypes);
 
-    int32_t SetPeriodicKeyFrames(bool enable);
+  void SetEncoderParameters(const EncoderParameters& params);
+  EncoderParameters GetEncoderParameters() const;
 
-    int32_t RequestFrame(const std::vector<FrameType>& frame_types);
+  int32_t SetPeriodicKeyFrames(bool enable);
 
-    bool InternalSource() const;
+  int32_t RequestFrame(const std::vector<FrameType>& frame_types);
 
-    void OnDroppedFrame();
+  bool InternalSource() const;
 
-    bool SupportsNativeHandle() const;
+  void OnDroppedFrame();
 
-    int GetTargetFramerate();
+  bool SupportsNativeHandle() const;
 
-private:
-    VideoEncoder* const encoder_;
-    VideoEncoderRateObserver* const rate_observer_;
-    VCMEncodedFrameCallback* const vcm_encoded_frame_callback_;
-    const bool internal_source_;
-    mutable rtc::CriticalSection params_lock_;
-    EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
-    VideoRotation rotation_;
-    bool is_screenshare_;
-}; // end of VCMGenericEncoder class
+  int GetTargetFramerate();
+
+ private:
+  VideoEncoder* const encoder_;
+  VideoEncoderRateObserver* const rate_observer_;
+  VCMEncodedFrameCallback* const vcm_encoded_frame_callback_;
+  const bool internal_source_;
+  mutable rtc::CriticalSection params_lock_;
+  EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
+  VideoRotation rotation_;
+  bool is_screenshare_;
+};  // end of VCMGenericEncoder class
 
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
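
// Illustrative sketch (not part of this CL): the encode path declared above.
// |encoder|, |rate_observer|, |encoded_callback|, |send_codec| and
// |input_frame| are assumed to be set up by the caller.
VCMGenericEncoder generic_encoder(encoder, rate_observer, &encoded_callback,
                                  false /* internalSource */);
generic_encoder.InitEncode(&send_codec, 1 /* numberOfCores */,
                           1200 /* maxPayloadSize */);
std::vector<FrameType> frame_types(1, kVideoFrameDelta);
generic_encoder.Encode(input_frame, NULL /* codecSpecificInfo */, frame_types);
generic_encoder.Release();
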
diff --git a/webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h b/webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h
index 47b94c0..0185dae 100644
--- a/webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h
+++ b/webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h
@@ -20,14 +20,13 @@
 class MockVCMFrameTypeCallback : public VCMFrameTypeCallback {
  public:
   MOCK_METHOD0(RequestKeyFrame, int32_t());
-  MOCK_METHOD1(SliceLossIndicationRequest,
-               int32_t(const uint64_t pictureId));
+  MOCK_METHOD1(SliceLossIndicationRequest, int32_t(const uint64_t pictureId));
 };
 
 class MockPacketRequestCallback : public VCMPacketRequestCallback {
  public:
-  MOCK_METHOD2(ResendPackets, int32_t(const uint16_t* sequenceNumbers,
-                                      uint16_t length));
+  MOCK_METHOD2(ResendPackets,
+               int32_t(const uint16_t* sequenceNumbers, uint16_t length));
 };
 
 }  // namespace webrtc
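
// Illustrative gmock sketch (not part of this CL): how the mocks above are
// typically used in a test body.
MockVCMFrameTypeCallback frame_type_callback;
EXPECT_CALL(frame_type_callback, RequestKeyFrame())
    .WillOnce(testing::Return(0));

MockPacketRequestCallback packet_request_callback;
EXPECT_CALL(packet_request_callback, ResendPackets(testing::_, testing::_))
    .Times(testing::AtLeast(1));
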
diff --git a/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h b/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h
index 1eb228c..9cb4a83 100644
--- a/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h
+++ b/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h
@@ -12,6 +12,7 @@
 #define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
 
 #include <string>
+#include <vector>
 
 #include "testing/gmock/include/gmock/gmock.h"
 #include "webrtc/modules/video_coding/include/video_codec_interface.h"
@@ -21,17 +22,19 @@
 
 class MockEncodedImageCallback : public EncodedImageCallback {
  public:
-  MOCK_METHOD3(Encoded, int32_t(const EncodedImage& encodedImage,
-                                const CodecSpecificInfo* codecSpecificInfo,
-                                const RTPFragmentationHeader* fragmentation));
+  MOCK_METHOD3(Encoded,
+               int32_t(const EncodedImage& encodedImage,
+                       const CodecSpecificInfo* codecSpecificInfo,
+                       const RTPFragmentationHeader* fragmentation));
 };
 
 class MockVideoEncoder : public VideoEncoder {
  public:
-  MOCK_CONST_METHOD2(Version, int32_t(int8_t *version, int32_t length));
-  MOCK_METHOD3(InitEncode, int32_t(const VideoCodec* codecSettings,
-                                   int32_t numberOfCores,
-                                   size_t maxPayloadSize));
+  MOCK_CONST_METHOD2(Version, int32_t(int8_t* version, int32_t length));
+  MOCK_METHOD3(InitEncode,
+               int32_t(const VideoCodec* codecSettings,
+                       int32_t numberOfCores,
+                       size_t maxPayloadSize));
   MOCK_METHOD3(Encode,
                int32_t(const VideoFrame& inputImage,
                        const CodecSpecificInfo* codecSpecificInfo,
@@ -47,24 +50,25 @@
 
 class MockDecodedImageCallback : public DecodedImageCallback {
  public:
-  MOCK_METHOD1(Decoded, int32_t(VideoFrame& decodedImage));
-  MOCK_METHOD2(Decoded, int32_t(VideoFrame& decodedImage,
-                                int64_t decode_time_ms));
+  MOCK_METHOD1(Decoded, int32_t(VideoFrame& decodedImage));  // NOLINT
+  MOCK_METHOD2(Decoded,
+               int32_t(VideoFrame& decodedImage,  // NOLINT
+                       int64_t decode_time_ms));
   MOCK_METHOD1(ReceivedDecodedReferenceFrame,
                int32_t(const uint64_t pictureId));
-  MOCK_METHOD1(ReceivedDecodedFrame,
-               int32_t(const uint64_t pictureId));
+  MOCK_METHOD1(ReceivedDecodedFrame, int32_t(const uint64_t pictureId));
 };
 
 class MockVideoDecoder : public VideoDecoder {
  public:
-  MOCK_METHOD2(InitDecode, int32_t(const VideoCodec* codecSettings,
-                                   int32_t numberOfCores));
-  MOCK_METHOD5(Decode, int32_t(const EncodedImage& inputImage,
-                               bool missingFrames,
-                               const RTPFragmentationHeader* fragmentation,
-                               const CodecSpecificInfo* codecSpecificInfo,
-                               int64_t renderTimeMs));
+  MOCK_METHOD2(InitDecode,
+               int32_t(const VideoCodec* codecSettings, int32_t numberOfCores));
+  MOCK_METHOD5(Decode,
+               int32_t(const EncodedImage& inputImage,
+                       bool missingFrames,
+                       const RTPFragmentationHeader* fragmentation,
+                       const CodecSpecificInfo* codecSpecificInfo,
+                       int64_t renderTimeMs));
   MOCK_METHOD1(RegisterDecodeCompleteCallback,
                int32_t(DecodedImageCallback* callback));
   MOCK_METHOD0(Release, int32_t());
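
// Illustrative gmock sketch (not part of this CL): setting expectations on the
// codec interface mocks above.
MockVideoDecoder mock_decoder;
EXPECT_CALL(mock_decoder, InitDecode(testing::_, 1 /* numberOfCores */))
    .WillOnce(testing::Return(WEBRTC_VIDEO_CODEC_OK));
EXPECT_CALL(mock_decoder,
            Decode(testing::_, false, testing::_, testing::_, testing::_))
    .WillOnce(testing::Return(WEBRTC_VIDEO_CODEC_OK));
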
diff --git a/webrtc/modules/video_coding/include/video_codec_interface.h b/webrtc/modules/video_coding/include/video_codec_interface.h
index 787e641..19303c0 100644
--- a/webrtc/modules/video_coding/include/video_codec_interface.h
+++ b/webrtc/modules/video_coding/include/video_codec_interface.h
@@ -21,10 +21,9 @@
 #include "webrtc/video_encoder.h"
 #include "webrtc/video_frame.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
-class RTPFragmentationHeader; // forward declaration
+class RTPFragmentationHeader;  // forward declaration
 
 // Note: if any pointers are added to this struct, it must be fitted
 // with a copy-constructor. See below.
@@ -90,12 +89,11 @@
 // Note: if any pointers are added to this struct or its sub-structs, it
 // must be fitted with a copy-constructor. This is because it is copied
 // in the copy-constructor of VCMEncodedFrame.
-struct CodecSpecificInfo
-{
-    VideoCodecType   codecType;
-    CodecSpecificInfoUnion codecSpecific;
+struct CodecSpecificInfo {
+  VideoCodecType codecType;
+  CodecSpecificInfoUnion codecSpecific;
 };
 
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
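
// Illustrative sketch (not part of this CL): filling in the CodecSpecificInfo
// struct above for VP8, mirroring the fields read by CopyCodecSpecific() in
// generic_encoder.cc. |picture_id| is assumed to come from the caller, and
// <string.h> is assumed for memset.
CodecSpecificInfo info;
memset(&info, 0, sizeof(info));
info.codecType = kVideoCodecVP8;
info.codecSpecific.VP8.pictureId = picture_id;
info.codecSpecific.VP8.nonReference = false;
info.codecSpecific.VP8.temporalIdx = 0;
info.codecSpecific.VP8.layerSync = false;
info.codecSpecific.VP8.tl0PicIdx = 0;
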
diff --git a/webrtc/modules/video_coding/include/video_coding.h b/webrtc/modules/video_coding/include/video_coding.h
index 880180d..853e2a1 100644
--- a/webrtc/modules/video_coding/include/video_coding.h
+++ b/webrtc/modules/video_coding/include/video_coding.h
@@ -27,8 +27,7 @@
 #include "webrtc/system_wrappers/include/event_wrapper.h"
 #include "webrtc/video_frame.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
 class Clock;
 class EncodedImageCallback;
@@ -47,494 +46,524 @@
  public:
   virtual ~EventFactoryImpl() {}
 
-  virtual EventWrapper* CreateEvent() {
-    return EventWrapper::Create();
-  }
+  virtual EventWrapper* CreateEvent() { return EventWrapper::Create(); }
 };
 
 // Used to indicate which decode with errors mode should be used.
 enum VCMDecodeErrorMode {
-  kNoErrors,                // Never decode with errors. Video will freeze
-                            // if nack is disabled.
-  kSelectiveErrors,         // Frames that are determined decodable in
-                            // VCMSessionInfo may be decoded with missing
-                            // packets. As not all incomplete frames will be
-                            // decodable, video will freeze if nack is disabled.
-  kWithErrors               // Release frames as needed. Errors may be
-                            // introduced as some encoded frames may not be
-                            // complete.
+  kNoErrors,         // Never decode with errors. Video will freeze
+                     // if nack is disabled.
+  kSelectiveErrors,  // Frames that are determined decodable in
+                     // VCMSessionInfo may be decoded with missing
+                     // packets. As not all incomplete frames will be
+                     // decodable, video will freeze if nack is disabled.
+  kWithErrors        // Release frames as needed. Errors may be
+                     // introduced as some encoded frames may not be
+                     // complete.
 };
 
-class VideoCodingModule : public Module
-{
-public:
-    enum SenderNackMode {
-        kNackNone,
-        kNackAll,
-        kNackSelective
-    };
+class VideoCodingModule : public Module {
+ public:
+  enum SenderNackMode { kNackNone, kNackAll, kNackSelective };
 
-    enum ReceiverRobustness {
-        kNone,
-        kHardNack,
-        kSoftNack,
-        kReferenceSelection
-    };
+  enum ReceiverRobustness { kNone, kHardNack, kSoftNack, kReferenceSelection };
 
-    static VideoCodingModule* Create(
-        Clock* clock,
-        VideoEncoderRateObserver* encoder_rate_observer,
-        VCMQMSettingsCallback* qm_settings_callback);
+  static VideoCodingModule* Create(
+      Clock* clock,
+      VideoEncoderRateObserver* encoder_rate_observer,
+      VCMQMSettingsCallback* qm_settings_callback);
 
-    static VideoCodingModule* Create(Clock* clock, EventFactory* event_factory);
+  static VideoCodingModule* Create(Clock* clock, EventFactory* event_factory);
 
-    static void Destroy(VideoCodingModule* module);
+  static void Destroy(VideoCodingModule* module);
 
-    // Get number of supported codecs
-    //
-    // Return value     : Number of supported codecs
-    static uint8_t NumberOfCodecs();
+  // Get number of supported codecs
+  //
+  // Return value     : Number of supported codecs
+  static uint8_t NumberOfCodecs();
 
-    // Get supported codec settings with using id
-    //
-    // Input:
-    //      - listId         : Id or index of the codec to look up
-    //      - codec          : Memory where the codec settings will be stored
-    //
-    // Return value     : VCM_OK,              on success
-    //                    VCM_PARAMETER_ERROR  if codec not supported or id too high
-    static int32_t Codec(const uint8_t listId, VideoCodec* codec);
+  // Get supported codec settings using the codec id
+  //
+  // Input:
+  //      - listId         : Id or index of the codec to look up
+  //      - codec          : Memory where the codec settings will be stored
+  //
+  // Return value     : VCM_OK,              on success
+  //                    VCM_PARAMETER_ERROR  if codec not supported or id too
+  //                    high
+  static int32_t Codec(const uint8_t listId, VideoCodec* codec);
 
-    // Get supported codec settings using codec type
-    //
-    // Input:
-    //      - codecType      : The codec type to get settings for
-    //      - codec          : Memory where the codec settings will be stored
-    //
-    // Return value     : VCM_OK,              on success
-    //                    VCM_PARAMETER_ERROR  if codec not supported
-    static int32_t Codec(VideoCodecType codecType, VideoCodec* codec);
+  // Get supported codec settings using codec type
+  //
+  // Input:
+  //      - codecType      : The codec type to get settings for
+  //      - codec          : Memory where the codec settings will be stored
+  //
+  // Return value     : VCM_OK,              on success
+  //                    VCM_PARAMETER_ERROR  if codec not supported
+  static int32_t Codec(VideoCodecType codecType, VideoCodec* codec);
 
-    /*
-    *   Sender
-    */
+  /*
+  *   Sender
+  */
 
-    // Registers a codec to be used for encoding. Calling this
-    // API multiple times overwrites any previously registered codecs.
-    //
-    // NOTE: Must be called on the thread that constructed the VCM instance.
-    //
-    // Input:
-    //      - sendCodec      : Settings for the codec to be registered.
-    //      - numberOfCores  : The number of cores the codec is allowed
-    //                         to use.
-    //      - maxPayloadSize : The maximum size each payload is allowed
-    //                                to have. Usually MTU - overhead.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t RegisterSendCodec(const VideoCodec* sendCodec,
-                                            uint32_t numberOfCores,
-                                            uint32_t maxPayloadSize) = 0;
+  // Registers a codec to be used for encoding. Calling this
+  // API multiple times overwrites any previously registered codecs.
+  //
+  // NOTE: Must be called on the thread that constructed the VCM instance.
+  //
+  // Input:
+  //      - sendCodec      : Settings for the codec to be registered.
+  //      - numberOfCores  : The number of cores the codec is allowed
+  //                         to use.
+  //      - maxPayloadSize : The maximum size each payload is allowed
+  //                                to have. Usually MTU - overhead.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t RegisterSendCodec(const VideoCodec* sendCodec,
+                                    uint32_t numberOfCores,
+                                    uint32_t maxPayloadSize) = 0;
 
-    // Get the current send codec in use.
-    //
-    // If a codec has not been set yet, the |id| property of the return value
-    // will be 0 and |name| empty.
-    //
-    // NOTE: This method intentionally does not hold locks and minimizes data
-    // copying.  It must be called on the thread where the VCM was constructed.
-    virtual const VideoCodec& GetSendCodec() const = 0;
+  // Get the current send codec in use.
+  //
+  // If a codec has not been set yet, the |id| property of the return value
+  // will be 0 and |name| empty.
+  //
+  // NOTE: This method intentionally does not hold locks and minimizes data
+  // copying.  It must be called on the thread where the VCM was constructed.
+  virtual const VideoCodec& GetSendCodec() const = 0;
 
-    // DEPRECATED: Use GetSendCodec() instead.
-    //
-    // API to get the current send codec in use.
-    //
-    // Input:
-    //      - currentSendCodec : Address where the sendCodec will be written.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    //
-    // NOTE: The returned codec information is not guaranteed to be current when
-    // the call returns.  This method acquires a lock that is aligned with
-    // video encoding, so it should be assumed to be allowed to block for
-    // several milliseconds.
-    virtual int32_t SendCodec(VideoCodec* currentSendCodec) const = 0;
+  // DEPRECATED: Use GetSendCodec() instead.
+  //
+  // API to get the current send codec in use.
+  //
+  // Input:
+  //      - currentSendCodec : Address where the sendCodec will be written.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  //
+  // NOTE: The returned codec information is not guaranteed to be current when
+  // the call returns.  This method acquires a lock that is aligned with
+  // video encoding, so it should be assumed to be allowed to block for
+  // several milliseconds.
+  virtual int32_t SendCodec(VideoCodec* currentSendCodec) const = 0;
 
-    // DEPRECATED: Use GetSendCodec() instead.
-    //
-    // API to get the current send codec type
-    //
-    // Return value      : Codec type, on success.
-    //                     kVideoCodecUnknown, on error or if no send codec is set
-    // NOTE: Same notes apply as for SendCodec() above.
-    virtual VideoCodecType SendCodec() const = 0;
+  // DEPRECATED: Use GetSendCodec() instead.
+  //
+  // API to get the current send codec type
+  //
+  // Return value      : Codec type, on success.
+  //                     kVideoCodecUnknown, on error or if no send codec is set
+  // NOTE: Same notes apply as for SendCodec() above.
+  virtual VideoCodecType SendCodec() const = 0;
 
-    // Register an external encoder object. This can not be used together with
-    // external decoder callbacks.
-    //
-    // Input:
-    //      - externalEncoder : Encoder object to be used for encoding frames inserted
-    //                          with the AddVideoFrame API.
-    //      - payloadType     : The payload type bound which this encoder is bound to.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    // TODO(pbos): Remove return type when unused elsewhere.
-    virtual int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
-                                            uint8_t payloadType,
-                                            bool internalSource = false) = 0;
+  // Register an external encoder object. This can not be used together with
+  // external decoder callbacks.
+  //
+  // Input:
+  //      - externalEncoder : Encoder object to be used for encoding frames
+  //                          inserted with the AddVideoFrame API.
+  //      - payloadType     : The payload type which this encoder is bound to.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  // TODO(pbos): Remove return type when unused elsewhere.
+  virtual int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
+                                          uint8_t payloadType,
+                                          bool internalSource = false) = 0;
 
-    // API to get currently configured encoder target bitrate in bits/s.
-    //
-    // Return value      : 0,   on success.
-    //                     < 0, on error.
-    virtual int Bitrate(unsigned int* bitrate) const = 0;
+  // API to get currently configured encoder target bitrate in bits/s.
+  //
+  // Return value      : 0,   on success.
+  //                     < 0, on error.
+  virtual int Bitrate(unsigned int* bitrate) const = 0;
 
-    // API to get currently configured encoder target frame rate.
-    //
-    // Return value      : 0,   on success.
-    //                     < 0, on error.
-    virtual int FrameRate(unsigned int* framerate) const = 0;
+  // API to get currently configured encoder target frame rate.
+  //
+  // Return value      : 0,   on success.
+  //                     < 0, on error.
+  virtual int FrameRate(unsigned int* framerate) const = 0;
 
-    // Sets the parameters describing the send channel. These parameters are inputs to the
-    // Media Optimization inside the VCM and also specifies the target bit rate for the
-    // encoder. Bit rate used by NACK should already be compensated for by the user.
-    //
-    // Input:
-    //      - target_bitrate        : The target bitrate for VCM in bits/s.
-    //      - lossRate              : Fractions of lost packets the past second.
-    //                                (loss rate in percent = 100 * packetLoss / 255)
-    //      - rtt                   : Current round-trip time in ms.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,         on error.
-    virtual int32_t SetChannelParameters(uint32_t target_bitrate,
-                                         uint8_t lossRate,
-                                         int64_t rtt) = 0;
+  // Sets the parameters describing the send channel. These parameters are
+  // inputs to the Media Optimization inside the VCM and also specify the
+  // target bit rate for the encoder. Bit rate used by NACK should already be
+  // compensated for by the user.
+  //
+  // Input:
+  //      - target_bitrate        : The target bitrate for VCM in bits/s.
+  //      - lossRate              : Fractions of lost packets the past second.
+  //                                (loss rate in percent = 100 * packetLoss /
+  //                                255)
+  //      - rtt                   : Current round-trip time in ms.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,         on error.
+  virtual int32_t SetChannelParameters(uint32_t target_bitrate,
+                                       uint8_t lossRate,
+                                       int64_t rtt) = 0;
 
-    // Sets the parameters describing the receive channel. These parameters are inputs to the
-    // Media Optimization inside the VCM.
-    //
-    // Input:
-    //      - rtt                   : Current round-trip time in ms.
-    //                                with the most amount available bandwidth in a conference
-    //                                scenario
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t SetReceiveChannelParameters(int64_t rtt) = 0;
+  // Sets the parameters describing the receive channel. These parameters are
+  // inputs to the Media Optimization inside the VCM.
+  //
+  // Input:
+  //      - rtt                   : Current round-trip time in ms.
+  //                                with the most amount available bandwidth in
+  //                                a conference
+  //                                scenario
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t SetReceiveChannelParameters(int64_t rtt) = 0;
 
-    // Register a transport callback which will be called to deliver the encoded data and
-    // side information.
-    //
-    // Input:
-    //      - transport  : The callback object to register.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t RegisterTransportCallback(VCMPacketizationCallback* transport) = 0;
+  // Register a transport callback which will be called to deliver the encoded
+  // data and side information.
+  //
+  // Input:
+  //      - transport  : The callback object to register.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t RegisterTransportCallback(
+      VCMPacketizationCallback* transport) = 0;
 
-    // Register video output information callback which will be called to deliver information
-    // about the video stream produced by the encoder, for instance the average frame rate and
-    // bit rate.
-    //
-    // Input:
-    //      - outputInformation  : The callback object to register.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t RegisterSendStatisticsCallback(
-                                     VCMSendStatisticsCallback* sendStats) = 0;
+  // Register video output information callback which will be called to deliver
+  // information about the video stream produced by the encoder, for instance
+  // the average frame rate and bit rate.
+  //
+  // Input:
+  //      - outputInformation  : The callback object to register.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t RegisterSendStatisticsCallback(
+      VCMSendStatisticsCallback* sendStats) = 0;
 
-    // Register a video protection callback which will be called to deliver
-    // the requested FEC rate and NACK status (on/off).
-    //
-    // Input:
-    //      - protection  : The callback object to register.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t RegisterProtectionCallback(VCMProtectionCallback* protection) = 0;
+  // Register a video protection callback which will be called to deliver
+  // the requested FEC rate and NACK status (on/off).
+  //
+  // Input:
+  //      - protection  : The callback object to register.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t RegisterProtectionCallback(
+      VCMProtectionCallback* protection) = 0;
 
-    // Enable or disable a video protection method.
-    //
-    // Input:
-    //      - videoProtection  : The method to enable or disable.
-    //      - enable           : True if the method should be enabled, false if
-    //                           it should be disabled.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t SetVideoProtection(VCMVideoProtection videoProtection,
-                                       bool enable) = 0;
+  // Enable or disable a video protection method.
+  //
+  // Input:
+  //      - videoProtection  : The method to enable or disable.
+  //      - enable           : True if the method should be enabled, false if
+  //                           it should be disabled.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t SetVideoProtection(VCMVideoProtection videoProtection,
+                                     bool enable) = 0;
 
-    // Add one raw video frame to the encoder. This function does all the necessary
-    // processing, then decides what frame type to encode, or if the frame should be
-    // dropped. If the frame should be encoded it passes the frame to the encoder
-    // before it returns.
-    //
-    // Input:
-    //      - videoFrame        : Video frame to encode.
-    //      - codecSpecificInfo : Extra codec information, e.g., pre-parsed in-band signaling.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t AddVideoFrame(
-        const VideoFrame& videoFrame,
-        const VideoContentMetrics* contentMetrics = NULL,
-        const CodecSpecificInfo* codecSpecificInfo = NULL) = 0;
+  // Add one raw video frame to the encoder. This function does all the
+  // necessary processing, then decides what frame type to encode, or if the
+  // frame should be dropped. If the frame should be encoded it passes the
+  // frame to the encoder before it returns.
+  //
+  // Input:
+  //      - videoFrame        : Video frame to encode.
+  //      - codecSpecificInfo : Extra codec information, e.g., pre-parsed
+  //                            in-band signaling.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t AddVideoFrame(
+      const VideoFrame& videoFrame,
+      const VideoContentMetrics* contentMetrics = NULL,
+      const CodecSpecificInfo* codecSpecificInfo = NULL) = 0;
 
-    // Next frame encoded should be an intra frame (keyframe).
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t IntraFrameRequest(int stream_index) = 0;
+  // Next frame encoded should be an intra frame (keyframe).
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t IntraFrameRequest(int stream_index) = 0;
 
-    // Frame Dropper enable. Can be used to disable the frame dropping when the encoder
-    // over-uses its bit rate. This API is designed to be used when the encoded frames
-    // are supposed to be stored to an AVI file, or when the I420 codec is used and the
-    // target bit rate shouldn't affect the frame rate.
-    //
-    // Input:
-    //      - enable            : True to enable the setting, false to disable it.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t EnableFrameDropper(bool enable) = 0;
+  // Frame Dropper enable. Can be used to disable the frame dropping when the
+  // encoder over-uses its bit rate. This API is designed to be used when the
+  // encoded frames are supposed to be stored to an AVI file, or when the I420
+  // codec is used and the target bit rate shouldn't affect the frame rate.
+  //
+  // Input:
+  //      - enable            : True to enable the setting, false to disable it.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t EnableFrameDropper(bool enable) = 0;
 
+  /*
+  *   Receiver
+  */
 
-    /*
-    *   Receiver
-    */
+  // Register possible receive codecs, can be called multiple times for
+  // different codecs. The module will automatically switch between registered
+  // codecs depending on the payload type of incoming frames. The actual
+  // decoder will be created when needed.
+  //
+  // Input:
+  //      - receiveCodec      : Settings for the codec to be registered.
+  //      - numberOfCores     : Number of CPU cores that the decoder is allowed
+  //                            to use.
+  //      - requireKeyFrame   : Set this to true if you don't want any delta
+  //                            frames to be decoded until the first key frame
+  //                            has been decoded.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec,
+                                       int32_t numberOfCores,
+                                       bool requireKeyFrame = false) = 0;
 
-    // Register possible receive codecs, can be called multiple times for different codecs.
-    // The module will automatically switch between registered codecs depending on the
-    // payload type of incoming frames. The actual decoder will be created when needed.
-    //
-    // Input:
-    //      - receiveCodec      : Settings for the codec to be registered.
-    //      - numberOfCores     : Number of CPU cores that the decoder is allowed to use.
-    //      - requireKeyFrame   : Set this to true if you don't want any delta frames
-    //                            to be decoded until the first key frame has been decoded.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec,
-                                         int32_t numberOfCores,
-                                         bool requireKeyFrame = false) = 0;
+  // Register an externally defined decoder/renderer object. Can be a decoder
+  // only or a decoder coupled with a renderer. Note that RegisterReceiveCodec
+  // must be called to be used for decoding incoming streams.
+  //
+  // Input:
+  //      - externalDecoder        : The external decoder/renderer object.
+  //      - payloadType            : The payload type which this decoder should
+  //                                 be registered to.
+  //
+  virtual void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+                                       uint8_t payloadType) = 0;
 
-    // Register an externally defined decoder/renderer object. Can be a decoder only or a
-    // decoder coupled with a renderer. Note that RegisterReceiveCodec must be called to
-    // be used for decoding incoming streams.
-    //
-    // Input:
-    //      - externalDecoder        : The external decoder/renderer object.
-    //      - payloadType            : The payload type which this decoder should be
-    //                                 registered to.
-    //
-    virtual void RegisterExternalDecoder(VideoDecoder* externalDecoder,
-                                         uint8_t payloadType) = 0;
+  // Register a receive callback. Will be called whenever there is a new frame
+  // ready for rendering.
+  //
+  // Input:
+  //      - receiveCallback        : The callback object to be used by the
+  //                                 module when a frame is ready for rendering.
+  //                                 De-register with a NULL pointer.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t RegisterReceiveCallback(
+      VCMReceiveCallback* receiveCallback) = 0;
 
-    // Register a receive callback. Will be called whenever there is a new frame ready
-    // for rendering.
-    //
-    // Input:
-    //      - receiveCallback        : The callback object to be used by the module when a
-    //                                 frame is ready for rendering.
-    //                                 De-register with a NULL pointer.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t RegisterReceiveCallback(VCMReceiveCallback* receiveCallback) = 0;
+  // Register a receive statistics callback which will be called to deliver
+  // information about the video stream received by the receiving side of the
+  // VCM, for instance the average frame rate and bit rate.
+  //
+  // Input:
+  //      - receiveStats  : The callback object to register.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t RegisterReceiveStatisticsCallback(
+      VCMReceiveStatisticsCallback* receiveStats) = 0;
 
-    // Register a receive statistics callback which will be called to deliver information
-    // about the video stream received by the receiving side of the VCM, for instance the
-    // average frame rate and bit rate.
-    //
-    // Input:
-    //      - receiveStats  : The callback object to register.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t RegisterReceiveStatisticsCallback(
-                               VCMReceiveStatisticsCallback* receiveStats) = 0;
+  // Register a decoder timing callback which will be called to deliver
+  // information about the timing of the decoder in the receiving side of the
+  // VCM, for instance the current and maximum frame decode latency.
+  //
+  // Input:
+  //      - decoderTiming  : The callback object to register.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t RegisterDecoderTimingCallback(
+      VCMDecoderTimingCallback* decoderTiming) = 0;
 
-    // Register a decoder timing callback which will be called to deliver
-    // information about the timing of the decoder in the receiving side of the
-    // VCM, for instance the current and maximum frame decode latency.
-    //
-    // Input:
-    //      - decoderTiming  : The callback object to register.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t RegisterDecoderTimingCallback(
-        VCMDecoderTimingCallback* decoderTiming) = 0;
+  // Register a frame type request callback. This callback will be called when
+  // the module needs to request specific frame types from the send side.
+  //
+  // Input:
+  //      - frameTypeCallback      : The callback object to be used by the
+  //                                 module when requesting a specific type of
+  //                                 frame from the send side.
+  //                                 De-register with a NULL pointer.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t RegisterFrameTypeCallback(
+      VCMFrameTypeCallback* frameTypeCallback) = 0;
 
-    // Register a frame type request callback. This callback will be called when the
-    // module needs to request specific frame types from the send side.
-    //
-    // Input:
-    //      - frameTypeCallback      : The callback object to be used by the module when
-    //                                 requesting a specific type of frame from the send side.
-    //                                 De-register with a NULL pointer.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t RegisterFrameTypeCallback(
-                                  VCMFrameTypeCallback* frameTypeCallback) = 0;
+  // Registers a callback which is called whenever the receive side of the VCM
+  // encounters holes in the packet sequence and needs packets to be
+  // retransmitted.
+  //
+  // Input:
+  //              - callback      : The callback to be registered in the VCM.
+  //
+  // Return value     : VCM_OK,     on success.
+  //                    <0,         on error.
+  virtual int32_t RegisterPacketRequestCallback(
+      VCMPacketRequestCallback* callback) = 0;
 
-    // Registers a callback which is called whenever the receive side of the VCM
-    // encounters holes in the packet sequence and needs packets to be retransmitted.
-    //
-    // Input:
-    //              - callback      : The callback to be registered in the VCM.
-    //
-    // Return value     : VCM_OK,     on success.
-    //                    <0,         on error.
-    virtual int32_t RegisterPacketRequestCallback(
-                                        VCMPacketRequestCallback* callback) = 0;
+  // Waits for the next frame in the jitter buffer to become complete
+  // (waits no longer than maxWaitTimeMs), then passes it to the decoder for
+  // decoding. Should be called as often as possible to get the most out of
+  // the decoder.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t Decode(uint16_t maxWaitTimeMs = 200) = 0;
 
-    // Waits for the next frame in the jitter buffer to become complete
-    // (waits no longer than maxWaitTimeMs), then passes it to the decoder for decoding.
-    // Should be called as often as possible to get the most out of the decoder.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t Decode(uint16_t maxWaitTimeMs = 200) = 0;
+  // Registers a callback which conveys the size of the render buffer.
+  virtual int RegisterRenderBufferSizeCallback(
+      VCMRenderBufferSizeCallback* callback) = 0;
 
-    // Registers a callback which conveys the size of the render buffer.
-    virtual int RegisterRenderBufferSizeCallback(
-        VCMRenderBufferSizeCallback* callback) = 0;
+  // Reset the decoder state to the initial state.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t ResetDecoder() = 0;
 
-    // Reset the decoder state to the initial state.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t ResetDecoder() = 0;
+  // API to get the codec which is currently used for decoding by the module.
+  //
+  // Input:
+  //      - currentReceiveCodec      : Settings for the codec to be registered.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const = 0;
 
-    // API to get the codec which is currently used for decoding by the module.
-    //
-    // Input:
-    //      - currentReceiveCodec      : Settings for the codec to be registered.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const = 0;
+  // API to get the codec type currently used for decoding by the module.
+  //
+  // Return value      : codec type,             on success.
+  //                     kVideoCodecUnknown, on error or if no receive codec is
+  //                     registered.
+  virtual VideoCodecType ReceiveCodec() const = 0;
 
-    // API to get the codec type currently used for decoding by the module.
-    //
-    // Return value      : codecy type,            on success.
-    //                     kVideoCodecUnknown, on error or if no receive codec is registered
-    virtual VideoCodecType ReceiveCodec() const = 0;
+  // Insert a parsed packet into the receiver side of the module. Will be placed
+  // in the jitter buffer waiting for the frame to become complete. Returns as
+  // soon as the packet has been placed in the jitter buffer.
+  //
+  // Input:
+  //      - incomingPayload      : Payload of the packet.
+  //      - payloadLength        : Length of the payload.
+  //      - rtpInfo              : The parsed header.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
+                                 size_t payloadLength,
+                                 const WebRtcRTPHeader& rtpInfo) = 0;
 
-    // Insert a parsed packet into the receiver side of the module. Will be placed in the
-    // jitter buffer waiting for the frame to become complete. Returns as soon as the packet
-    // has been placed in the jitter buffer.
-    //
-    // Input:
-    //      - incomingPayload      : Payload of the packet.
-    //      - payloadLength        : Length of the payload.
-    //      - rtpInfo              : The parsed header.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
-                                   size_t payloadLength,
-                                   const WebRtcRTPHeader& rtpInfo) = 0;
+  // Minimum playout delay (used for lip-sync). This is the minimum delay
+  // required to sync with audio. Not included in VideoCodingModule::Delay().
+  // Defaults to 0 ms.
+  //
+  // Input:
+  //      - minPlayoutDelayMs   : Additional delay in ms.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) = 0;
 
-    // Minimum playout delay (Used for lip-sync). This is the minimum delay required
-    // to sync with audio. Not included in  VideoCodingModule::Delay()
-    // Defaults to 0 ms.
-    //
-    // Input:
-    //      - minPlayoutDelayMs   : Additional delay in ms.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) = 0;
+  // Set the time required by the renderer to render a frame.
+  //
+  // Input:
+  //      - timeMS        : The time in ms required by the renderer to render
+  //                        a frame.
+  //
+  // Return value      : VCM_OK, on success.
+  //                     < 0,    on error.
+  virtual int32_t SetRenderDelay(uint32_t timeMS) = 0;
 
-    // Set the time required by the renderer to render a frame.
-    //
-    // Input:
-    //      - timeMS        : The time in ms required by the renderer to render a frame.
-    //
-    // Return value      : VCM_OK, on success.
-    //                     < 0,    on error.
-    virtual int32_t SetRenderDelay(uint32_t timeMS) = 0;
+  // The total delay desired by the VCM. Can be less than the minimum
+  // delay set with SetMinimumPlayoutDelay.
+  //
+  // Return value      : Total delay in ms, on success.
+  //                     < 0,               on error.
+  virtual int32_t Delay() const = 0;
 
-    // The total delay desired by the VCM. Can be less than the minimum
-    // delay set with SetMinimumPlayoutDelay.
-    //
-    // Return value      : Total delay in ms, on success.
-    //                     < 0,               on error.
-    virtual int32_t Delay() const = 0;
+  // Returns the number of packets discarded by the jitter buffer due to being
+  // too late. This can include duplicated packets which arrived after the
+  // frame was sent to the decoder. Therefore packets which were prematurely
+  // NACKed will be counted.
+  virtual uint32_t DiscardedPackets() const = 0;
 
-    // Returns the number of packets discarded by the jitter buffer due to being
-    // too late. This can include duplicated packets which arrived after the
-    // frame was sent to the decoder. Therefore packets which were prematurely
-    // NACKed will be counted.
-    virtual uint32_t DiscardedPackets() const = 0;
+  // Robustness APIs
 
+  // Set the receiver robustness mode. The mode decides how the receiver
+  // responds to losses in the stream. The type of counter-measure (soft or
+  // hard NACK, dual decoder, RPS, etc.) is selected through the
+  // robustnessMode parameter. The errorMode parameter decides if it is
+  // allowed to display frames corrupted by losses. Note that not all
+  // combinations of the two parameters are feasible. An error will be
+  // returned for invalid combinations.
+  // Input:
+  //      - robustnessMode : selected robustness mode.
+  //      - errorMode      : selected error mode.
+  //
+  // Return value      : VCM_OK, on success;
+  //                     < 0, on error.
+  virtual int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
+                                        VCMDecodeErrorMode errorMode) = 0;
 
-    // Robustness APIs
+  // Set the decode error mode. The mode decides which errors (if any) are
+  // allowed in decodable frames. Note that setting decode_error_mode to
+  // anything other than kWithErrors without enabling nack will cause
+  // long-term freezes (resulting from frequent key frame requests) if
+  // packet loss occurs.
+  virtual void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) = 0;
 
-    // Set the receiver robustness mode. The mode decides how the receiver
-    // responds to losses in the stream. The type of counter-measure (soft or
-    // hard NACK, dual decoder, RPS, etc.) is selected through the
-    // robustnessMode parameter. The errorMode parameter decides if it is
-    // allowed to display frames corrupted by losses. Note that not all
-    // combinations of the two parameters are feasible. An error will be
-    // returned for invalid combinations.
-    // Input:
-    //      - robustnessMode : selected robustness mode.
-    //      - errorMode      : selected error mode.
-    //
-    // Return value      : VCM_OK, on success;
-    //                     < 0, on error.
-    virtual int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
-                                          VCMDecodeErrorMode errorMode) = 0;
+  // Sets the maximum number of sequence numbers that we are allowed to NACK
+  // and the oldest sequence number that we will consider to NACK. If a
+  // sequence number older than |max_packet_age_to_nack| is missing,
+  // a key frame will be requested. A key frame will also be requested if the
+  // time of incomplete or non-continuous frames in the jitter buffer is above
+  // |max_incomplete_time_ms|.
+  virtual void SetNackSettings(size_t max_nack_list_size,
+                               int max_packet_age_to_nack,
+                               int max_incomplete_time_ms) = 0;
 
-    // Set the decode error mode. The mode decides which errors (if any) are
-    // allowed in decodable frames. Note that setting decode_error_mode to
-    // anything other than kWithErrors without enabling nack will cause
-    // long-term freezes (resulting from frequent key frame requests) if
-    // packet loss occurs.
-    virtual void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) = 0;
+  // Sets a desired delay for the VCM receiver. Video rendering will be
+  // delayed by at least desired_delay_ms.
+  virtual int SetMinReceiverDelay(int desired_delay_ms) = 0;
 
-    // Sets the maximum number of sequence numbers that we are allowed to NACK
-    // and the oldest sequence number that we will consider to NACK. If a
-    // sequence number older than |max_packet_age_to_nack| is missing
-    // a key frame will be requested. A key frame will also be requested if the
-    // time of incomplete or non-continuous frames in the jitter buffer is above
-    // |max_incomplete_time_ms|.
-    virtual void SetNackSettings(size_t max_nack_list_size,
-                                 int max_packet_age_to_nack,
-                                 int max_incomplete_time_ms) = 0;
+  // Lets the sender suspend video when the rate drops below
+  // |threshold_bps|, and turns back on when the rate goes back up above
+  // |threshold_bps| + |window_bps|.
+  virtual void SuspendBelowMinBitrate() = 0;
 
-    // Setting a desired delay to the VCM receiver. Video rendering will be
-    // delayed by at least desired_delay_ms.
-    virtual int SetMinReceiverDelay(int desired_delay_ms) = 0;
+  // Returns true if SuspendBelowMinBitrate is engaged and the video has been
+  // suspended due to bandwidth limitations; otherwise false.
+  virtual bool VideoSuspended() const = 0;
 
-    // Lets the sender suspend video when the rate drops below
-    // |threshold_bps|, and turns back on when the rate goes back up above
-    // |threshold_bps| + |window_bps|.
-    virtual void SuspendBelowMinBitrate() = 0;
-
-    // Returns true if SuspendBelowMinBitrate is engaged and the video has been
-    // suspended due to bandwidth limitations; otherwise false.
-    virtual bool VideoSuspended() const = 0;
-
-    virtual void RegisterPreDecodeImageCallback(
-        EncodedImageCallback* observer) = 0;
-    virtual void RegisterPostEncodeImageCallback(
-        EncodedImageCallback* post_encode_callback) = 0;
-    // Releases pending decode calls, permitting faster thread shutdown.
-    virtual void TriggerDecoderShutdown() = 0;
+  virtual void RegisterPreDecodeImageCallback(
+      EncodedImageCallback* observer) = 0;
+  virtual void RegisterPostEncodeImageCallback(
+      EncodedImageCallback* post_encode_callback) = 0;
+  // Releases pending decode calls, permitting faster thread shutdown.
+  virtual void TriggerDecoderShutdown() = 0;
 };
 
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
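
For orientation, a usage sketch of the receiver-side robustness APIs above
(illustrative only; it assumes |vcm| is an existing VideoCodingModule instance
and that kHardNack and kNoErrors are among the available ReceiverRobustness /
VCMDecodeErrorMode values, which are not shown in this excerpt):

// Illustrative sketch; error handling trimmed. kHardNack and kNoErrors are
// assumed enum values and are not defined in the lines shown above.
void ConfigureLossResilience(webrtc::VideoCodingModule* vcm) {
  // Use hard NACK and refuse to render frames corrupted by losses; an
  // unsupported combination returns a negative error code.
  if (vcm->SetReceiverRobustnessMode(webrtc::VideoCodingModule::kHardNack,
                                     webrtc::kNoErrors) < 0) {
    return;
  }
  // NACK at most 250 sequence numbers, request a key frame for gaps older
  // than 450 sequence numbers or when incomplete frames span over 1000 ms.
  vcm->SetNackSettings(250, 450, 1000);
  // Delay rendering by at least 50 ms to leave room for retransmissions.
  vcm->SetMinReceiverDelay(50);
}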
diff --git a/webrtc/modules/video_coding/include/video_coding_defines.h b/webrtc/modules/video_coding/include/video_coding_defines.h
index d057b55..673a02b 100644
--- a/webrtc/modules/video_coding/include/video_coding_defines.h
+++ b/webrtc/modules/video_coding/include/video_coding_defines.h
@@ -18,23 +18,23 @@
 namespace webrtc {
 
 // Error codes
-#define VCM_FRAME_NOT_READY      3
-#define VCM_REQUEST_SLI          2
-#define VCM_MISSING_CALLBACK     1
-#define VCM_OK                   0
-#define VCM_GENERAL_ERROR       -1
-#define VCM_LEVEL_EXCEEDED      -2
-#define VCM_MEMORY              -3
-#define VCM_PARAMETER_ERROR     -4
-#define VCM_UNKNOWN_PAYLOAD     -5
-#define VCM_CODEC_ERROR         -6
-#define VCM_UNINITIALIZED       -7
+#define VCM_FRAME_NOT_READY 3
+#define VCM_REQUEST_SLI 2
+#define VCM_MISSING_CALLBACK 1
+#define VCM_OK 0
+#define VCM_GENERAL_ERROR -1
+#define VCM_LEVEL_EXCEEDED -2
+#define VCM_MEMORY -3
+#define VCM_PARAMETER_ERROR -4
+#define VCM_UNKNOWN_PAYLOAD -5
+#define VCM_CODEC_ERROR -6
+#define VCM_UNINITIALIZED -7
 #define VCM_NO_CODEC_REGISTERED -8
 #define VCM_JITTER_BUFFER_ERROR -9
-#define VCM_OLD_PACKET_ERROR    -10
-#define VCM_NO_FRAME_DECODED    -11
-#define VCM_ERROR_REQUEST_SLI   -12
-#define VCM_NOT_IMPLEMENTED     -20
+#define VCM_OLD_PACKET_ERROR -10
+#define VCM_NO_FRAME_DECODED -11
+#define VCM_ERROR_REQUEST_SLI -12
+#define VCM_NOT_IMPLEMENTED -20
 
 enum { kDefaultStartBitrateKbps = 300 };
 
@@ -65,16 +65,15 @@
   virtual void OnEncoderImplementationName(const char* implementation_name) {}
 
  protected:
-  virtual ~VCMPacketizationCallback() {
-  }
+  virtual ~VCMPacketizationCallback() {}
 };
 
-// Callback class used for passing decoded frames which are ready to be rendered.
+// Callback class used for passing decoded frames which are ready to be
+// rendered.
 class VCMReceiveCallback {
  public:
-  virtual int32_t FrameToRender(VideoFrame& videoFrame) = 0;
-  virtual int32_t ReceivedDecodedReferenceFrame(
-      const uint64_t pictureId) {
+  virtual int32_t FrameToRender(VideoFrame& videoFrame) = 0;  // NOLINT
+  virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId) {
     return -1;
   }
   // Called when the current receive codec changes.
@@ -82,23 +81,23 @@
   virtual void OnDecoderImplementationName(const char* implementation_name) {}
 
  protected:
-  virtual ~VCMReceiveCallback() {
-  }
+  virtual ~VCMReceiveCallback() {}
 };
 
-// Callback class used for informing the user of the bit rate and frame rate produced by the
+// Callback class used for informing the user of the bit rate and frame rate
+// produced by the
 // encoder.
 class VCMSendStatisticsCallback {
  public:
   virtual int32_t SendStatistics(const uint32_t bitRate,
-                                       const uint32_t frameRate) = 0;
+                                 const uint32_t frameRate) = 0;
 
  protected:
-  virtual ~VCMSendStatisticsCallback() {
-  }
+  virtual ~VCMSendStatisticsCallback() {}
 };
 
-// Callback class used for informing the user of the incoming bit rate and frame rate.
+// Callback class used for informing the user of the incoming bit rate and frame
+// rate.
 class VCMReceiveStatisticsCallback {
  public:
   virtual void OnReceiveRatesUpdated(uint32_t bitRate, uint32_t frameRate) = 0;
@@ -106,8 +105,7 @@
   virtual void OnFrameCountsUpdated(const FrameCounts& frame_counts) = 0;
 
  protected:
-  virtual ~VCMReceiveStatisticsCallback() {
-  }
+  virtual ~VCMReceiveStatisticsCallback() {}
 };
 
 // Callback class used for informing the user of decode timing info.
@@ -136,8 +134,7 @@
                                 uint32_t* sent_fec_rate_bps) = 0;
 
  protected:
-  virtual ~VCMProtectionCallback() {
-  }
+  virtual ~VCMProtectionCallback() {}
 };
 
 class VideoEncoderRateObserver {
@@ -146,31 +143,30 @@
   virtual void OnSetRates(uint32_t bitrate_bps, int framerate) = 0;
 };
 
-// Callback class used for telling the user about what frame type needed to continue decoding.
+// Callback class used for telling the user what frame type is needed to
+// continue decoding.
 // Typically a key frame when the stream has been corrupted in some way.
 class VCMFrameTypeCallback {
  public:
   virtual int32_t RequestKeyFrame() = 0;
-  virtual int32_t SliceLossIndicationRequest(
-      const uint64_t pictureId) {
+  virtual int32_t SliceLossIndicationRequest(const uint64_t pictureId) {
     return -1;
   }
 
  protected:
-  virtual ~VCMFrameTypeCallback() {
-  }
+  virtual ~VCMFrameTypeCallback() {}
 };
 
-// Callback class used for telling the user about which packet sequence numbers are currently
+// Callback class used for telling the user about which packet sequence numbers
+// are currently
 // missing and need to be resent.
 class VCMPacketRequestCallback {
  public:
   virtual int32_t ResendPackets(const uint16_t* sequenceNumbers,
-                                      uint16_t length) = 0;
+                                uint16_t length) = 0;
 
  protected:
-  virtual ~VCMPacketRequestCallback() {
-  }
+  virtual ~VCMPacketRequestCallback() {}
 };
 
 // Callback used to inform the user of the desired resolution
@@ -178,14 +174,13 @@
 class VCMQMSettingsCallback {
  public:
   virtual int32_t SetVideoQMSettings(const uint32_t frameRate,
-                                           const uint32_t width,
-                                           const uint32_t height) = 0;
+                                     const uint32_t width,
+                                     const uint32_t height) = 0;
 
   virtual void SetTargetFramerate(int frame_rate) = 0;
 
  protected:
-  virtual ~VCMQMSettingsCallback() {
-  }
+  virtual ~VCMQMSettingsCallback() {}
 };
 
 // Callback class used for telling the user about the size (in time) of the
@@ -195,10 +190,9 @@
   virtual void RenderBufferSizeMs(int buffer_size_ms) = 0;
 
  protected:
-  virtual ~VCMRenderBufferSizeCallback() {
-  }
+  virtual ~VCMRenderBufferSizeCallback() {}
 };
 
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
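
As a quick reference for how these callback interfaces are meant to be used, a
minimal sketch of a VCMPacketRequestCallback implementation follows
(illustrative only; SendNackOverRtp() is a hypothetical transport hook, not a
WebRTC API):

#include "webrtc/modules/video_coding/include/video_coding_defines.h"

class SketchNackSender : public webrtc::VCMPacketRequestCallback {
 public:
  int32_t ResendPackets(const uint16_t* sequenceNumbers,
                        uint16_t length) override {
    // Forward every sequence number reported missing by the jitter buffer.
    for (uint16_t i = 0; i < length; ++i)
      SendNackOverRtp(sequenceNumbers[i]);
    return 0;
  }

 private:
  void SendNackOverRtp(uint16_t sequence_number) { /* hypothetical */ }
};

An object like this would be registered on the receive side so the VCM can ask
the sender to retransmit the packets it reports as missing.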
diff --git a/webrtc/modules/video_coding/include/video_error_codes.h b/webrtc/modules/video_coding/include/video_error_codes.h
index 0e38937..360aa87 100644
--- a/webrtc/modules/video_coding/include/video_error_codes.h
+++ b/webrtc/modules/video_coding/include/video_error_codes.h
@@ -29,4 +29,4 @@
 #define WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE -13
 #define WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT -14
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
diff --git a/webrtc/modules/video_coding/inter_frame_delay.cc b/webrtc/modules/video_coding/inter_frame_delay.cc
index 97cdd8a..fb3b54d 100644
--- a/webrtc/modules/video_coding/inter_frame_delay.cc
+++ b/webrtc/modules/video_coding/inter_frame_delay.cc
@@ -12,103 +12,96 @@
 
 namespace webrtc {
 
-VCMInterFrameDelay::VCMInterFrameDelay(int64_t currentWallClock)
-{
-    Reset(currentWallClock);
+VCMInterFrameDelay::VCMInterFrameDelay(int64_t currentWallClock) {
+  Reset(currentWallClock);
 }
 
 // Resets the delay estimate
-void
-VCMInterFrameDelay::Reset(int64_t currentWallClock)
-{
-    _zeroWallClock = currentWallClock;
-    _wrapArounds = 0;
-    _prevWallClock = 0;
-    _prevTimestamp = 0;
-    _dTS = 0;
+void VCMInterFrameDelay::Reset(int64_t currentWallClock) {
+  _zeroWallClock = currentWallClock;
+  _wrapArounds = 0;
+  _prevWallClock = 0;
+  _prevTimestamp = 0;
+  _dTS = 0;
 }
 
 // Calculates the delay of a frame with the given timestamp.
 // This method is called when the frame is complete.
-bool
-VCMInterFrameDelay::CalculateDelay(uint32_t timestamp,
-                                int64_t *delay,
-                                int64_t currentWallClock)
-{
-    if (_prevWallClock == 0)
-    {
-        // First set of data, initialization, wait for next frame
-        _prevWallClock = currentWallClock;
-        _prevTimestamp = timestamp;
-        *delay = 0;
-        return true;
-    }
-
-    int32_t prevWrapArounds = _wrapArounds;
-    CheckForWrapArounds(timestamp);
-
-    // This will be -1 for backward wrap arounds and +1 for forward wrap arounds
-    int32_t wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;
-
-    // Account for reordering in jitter variance estimate in the future?
-    // Note that this also captures incomplete frames which are grabbed
-    // for decoding after a later frame has been complete, i.e. real
-    // packet losses.
-    if ((wrapAroundsSincePrev == 0 && timestamp < _prevTimestamp) || wrapAroundsSincePrev < 0)
-    {
-        *delay = 0;
-        return false;
-    }
-
-    // Compute the compensated timestamp difference and convert it to ms and
-    // round it to closest integer.
-    _dTS = static_cast<int64_t>((timestamp + wrapAroundsSincePrev *
-                (static_cast<int64_t>(1)<<32) - _prevTimestamp) / 90.0 + 0.5);
-
-    // frameDelay is the difference of dT and dTS -- i.e. the difference of
-    // the wall clock time difference and the timestamp difference between
-    // two following frames.
-    *delay = static_cast<int64_t>(currentWallClock - _prevWallClock - _dTS);
-
-    _prevTimestamp = timestamp;
+bool VCMInterFrameDelay::CalculateDelay(uint32_t timestamp,
+                                        int64_t* delay,
+                                        int64_t currentWallClock) {
+  if (_prevWallClock == 0) {
+    // First set of data, initialization, wait for next frame
     _prevWallClock = currentWallClock;
-
+    _prevTimestamp = timestamp;
+    *delay = 0;
     return true;
+  }
+
+  int32_t prevWrapArounds = _wrapArounds;
+  CheckForWrapArounds(timestamp);
+
+  // This will be -1 for backward wrap arounds and +1 for forward wrap arounds
+  int32_t wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;
+
+  // Account for reordering in jitter variance estimate in the future?
+  // Note that this also captures incomplete frames which are grabbed
+  // for decoding after a later frame has been complete, i.e. real
+  // packet losses.
+  if ((wrapAroundsSincePrev == 0 && timestamp < _prevTimestamp) ||
+      wrapAroundsSincePrev < 0) {
+    *delay = 0;
+    return false;
+  }
+
+  // Compute the compensated timestamp difference and convert it to ms and
+  // round it to closest integer.
+  _dTS = static_cast<int64_t>(
+      (timestamp + wrapAroundsSincePrev * (static_cast<int64_t>(1) << 32) -
+       _prevTimestamp) /
+          90.0 +
+      0.5);
+
+  // frameDelay is the difference of dT and dTS -- i.e. the difference of
+  // the wall clock time difference and the timestamp difference between
+  // two following frames.
+  *delay = static_cast<int64_t>(currentWallClock - _prevWallClock - _dTS);
+
+  _prevTimestamp = timestamp;
+  _prevWallClock = currentWallClock;
+
+  return true;
 }
 
 // Returns the current difference between incoming timestamps
-uint32_t VCMInterFrameDelay::CurrentTimeStampDiffMs() const
-{
-    if (_dTS < 0)
-    {
-        return 0;
-    }
-    return static_cast<uint32_t>(_dTS);
+uint32_t VCMInterFrameDelay::CurrentTimeStampDiffMs() const {
+  if (_dTS < 0) {
+    return 0;
+  }
+  return static_cast<uint32_t>(_dTS);
 }
 
-// Investigates if the timestamp clock has overflowed since the last timestamp and
+// Investigates if the timestamp clock has overflowed since the last
+// timestamp and
 // keeps track of the number of wrap arounds since reset.
-void
-VCMInterFrameDelay::CheckForWrapArounds(uint32_t timestamp)
-{
-    if (timestamp < _prevTimestamp)
-    {
-        // This difference will probably be less than -2^31 if we have had a wrap around
-        // (e.g. timestamp = 1, _previousTimestamp = 2^32 - 1). Since it is cast to a Word32,
-        // it should be positive.
-        if (static_cast<int32_t>(timestamp - _prevTimestamp) > 0)
-        {
-            // Forward wrap around
-            _wrapArounds++;
-        }
+void VCMInterFrameDelay::CheckForWrapArounds(uint32_t timestamp) {
+  if (timestamp < _prevTimestamp) {
+    // This difference will probably be less than -2^31 if we have had a wrap
+    // around (e.g. timestamp = 1, _prevTimestamp = 2^32 - 1). Since it is
+    // cast to a Word32, it should be positive.
+    if (static_cast<int32_t>(timestamp - _prevTimestamp) > 0) {
+      // Forward wrap around
+      _wrapArounds++;
     }
-    // This difference will probably be less than -2^31 if we have had a backward wrap around.
+    // This difference will probably be less than -2^31 if we have had a
+    // backward wrap around.
     // Since it is cast to a Word32, it should be positive.
-    else if (static_cast<int32_t>(_prevTimestamp - timestamp) > 0)
-    {
-        // Backward wrap around
-        _wrapArounds--;
-    }
+  } else if (static_cast<int32_t>(_prevTimestamp - timestamp) > 0) {
+    // Backward wrap around
+    _wrapArounds--;
+  }
 }
-
-}
+}  // namespace webrtc
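
The arithmetic in CalculateDelay() is easier to follow with concrete numbers.
The standalone sketch below restates the same formula (it is not the WebRTC
class itself): the RTP timestamp difference is converted to milliseconds
assuming the 90 kHz video clock, and the inter-frame delay is how much more
wall-clock time elapsed than that.

#include <cassert>
#include <cstdint>

// dTS = rounded (wrap-compensated timestamp diff) / 90; delay = dT - dTS.
int64_t InterFrameDelayMs(uint32_t timestamp, uint32_t prev_timestamp,
                          int32_t wraps_since_prev, int64_t wall_ms,
                          int64_t prev_wall_ms) {
  const int64_t dts_ms = static_cast<int64_t>(
      (timestamp + wraps_since_prev * (static_cast<int64_t>(1) << 32) -
       prev_timestamp) / 90.0 + 0.5);
  return (wall_ms - prev_wall_ms) - dts_ms;
}

int main() {
  // 2970 ticks at 90 kHz is 33 ms between frames; the second frame arrived
  // 40 ms after the first, so it is about 7 ms "late" relative to its pacing.
  assert(InterFrameDelayMs(3970, 1000, 0, 140, 100) == 7);
  return 0;
}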
diff --git a/webrtc/modules/video_coding/inter_frame_delay.h b/webrtc/modules/video_coding/inter_frame_delay.h
index 58b326a..94b7390 100644
--- a/webrtc/modules/video_coding/inter_frame_delay.h
+++ b/webrtc/modules/video_coding/inter_frame_delay.h
@@ -13,54 +13,55 @@
 
 #include "webrtc/typedefs.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
-class VCMInterFrameDelay
-{
-public:
-    VCMInterFrameDelay(int64_t currentWallClock);
+class VCMInterFrameDelay {
+ public:
+  explicit VCMInterFrameDelay(int64_t currentWallClock);
 
-    // Resets the estimate. Zeros are given as parameters.
-    void Reset(int64_t currentWallClock);
+  // Resets the estimate. Zeros are given as parameters.
+  void Reset(int64_t currentWallClock);
 
-    // Calculates the delay of a frame with the given timestamp.
-    // This method is called when the frame is complete.
-    //
-    // Input:
-    //          - timestamp         : RTP timestamp of a received frame
-    //          - *delay            : Pointer to memory where the result should be stored
-    //          - currentWallClock  : The current time in milliseconds.
-    //                                Should be -1 for normal operation, only used for testing.
-    // Return value                 : true if OK, false when reordered timestamps
-    bool CalculateDelay(uint32_t timestamp,
-                        int64_t *delay,
-                        int64_t currentWallClock);
+  // Calculates the delay of a frame with the given timestamp.
+  // This method is called when the frame is complete.
+  //
+  // Input:
+  //          - timestamp         : RTP timestamp of a received frame
+  //          - *delay            : Pointer to memory where the result should
+  //                                be stored
+  //          - currentWallClock  : The current time in milliseconds.
+  //                                Should be -1 for normal operation, only used
+  //                                for testing.
+  // Return value                 : true if OK, false when reordered timestamps
+  bool CalculateDelay(uint32_t timestamp,
+                      int64_t* delay,
+                      int64_t currentWallClock);
 
-    // Returns the current difference between incoming timestamps
-    //
-    // Return value                 : Wrap-around compensated difference between incoming
-    //                                timestamps.
-    uint32_t CurrentTimeStampDiffMs() const;
+  // Returns the current difference between incoming timestamps
+  //
+  // Return value                 : Wrap-around compensated difference between
+  //                                incoming timestamps.
+  uint32_t CurrentTimeStampDiffMs() const;
 
-private:
-    // Controls if the RTP timestamp counter has had a wrap around
-    // between the current and the previously received frame.
-    //
-    // Input:
-    //          - timestmap         : RTP timestamp of the current frame.
-    void CheckForWrapArounds(uint32_t timestamp);
+ private:
+  // Checks whether the RTP timestamp counter has wrapped around
+  // between the current and the previously received frame.
+  //
+  // Input:
+  //          - timestamp         : RTP timestamp of the current frame.
+  void CheckForWrapArounds(uint32_t timestamp);
 
-    int64_t         _zeroWallClock; // Local timestamp of the first video packet received
-    int32_t         _wrapArounds;   // Number of wrapArounds detected
-    // The previous timestamp passed to the delay estimate
-    uint32_t        _prevTimestamp;
-    // The previous wall clock timestamp used by the delay estimate
-    int64_t         _prevWallClock;
-    // Wrap-around compensated difference between incoming timestamps
-    int64_t         _dTS;
+  int64_t _zeroWallClock;  // Local timestamp of the first video packet received
+  int32_t _wrapArounds;    // Number of wrapArounds detected
+  // The previous timestamp passed to the delay estimate
+  uint32_t _prevTimestamp;
+  // The previous wall clock timestamp used by the delay estimate
+  int64_t _prevWallClock;
+  // Wrap-around compensated difference between incoming timestamps
+  int64_t _dTS;
 };
 
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
diff --git a/webrtc/modules/video_coding/internal_defines.h b/webrtc/modules/video_coding/internal_defines.h
index 9b421a2..e225726 100644
--- a/webrtc/modules/video_coding/internal_defines.h
+++ b/webrtc/modules/video_coding/internal_defines.h
@@ -13,14 +13,12 @@
 
 #include "webrtc/typedefs.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
 #define MASK_32_BITS(x) (0xFFFFFFFF & (x))
 
-inline uint32_t MaskWord64ToUWord32(int64_t w64)
-{
-    return static_cast<uint32_t>(MASK_32_BITS(w64));
+inline uint32_t MaskWord64ToUWord32(int64_t w64) {
+  return static_cast<uint32_t>(MASK_32_BITS(w64));
 }
 
 #define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b))
@@ -34,11 +32,10 @@
 
 #define VCM_NO_RECEIVER_ID 0
 
-inline int32_t VCMId(const int32_t vcmId, const int32_t receiverId = 0)
-{
-    return static_cast<int32_t>((vcmId << 16) + receiverId);
+inline int32_t VCMId(const int32_t vcmId, const int32_t receiverId = 0) {
+  return static_cast<int32_t>((vcmId << 16) + receiverId);
 }
 
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
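
For reference, what the two helpers above compute (standalone illustration,
not the WebRTC code itself):

#include <cassert>
#include <cstdint>

int main() {
  // MaskWord64ToUWord32() keeps only the low 32 bits of a 64-bit value.
  const int64_t w64 = (int64_t{1} << 32) + 0x2A;
  assert(static_cast<uint32_t>(0xFFFFFFFF & w64) == 0x2Au);
  // VCMId() packs the VCM id into the high 16 bits and the receiver id into
  // the low 16 bits of a single 32-bit value.
  assert(((5 << 16) + 7) == 0x50007);
  return 0;
}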
diff --git a/webrtc/modules/video_coding/jitter_buffer.cc b/webrtc/modules/video_coding/jitter_buffer.cc
index a381880..640bcb4 100644
--- a/webrtc/modules/video_coding/jitter_buffer.cc
+++ b/webrtc/modules/video_coding/jitter_buffer.cc
@@ -93,7 +93,7 @@
 }
 
 void FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
-                                       UnorderedFrameList* free_frames) {
+                                        UnorderedFrameList* free_frames) {
   while (!empty()) {
     VCMFrameBuffer* oldest_frame = Front();
     bool remove_frame = false;
@@ -431,8 +431,8 @@
     if (incoming_bit_count_ == 0) {
       *bitrate = 0;
     } else {
-      *bitrate = 10 * ((100 * incoming_bit_count_) /
-                       static_cast<unsigned int>(diff));
+      *bitrate =
+          10 * ((100 * incoming_bit_count_) / static_cast<unsigned int>(diff));
     }
     incoming_bit_rate_ = *bitrate;
 
@@ -473,8 +473,8 @@
 
 // Returns immediately or a |max_wait_time_ms| ms event hang waiting for a
 // complete frame, |max_wait_time_ms| decided by caller.
-bool VCMJitterBuffer::NextCompleteTimestamp(
-    uint32_t max_wait_time_ms, uint32_t* timestamp) {
+bool VCMJitterBuffer::NextCompleteTimestamp(uint32_t max_wait_time_ms,
+                                            uint32_t* timestamp) {
   crit_sect_->Enter();
   if (!running_) {
     crit_sect_->Leave();
@@ -484,13 +484,13 @@
 
   if (decodable_frames_.empty() ||
       decodable_frames_.Front()->GetState() != kStateComplete) {
-    const int64_t end_wait_time_ms = clock_->TimeInMilliseconds() +
-        max_wait_time_ms;
+    const int64_t end_wait_time_ms =
+        clock_->TimeInMilliseconds() + max_wait_time_ms;
     int64_t wait_time_ms = max_wait_time_ms;
     while (wait_time_ms > 0) {
       crit_sect_->Leave();
       const EventTypeWrapper ret =
-        frame_event_->Wait(static_cast<uint32_t>(wait_time_ms));
+          frame_event_->Wait(static_cast<uint32_t>(wait_time_ms));
       crit_sect_->Enter();
       if (ret == kEventSignaled) {
         // Are we shutting down the jitter buffer?
@@ -548,8 +548,8 @@
     // If we have exactly one frame in the buffer, release it only if it is
     // complete. We know decodable_frames_ is not empty due to the previous
     // check.
-    if (decodable_frames_.size() == 1 && incomplete_frames_.empty()
-        && oldest_frame->GetState() != kStateComplete) {
+    if (decodable_frames_.size() == 1 && incomplete_frames_.empty() &&
+        oldest_frame->GetState() != kStateComplete) {
       return false;
     }
   }
@@ -588,8 +588,7 @@
     } else {
       // Wait for this one to get complete.
       waiting_for_completion_.frame_size = frame->Length();
-      waiting_for_completion_.latest_packet_time =
-          frame->LatestPacketTimeMs();
+      waiting_for_completion_.latest_packet_time = frame->LatestPacketTimeMs();
       waiting_for_completion_.timestamp = frame->TimeStamp();
     }
   }
@@ -742,8 +741,8 @@
       frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);
 
   if (previous_state != kStateComplete) {
-    TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(),
-                             "timestamp", frame->TimeStamp());
+    TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(), "timestamp",
+                             frame->TimeStamp());
   }
 
   if (buffer_state > 0) {
@@ -760,8 +759,8 @@
         buffer_state = kFlushIndicator;
       }
 
-      latest_received_sequence_number_ = LatestSequenceNumber(
-          latest_received_sequence_number_, packet.seqNum);
+      latest_received_sequence_number_ =
+          LatestSequenceNumber(latest_received_sequence_number_, packet.seqNum);
     }
   }
 
@@ -794,8 +793,9 @@
       } else {
         incomplete_frames_.InsertFrame(frame);
         // If NACKs are enabled, keyframes are triggered by |GetNackList|.
-        if (nack_mode_ == kNoNack && NonContinuousOrIncompleteDuration() >
-            90 * kMaxDiscontinuousFramesTime) {
+        if (nack_mode_ == kNoNack &&
+            NonContinuousOrIncompleteDuration() >
+                90 * kMaxDiscontinuousFramesTime) {
           return kFlushIndicator;
         }
       }
@@ -809,8 +809,9 @@
       } else {
         incomplete_frames_.InsertFrame(frame);
         // If NACKs are enabled, keyframes are triggered by |GetNackList|.
-        if (nack_mode_ == kNoNack && NonContinuousOrIncompleteDuration() >
-            90 * kMaxDiscontinuousFramesTime) {
+        if (nack_mode_ == kNoNack &&
+            NonContinuousOrIncompleteDuration() >
+                90 * kMaxDiscontinuousFramesTime) {
           return kFlushIndicator;
         }
       }
@@ -831,12 +832,14 @@
     case kFlushIndicator:
       free_frames_.push_back(frame);
       return kFlushIndicator;
-    default: assert(false);
+    default:
+      assert(false);
   }
   return buffer_state;
 }
 
-bool VCMJitterBuffer::IsContinuousInState(const VCMFrameBuffer& frame,
+bool VCMJitterBuffer::IsContinuousInState(
+    const VCMFrameBuffer& frame,
     const VCMDecodingState& decoding_state) const {
   // Is this frame (complete or decodable) and continuous?
   // kStateDecodable will never be set when decode_error_mode_ is false
@@ -854,7 +857,7 @@
   VCMDecodingState decoding_state;
   decoding_state.CopyFrom(last_decoded_state_);
   for (FrameList::const_iterator it = decodable_frames_.begin();
-       it != decodable_frames_.end(); ++it)  {
+       it != decodable_frames_.end(); ++it) {
     VCMFrameBuffer* decodable_frame = it->second;
     if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) {
       break;
@@ -887,7 +890,7 @@
   // 1. Continuous base or sync layer.
   // 2. The end of the list was reached.
   for (FrameList::iterator it = incomplete_frames_.begin();
-       it != incomplete_frames_.end();)  {
+       it != incomplete_frames_.end();) {
     VCMFrameBuffer* frame = it->second;
     if (IsNewerTimestamp(original_decoded_state.time_stamp(),
                          frame->TimeStamp())) {
@@ -997,16 +1000,18 @@
   if (last_decoded_state_.in_initial_state()) {
     VCMFrameBuffer* next_frame = NextFrame();
     const bool first_frame_is_key = next_frame &&
-        next_frame->FrameType() == kVideoFrameKey &&
-        next_frame->HaveFirstPacket();
+                                    next_frame->FrameType() == kVideoFrameKey &&
+                                    next_frame->HaveFirstPacket();
     if (!first_frame_is_key) {
-      bool have_non_empty_frame = decodable_frames_.end() != find_if(
-          decodable_frames_.begin(), decodable_frames_.end(),
-          HasNonEmptyState);
+      bool have_non_empty_frame =
+          decodable_frames_.end() != find_if(decodable_frames_.begin(),
+                                             decodable_frames_.end(),
+                                             HasNonEmptyState);
       if (!have_non_empty_frame) {
-        have_non_empty_frame = incomplete_frames_.end() != find_if(
-            incomplete_frames_.begin(), incomplete_frames_.end(),
-            HasNonEmptyState);
+        have_non_empty_frame =
+            incomplete_frames_.end() != find_if(incomplete_frames_.begin(),
+                                                incomplete_frames_.end(),
+                                                HasNonEmptyState);
       }
       bool found_key_frame = RecycleFramesUntilKeyFrame();
       if (!found_key_frame) {
@@ -1025,8 +1030,8 @@
       LOG_F(LS_WARNING) << "Too long non-decodable duration: "
                         << non_continuous_incomplete_duration << " > "
                         << 90 * max_incomplete_time_ms_;
-      FrameList::reverse_iterator rit = find_if(incomplete_frames_.rbegin(),
-          incomplete_frames_.rend(), IsKeyFrame);
+      FrameList::reverse_iterator rit = find_if(
+          incomplete_frames_.rbegin(), incomplete_frames_.rend(), IsKeyFrame);
       if (rit == incomplete_frames_.rend()) {
         // Request a key frame if we don't have one already.
         *request_key_frame = true;
@@ -1066,8 +1071,7 @@
   // Make sure we don't add packets which are already too old to be decoded.
   if (!last_decoded_state_.in_initial_state()) {
     latest_received_sequence_number_ = LatestSequenceNumber(
-        latest_received_sequence_number_,
-        last_decoded_state_.sequence_num());
+        latest_received_sequence_number_, last_decoded_state_.sequence_num());
   }
   if (IsNewerSequenceNumber(sequence_number,
                             latest_received_sequence_number_)) {
@@ -1117,8 +1121,8 @@
   if (missing_sequence_numbers_.empty()) {
     return false;
   }
-  const uint16_t age_of_oldest_missing_packet = latest_sequence_number -
-      *missing_sequence_numbers_.begin();
+  const uint16_t age_of_oldest_missing_packet =
+      latest_sequence_number - *missing_sequence_numbers_.begin();
   // Recycle frames if the NACK list contains too old sequence numbers as
   // the packets may have already been dropped by the sender.
   return age_of_oldest_missing_packet > max_packet_age_to_nack_;
@@ -1126,8 +1130,8 @@
 
 bool VCMJitterBuffer::HandleTooOldPackets(uint16_t latest_sequence_number) {
   bool key_frame_found = false;
-  const uint16_t age_of_oldest_missing_packet = latest_sequence_number -
-      *missing_sequence_numbers_.begin();
+  const uint16_t age_of_oldest_missing_packet =
+      latest_sequence_number - *missing_sequence_numbers_.begin();
   LOG_F(LS_WARNING) << "NACK list contains too old sequence numbers: "
                     << age_of_oldest_missing_packet << " > "
                     << max_packet_age_to_nack_;
@@ -1141,9 +1145,9 @@
     uint16_t last_decoded_sequence_number) {
   // Erase all sequence numbers from the NACK list which we won't need any
   // longer.
-  missing_sequence_numbers_.erase(missing_sequence_numbers_.begin(),
-                                  missing_sequence_numbers_.upper_bound(
-                                      last_decoded_sequence_number));
+  missing_sequence_numbers_.erase(
+      missing_sequence_numbers_.begin(),
+      missing_sequence_numbers_.upper_bound(last_decoded_sequence_number));
 }
 
 int64_t VCMJitterBuffer::LastDecodedTimestamp() const {
@@ -1227,11 +1231,11 @@
   incoming_frame_count_++;
 
   if (frame.FrameType() == kVideoFrameKey) {
-    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video",
-                            frame.TimeStamp(), "KeyComplete");
+    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
+                            "KeyComplete");
   } else {
-    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video",
-                            frame.TimeStamp(), "DeltaComplete");
+    TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
+                            "DeltaComplete");
   }
 
   // Update receive statistics. We count all layers, thus when you use layers
@@ -1249,13 +1253,13 @@
 
 void VCMJitterBuffer::UpdateAveragePacketsPerFrame(int current_number_packets) {
   if (frame_counter_ > kFastConvergeThreshold) {
-    average_packets_per_frame_ = average_packets_per_frame_
-              * (1 - kNormalConvergeMultiplier)
-            + current_number_packets * kNormalConvergeMultiplier;
+    average_packets_per_frame_ =
+        average_packets_per_frame_ * (1 - kNormalConvergeMultiplier) +
+        current_number_packets * kNormalConvergeMultiplier;
   } else if (frame_counter_ > 0) {
-    average_packets_per_frame_ = average_packets_per_frame_
-              * (1 - kFastConvergeMultiplier)
-            + current_number_packets * kFastConvergeMultiplier;
+    average_packets_per_frame_ =
+        average_packets_per_frame_ * (1 - kFastConvergeMultiplier) +
+        current_number_packets * kFastConvergeMultiplier;
     frame_counter_++;
   } else {
     average_packets_per_frame_ = current_number_packets;
@@ -1277,7 +1281,7 @@
 // Must be called from within |crit_sect_|.
 bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const {
   return missing_sequence_numbers_.find(packet.seqNum) !=
-      missing_sequence_numbers_.end();
+         missing_sequence_numbers_.end();
 }
 
 // Must be called under the critical section |crit_sect_|. Should never be
@@ -1309,18 +1313,16 @@
 // Must be called under the critical section |crit_sect_|. Should never be
 // called with retransmitted frames, they must be filtered out before this
 // function is called.
-void VCMJitterBuffer::UpdateJitterEstimate(
-    int64_t latest_packet_time_ms,
-    uint32_t timestamp,
-    unsigned int frame_size,
-    bool incomplete_frame) {
+void VCMJitterBuffer::UpdateJitterEstimate(int64_t latest_packet_time_ms,
+                                           uint32_t timestamp,
+                                           unsigned int frame_size,
+                                           bool incomplete_frame) {
   if (latest_packet_time_ms == -1) {
     return;
   }
   int64_t frame_delay;
-  bool not_reordered = inter_frame_delay_.CalculateDelay(timestamp,
-                                                      &frame_delay,
-                                                      latest_packet_time_ms);
+  bool not_reordered = inter_frame_delay_.CalculateDelay(
+      timestamp, &frame_delay, latest_packet_time_ms);
   // Filter out frames which have been reordered in time by the network
   if (not_reordered) {
     // Update the jitter estimate with the new samples
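
Several hunks above (TooLargeNackList(), HandleTooOldPackets()) rely on
unsigned 16-bit arithmetic to age sequence numbers. A small standalone sketch
of why that subtraction stays correct across a sequence-number wrap:

#include <cassert>
#include <cstdint>

int main() {
  const uint16_t latest = 10;             // latest received, after a wrap
  const uint16_t oldest_missing = 65500;  // still missing, from before the wrap
  // Storing the difference back into uint16_t reduces it modulo 2^16, so the
  // "age" is the forward distance (46), not a large negative number.
  const uint16_t age = latest - oldest_missing;
  assert(age == 46);
  const int max_packet_age_to_nack = 450;  // example threshold only
  assert(age <= max_packet_age_to_nack);   // keep NACKing, no key frame yet
  return 0;
}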
diff --git a/webrtc/modules/video_coding/jitter_buffer.h b/webrtc/modules/video_coding/jitter_buffer.h
index 1d5edeb..01e2775 100644
--- a/webrtc/modules/video_coding/jitter_buffer.h
+++ b/webrtc/modules/video_coding/jitter_buffer.h
@@ -30,10 +30,7 @@
 
 namespace webrtc {
 
-enum VCMNackMode {
-  kNack,
-  kNoNack
-};
+enum VCMNackMode { kNack, kNoNack };
 
 // forward declarations
 class Clock;
@@ -54,8 +51,7 @@
 
 class TimestampLessThan {
  public:
-  bool operator() (uint32_t timestamp1,
-                   uint32_t timestamp2) const {
+  bool operator()(uint32_t timestamp1, uint32_t timestamp2) const {
     return IsNewerTimestamp(timestamp2, timestamp1);
   }
 };
@@ -68,7 +64,7 @@
   VCMFrameBuffer* Front() const;
   VCMFrameBuffer* Back() const;
   int RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
-      UnorderedFrameList* free_frames);
+                                 UnorderedFrameList* free_frames);
   void CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
                                UnorderedFrameList* free_frames);
   void Reset(UnorderedFrameList* free_frames);
@@ -141,8 +137,7 @@
   int num_discarded_packets() const;
 
   // Statistics, Calculate frame and bit rates.
-  void IncomingRateStatistics(unsigned int* framerate,
-                              unsigned int* bitrate);
+  void IncomingRateStatistics(unsigned int* framerate, unsigned int* bitrate);
 
   // Checks if the packet sequence will be complete if the next frame would be
   // grabbed for decoding. That is, if a frame has been lost between the
@@ -177,8 +172,7 @@
   // Inserts a packet into a frame returned from GetFrame().
   // If the return value is <= 0, |frame| is invalidated and the pointer must
   // be dropped after this function returns.
-  VCMFrameBufferEnum InsertPacket(const VCMPacket& packet,
-                                  bool* retransmitted);
+  VCMFrameBufferEnum InsertPacket(const VCMPacket& packet, bool* retransmitted);
 
   // Returns the estimated jitter in milliseconds.
   uint32_t EstimatedJitterMs();
@@ -192,7 +186,8 @@
   // |low_rtt_nack_threshold_ms| is an RTT threshold in ms below which we expect
   // to rely on NACK only, and therefore are using larger buffers to have time
   // to wait for retransmissions.
-  void SetNackMode(VCMNackMode mode, int64_t low_rtt_nack_threshold_ms,
+  void SetNackMode(VCMNackMode mode,
+                   int64_t low_rtt_nack_threshold_ms,
                    int64_t high_rtt_nack_threshold_ms);
 
   void SetNackSettings(size_t max_nack_list_size,
@@ -209,7 +204,7 @@
   // session. Changes will not influence frames already in the buffer.
   void SetDecodeErrorMode(VCMDecodeErrorMode error_mode);
   int64_t LastDecodedTimestamp() const;
-  VCMDecodeErrorMode decode_error_mode() const {return decode_error_mode_;}
+  VCMDecodeErrorMode decode_error_mode() const { return decode_error_mode_; }
 
   // Used to compute time of complete continuous frames. Returns the timestamps
   // corresponding to the start and end of the continuous complete buffer.
@@ -220,8 +215,8 @@
  private:
   class SequenceNumberLessThan {
    public:
-    bool operator() (const uint16_t& sequence_number1,
-                     const uint16_t& sequence_number2) const {
+    bool operator()(const uint16_t& sequence_number1,
+                    const uint16_t& sequence_number2) const {
       return IsNewerSequenceNumber(sequence_number2, sequence_number1);
     }
   };
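
The TimestampLessThan comparator above orders RTP timestamps with wrap-around
in mind. A self-contained sketch with a local re-implementation of
IsNewerTimestamp (the real helper lives in WebRTC's shared headers and is
assumed to behave like this) shows the effect:

#include <cassert>
#include <cstdint>
#include <map>

// Assumed behavior of IsNewerTimestamp: a is newer than b if the forward
// distance from b to a, modulo 2^32, is less than 2^31.
bool IsNewerTimestampLocal(uint32_t a, uint32_t b) {
  return a != b && static_cast<uint32_t>(a - b) < 0x80000000u;
}

struct TimestampLessThanLocal {
  bool operator()(uint32_t t1, uint32_t t2) const {
    return IsNewerTimestampLocal(t2, t1);
  }
};

int main() {
  std::map<uint32_t, const char*, TimestampLessThanLocal> frames;
  frames[0xFFFFFF00u] = "sent just before the timestamp wrap";
  frames[0x00000100u] = "sent just after the timestamp wrap";
  // The pre-wrap frame still sorts first, i.e. in decode order.
  assert(frames.begin()->first == 0xFFFFFF00u);
  return 0;
}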
diff --git a/webrtc/modules/video_coding/jitter_buffer_common.h b/webrtc/modules/video_coding/jitter_buffer_common.h
index 97af780..65356f1 100644
--- a/webrtc/modules/video_coding/jitter_buffer_common.h
+++ b/webrtc/modules/video_coding/jitter_buffer_common.h
@@ -19,11 +19,11 @@
 static const float kFastConvergeMultiplier = 0.4f;
 static const float kNormalConvergeMultiplier = 0.2f;
 
-enum { kMaxNumberOfFrames     = 300 };
-enum { kStartNumberOfFrames   = 6 };
-enum { kMaxVideoDelayMs       = 10000 };
+enum { kMaxNumberOfFrames = 300 };
+enum { kStartNumberOfFrames = 6 };
+enum { kMaxVideoDelayMs = 10000 };
 enum { kPacketsPerFrameMultiplier = 5 };
-enum { kFastConvergeThreshold = 5};
+enum { kFastConvergeThreshold = 5 };
 
 enum VCMJitterBufferEnum {
   kMaxConsecutiveOldFrames = 60,
@@ -36,36 +36,36 @@
 };
 
 enum VCMFrameBufferEnum {
-  kOutOfBoundsPacket    = -7,
-  kNotInitialized       = -6,
-  kOldPacket            = -5,
-  kGeneralError         = -4,
-  kFlushIndicator       = -3,   // Indicator that a flush has occurred.
-  kTimeStampError       = -2,
-  kSizeError            = -1,
-  kNoError              = 0,
-  kIncomplete           = 1,    // Frame incomplete.
-  kCompleteSession      = 3,    // at least one layer in the frame complete.
-  kDecodableSession     = 4,    // Frame incomplete, but ready to be decoded
-  kDuplicatePacket      = 5     // We're receiving a duplicate packet.
+  kOutOfBoundsPacket = -7,
+  kNotInitialized = -6,
+  kOldPacket = -5,
+  kGeneralError = -4,
+  kFlushIndicator = -3,  // Indicator that a flush has occurred.
+  kTimeStampError = -2,
+  kSizeError = -1,
+  kNoError = 0,
+  kIncomplete = 1,        // Frame incomplete.
+  kCompleteSession = 3,   // at least one layer in the frame complete.
+  kDecodableSession = 4,  // Frame incomplete, but ready to be decoded
+  kDuplicatePacket = 5    // We're receiving a duplicate packet.
 };
 
 enum VCMFrameBufferStateEnum {
-  kStateEmpty,              // frame popped by the RTP receiver
-  kStateIncomplete,         // frame that have one or more packet(s) stored
-  kStateComplete,           // frame that have all packets
-  kStateDecodable           // Hybrid mode - frame can be decoded
+  kStateEmpty,       // frame popped by the RTP receiver
+  kStateIncomplete,  // frame that has one or more packet(s) stored
+  kStateComplete,    // frame that has all packets
+  kStateDecodable    // Hybrid mode - frame can be decoded
 };
 
-enum { kH264StartCodeLengthBytes = 4};
+enum { kH264StartCodeLengthBytes = 4 };
 
 // Used to indicate if a received packet contains a complete NALU (or equivalent)
 enum VCMNaluCompleteness {
-  kNaluUnset = 0,       // Packet has not been filled.
-  kNaluComplete = 1,    // Packet can be decoded as is.
+  kNaluStart,         // Packet contains the beginning of a NALU
+  kNaluIncomplete,    // Packet is neither the beginning nor the end of a NALU
-  kNaluEnd,             // Packet is the end of a NALU
+  kNaluUnset = 0,     // Packet has not been filled.
+  kNaluComplete = 1,  // Packet can be decoded as is.
+  kNaluStart,         // Packet contain beginning of NALU
+  kNaluIncomplete,    // Packet is not beginning or end of NALU
+  kNaluEnd,           // Packet is the end of a NALU
 };
 }  // namespace webrtc
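
As a reading aid for the unit tests that follow, an illustrative sketch of how
a caller might react to the VCMFrameBufferEnum codes above (assuming the
header is included; not part of the diff):

#include "webrtc/modules/video_coding/jitter_buffer_common.h"

void HandleInsertResult(webrtc::VCMFrameBufferEnum result) {
  switch (result) {
    case webrtc::kCompleteSession:
    case webrtc::kDecodableSession:
      // A frame may now be ready; try to fetch it for decoding.
      break;
    case webrtc::kIncomplete:
    case webrtc::kDuplicatePacket:
      // Nothing new to decode yet; keep feeding packets.
      break;
    case webrtc::kFlushIndicator:
      // The buffer was flushed; callers typically request a key frame here.
      break;
    default:
      // Negative codes (kOldPacket, kTimeStampError, ...) indicate errors.
      break;
  }
}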
 
diff --git a/webrtc/modules/video_coding/jitter_buffer_unittest.cc b/webrtc/modules/video_coding/jitter_buffer_unittest.cc
index 116bf08..8abc1b5 100644
--- a/webrtc/modules/video_coding/jitter_buffer_unittest.cc
+++ b/webrtc/modules/video_coding/jitter_buffer_unittest.cc
@@ -26,13 +26,12 @@
 namespace webrtc {
 
 namespace {
-  const uint32_t kProcessIntervalSec = 60;
+const uint32_t kProcessIntervalSec = 60;
 }  // namespace
 
 class Vp9SsMapTest : public ::testing::Test {
  protected:
-  Vp9SsMapTest()
-      : packet_(data_, 1400, 1234, 1, true) {}
+  Vp9SsMapTest() : packet_(data_, 1400, 1234, 1, true) {}
 
   virtual void SetUp() {
     packet_.isFirstPacket = true;
@@ -234,8 +233,8 @@
   }
 
   void CheckOutFrame(VCMEncodedFrame* frame_out,
-                    unsigned int size,
-                    bool startCode) {
+                     unsigned int size,
+                     bool startCode) {
     ASSERT_TRUE(frame_out);
 
     const uint8_t* outData = frame_out->Buffer();
@@ -280,7 +279,6 @@
   rtc::scoped_ptr<VCMJitterBuffer> jitter_buffer_;
 };
 
-
 class TestRunningJitterBuffer : public ::testing::Test {
  protected:
   enum { kDataBufferSize = 10 };
@@ -294,8 +292,8 @@
         rtc::scoped_ptr<EventWrapper>(event_factory_.CreateEvent()));
     stream_generator_ = new StreamGenerator(0, clock_->TimeInMilliseconds());
     jitter_buffer_->Start();
-    jitter_buffer_->SetNackSettings(max_nack_list_size_,
-                                    oldest_packet_to_nack_, 0);
+    jitter_buffer_->SetNackSettings(max_nack_list_size_, oldest_packet_to_nack_,
+                                    0);
     memset(data_buffer_, 0, kDataBufferSize);
   }
 
@@ -396,9 +394,7 @@
     jitter_buffer_->SetNackMode(kNack, -1, -1);
   }
 
-  virtual void TearDown() {
-    TestRunningJitterBuffer::TearDown();
-  }
+  virtual void TearDown() { TestRunningJitterBuffer::TearDown(); }
 };
 
 TEST_F(TestBasicJitterBuffer, StopRunning) {
@@ -431,8 +427,8 @@
 
   // Insert the packet to the jitter buffer and get a frame.
   bool retransmitted = false;
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -450,8 +446,8 @@
 
   // Insert single packet frame to the jitter buffer and get a frame.
   bool retransmitted = false;
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -460,25 +456,25 @@
   // Verify that histograms are updated when the jitter buffer is stopped.
   clock_->AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
   jitter_buffer_->Stop();
-  EXPECT_EQ(0, test::LastHistogramSample(
-      "WebRTC.Video.DiscardedPacketsInPercent"));
-  EXPECT_EQ(0, test::LastHistogramSample(
-      "WebRTC.Video.DuplicatedPacketsInPercent"));
+  EXPECT_EQ(
+      0, test::LastHistogramSample("WebRTC.Video.DiscardedPacketsInPercent"));
+  EXPECT_EQ(
+      0, test::LastHistogramSample("WebRTC.Video.DuplicatedPacketsInPercent"));
   EXPECT_NE(-1, test::LastHistogramSample(
-      "WebRTC.Video.CompleteFramesReceivedPerSecond"));
+                    "WebRTC.Video.CompleteFramesReceivedPerSecond"));
   EXPECT_EQ(1000, test::LastHistogramSample(
-      "WebRTC.Video.KeyFramesReceivedInPermille"));
+                      "WebRTC.Video.KeyFramesReceivedInPermille"));
 
   // Verify that histograms are not updated if stop is called again.
   jitter_buffer_->Stop();
+  EXPECT_EQ(
+      1, test::NumHistogramSamples("WebRTC.Video.DiscardedPacketsInPercent"));
+  EXPECT_EQ(
+      1, test::NumHistogramSamples("WebRTC.Video.DuplicatedPacketsInPercent"));
   EXPECT_EQ(1, test::NumHistogramSamples(
-      "WebRTC.Video.DiscardedPacketsInPercent"));
-  EXPECT_EQ(1, test::NumHistogramSamples(
-      "WebRTC.Video.DuplicatedPacketsInPercent"));
-  EXPECT_EQ(1, test::NumHistogramSamples(
-      "WebRTC.Video.CompleteFramesReceivedPerSecond"));
-  EXPECT_EQ(1, test::NumHistogramSamples(
-      "WebRTC.Video.KeyFramesReceivedInPermille"));
+                   "WebRTC.Video.CompleteFramesReceivedPerSecond"));
+  EXPECT_EQ(
+      1, test::NumHistogramSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
 }
 
 TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
@@ -487,8 +483,8 @@
   packet_->markerBit = false;
 
   bool retransmitted = false;
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   // Should not be complete.
   EXPECT_TRUE(frame_out == NULL);
@@ -498,8 +494,8 @@
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 2 * size_, false);
@@ -514,8 +510,8 @@
   packet_->markerBit = false;
 
   bool retransmitted = false;
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
 
@@ -530,8 +526,8 @@
     packet_->markerBit = false;
     packet_->seqNum = seq_num_;
 
-    EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                        &retransmitted));
+    EXPECT_EQ(kIncomplete,
+              jitter_buffer_->InsertPacket(*packet_, &retransmitted));
     loop++;
   } while (loop < 98);
 
@@ -541,8 +537,8 @@
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
 
@@ -558,8 +554,8 @@
   packet_->markerBit = true;
 
   bool retransmitted = false;
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_FALSE(frame_out == NULL);
   jitter_buffer_->ReleaseFrame(frame_out);
@@ -570,8 +566,8 @@
   packet_->frameType = kVideoFrameDelta;
   packet_->timestamp += 33 * 90;
 
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
 
@@ -586,8 +582,8 @@
     packet_->seqNum = seq_num_;
 
     // Insert a packet into a frame.
-    EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                        &retransmitted));
+    EXPECT_EQ(kIncomplete,
+              jitter_buffer_->InsertPacket(*packet_, &retransmitted));
     loop++;
   } while (loop < 98);
 
@@ -597,8 +593,8 @@
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
 
@@ -617,8 +613,8 @@
   packet_->timestamp = timestamp_;
 
   bool retransmitted = false;
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
 
@@ -632,8 +628,8 @@
     packet_->markerBit = false;
     packet_->seqNum = seq_num_;
 
-    EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                        &retransmitted));
+    EXPECT_EQ(kIncomplete,
+              jitter_buffer_->InsertPacket(*packet_, &retransmitted));
     loop++;
   } while (loop < 98);
 
@@ -643,10 +639,10 @@
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
-  frame_out = DecodeCompleteFrame();;
+  frame_out = DecodeCompleteFrame();
 
   CheckOutFrame(frame_out, 100 * size_, false);
 
@@ -660,8 +656,8 @@
   packet_->markerBit = false;
 
   bool retransmitted = false;
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
 
@@ -672,23 +668,23 @@
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   // check that we fail to get frame since seqnum is not continuous
   frame_out = DecodeCompleteFrame();
   EXPECT_TRUE(frame_out == NULL);
 
   seq_num_ -= 3;
-  timestamp_ -= 33*90;
+  timestamp_ -= 33 * 90;
   packet_->frameType = kVideoFrameKey;
   packet_->isFirstPacket = true;
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
   packet_->timestamp = timestamp_;
 
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
 
@@ -700,8 +696,8 @@
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 2 * size_, false);
@@ -781,8 +777,8 @@
   EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
 
   bool retransmitted = false;
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
 
@@ -791,8 +787,8 @@
   EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
 
   // Insert a packet into a frame.
-  EXPECT_EQ(kDuplicatePacket, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kDuplicatePacket,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   EXPECT_EQ(2, jitter_buffer_->num_packets());
   EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
 
@@ -801,8 +797,8 @@
   packet_->markerBit = true;
   packet_->isFirstPacket = false;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
   ASSERT_TRUE(frame_out != NULL);
@@ -1086,8 +1082,8 @@
   packet_->insertStartCode = true;
 
   bool retransmitted = false;
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
 
@@ -1099,8 +1095,8 @@
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, size_ * 2 + 4 * 2, true);
@@ -1120,8 +1116,8 @@
   packet_->timestamp = timestamp_;
 
   bool retransmitted = false;
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   uint32_t timestamp = 0;
   EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
   EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1129,8 +1125,8 @@
   packet_->isFirstPacket = false;
   for (int i = 1; i < 9; ++i) {
     packet_->seqNum++;
-    EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-        &retransmitted));
+    EXPECT_EQ(kIncomplete,
+              jitter_buffer_->InsertPacket(*packet_, &retransmitted));
     EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
     EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
   }
@@ -1139,8 +1135,8 @@
   packet_->markerBit = true;
   packet_->seqNum++;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 10 * size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -1154,8 +1150,8 @@
   packet_->seqNum += 100;
   packet_->timestamp += 33 * 90 * 8;
 
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
   EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
 
@@ -1163,23 +1159,23 @@
   packet_->seqNum -= 99;
   packet_->timestamp -= 33 * 90 * 7;
 
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
   EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
 
   packet_->isFirstPacket = false;
   for (int i = 1; i < 8; ++i) {
     packet_->seqNum++;
-    EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                              &retransmitted));
+    EXPECT_EQ(kDecodableSession,
+              jitter_buffer_->InsertPacket(*packet_, &retransmitted));
     EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
     EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
   }
 
   packet_->seqNum++;
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
   EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
 
@@ -1191,8 +1187,7 @@
 
   packet_->markerBit = true;
   packet_->seqNum++;
-  EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
-                                                     &retransmitted));
+  EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 }
 
 // Make sure first packet is present before a frame can be decoded.
@@ -1206,8 +1201,8 @@
   packet_->timestamp = timestamp_;
 
   bool retransmitted = false;
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -1219,9 +1214,9 @@
   packet_->isFirstPacket = false;
   packet_->markerBit = false;
   packet_->seqNum += 100;
-  packet_->timestamp += 33*90*8;
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  packet_->timestamp += 33 * 90 * 8;
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   uint32_t timestamp;
   EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
   EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1230,10 +1225,10 @@
   packet_->frameType = kVideoFrameKey;
   packet_->isFirstPacket = true;
   packet_->seqNum -= 99;
-  packet_->timestamp -= 33*90*7;
+  packet_->timestamp -= 33 * 90 * 7;
 
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
   EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
 
@@ -1242,8 +1237,8 @@
   packet_->isFirstPacket = false;
   for (int i = 1; i < 5; ++i) {
     packet_->seqNum++;
-    EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                              &retransmitted));
+    EXPECT_EQ(kIncomplete,
+              jitter_buffer_->InsertPacket(*packet_, &retransmitted));
     EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
     EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
   }
@@ -1251,8 +1246,8 @@
   // Complete key frame.
   packet_->markerBit = true;
   packet_->seqNum++;
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 6 * size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -1270,8 +1265,8 @@
   packet_->timestamp = timestamp_;
 
   bool retransmitted = false;
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, size_, false);
   EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -1283,9 +1278,9 @@
   packet_->isFirstPacket = false;
   packet_->markerBit = false;
   packet_->seqNum += 100;
-  packet_->timestamp += 33*90*8;
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  packet_->timestamp += 33 * 90 * 8;
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   uint32_t timestamp;
   EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
   EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1293,17 +1288,17 @@
   // Insert second frame with the first packet missing. Make sure we're waiting
   // for the key frame to be complete.
   packet_->seqNum -= 98;
-  packet_->timestamp -= 33*90*7;
+  packet_->timestamp -= 33 * 90 * 7;
 
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
   EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
 
   for (int i = 0; i < 5; ++i) {
     packet_->seqNum++;
-    EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                              &retransmitted));
+    EXPECT_EQ(kIncomplete,
+              jitter_buffer_->InsertPacket(*packet_, &retransmitted));
     EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
     EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
   }
@@ -1311,8 +1306,8 @@
   // Add first packet. Frame should now be decodable, but incomplete.
   packet_->isFirstPacket = true;
   packet_->seqNum -= 6;
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
   EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
 
@@ -1331,8 +1326,8 @@
   packet_->seqNum = seq_num_;
   packet_->timestamp = timestamp_;
   bool retransmitted = false;
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   uint32_t next_timestamp;
   EXPECT_TRUE(jitter_buffer_->NextCompleteTimestamp(0, &next_timestamp));
   EXPECT_EQ(packet_->timestamp, next_timestamp);
@@ -1348,8 +1343,8 @@
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
   packet_->timestamp = timestamp_;
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   // Insert a packet (so the previous one will be released).
   timestamp_ += 33 * 90;
   seq_num_ += 2;
@@ -1358,8 +1353,8 @@
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
   packet_->timestamp = timestamp_;
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &next_timestamp));
   EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&next_timestamp));
   EXPECT_EQ(packet_->timestamp - 33 * 90, next_timestamp);
@@ -1384,12 +1379,12 @@
   packet_->completeNALU = kNaluStart;
 
   bool retransmitted = false;
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   for (int i = 0; i < 11; ++i) {
     webrtc::FrameType frametype = kVideoFrameDelta;
     seq_num_++;
-    timestamp_ += 33*90;
+    timestamp_ += 33 * 90;
     packet_->frameType = frametype;
     packet_->isFirstPacket = true;
     packet_->markerBit = false;
@@ -1397,8 +1392,8 @@
     packet_->timestamp = timestamp_;
     packet_->completeNALU = kNaluStart;
 
-    EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                              &retransmitted));
+    EXPECT_EQ(kDecodableSession,
+              jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
     VCMEncodedFrame* frame_out = DecodeCompleteFrame();
 
@@ -1432,9 +1427,9 @@
       CheckOutFrame(frame_out, size_, false);
 
       if (i == 0) {
-          EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+        EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
       } else {
-         EXPECT_EQ(frametype, frame_out->FrameType());
+        EXPECT_EQ(frametype, frame_out->FrameType());
       }
       EXPECT_FALSE(frame_out->Complete());
       EXPECT_FALSE(frame_out->MissingFrame());
@@ -1448,18 +1443,15 @@
   timestamp_ -= 33 * 90;
   packet_->timestamp = timestamp_ - 1000;
 
-  EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
-                                                     &retransmitted));
+  EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   packet_->timestamp = timestamp_ - 500;
 
-  EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
-                                                     &retransmitted));
+  EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   packet_->timestamp = timestamp_ - 100;
 
-  EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
-                                                     &retransmitted));
+  EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   EXPECT_EQ(3, jitter_buffer_->num_discarded_packets());
 
@@ -1478,8 +1470,8 @@
   packet_->timestamp = timestamp_;
 
   bool retransmitted = false;
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
 
@@ -1492,8 +1484,8 @@
     packet_->markerBit = false;
     packet_->seqNum = seq_num_;
 
-    EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                        &retransmitted));
+    EXPECT_EQ(kIncomplete,
+              jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
     frame_out = DecodeCompleteFrame();
 
@@ -1507,8 +1499,8 @@
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
 
@@ -1527,8 +1519,8 @@
   packet_->seqNum = seq_num_;
 
   bool retransmitted = false;
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
 
   // Should not be complete.
@@ -1542,8 +1534,8 @@
     packet_->markerBit = false;
     packet_->seqNum = seq_num_;
 
-    EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                        &retransmitted));
+    EXPECT_EQ(kIncomplete,
+              jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
     frame_out = DecodeCompleteFrame();
 
@@ -1558,8 +1550,8 @@
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 100 * size_, false);
@@ -1581,8 +1573,8 @@
   packet_->seqNum = seq_num_;
 
   bool retransmitted = false;
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(3000u, frame_out->TimeStamp());
@@ -1598,8 +1590,7 @@
   packet_->seqNum = seq_num_;
   packet_->timestamp = timestamp_;
 
-  EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
-                                                     &retransmitted));
+  EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 }
 
 TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
@@ -1617,8 +1608,8 @@
   packet_->timestamp = timestamp_;
 
   bool retransmitted = false;
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(timestamp_, frame_out->TimeStamp());
@@ -1637,10 +1628,8 @@
   packet_->seqNum = seq_num_;
   packet_->timestamp = timestamp_;
 
-
   // This timestamp is old.
-  EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
-                                                     &retransmitted));
+  EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 }
 
 TEST_F(TestBasicJitterBuffer, TimestampWrap) {
@@ -1657,8 +1646,8 @@
   packet_->timestamp = timestamp_;
 
   bool retransmitted = false;
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_TRUE(frame_out == NULL);
@@ -1668,23 +1657,23 @@
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 2 * size_, false);
   jitter_buffer_->ReleaseFrame(frame_out);
 
   seq_num_++;
-  timestamp_ += 33*90;
+  timestamp_ += 33 * 90;
   packet_->frameType = kVideoFrameDelta;
   packet_->isFirstPacket = true;
   packet_->markerBit = false;
   packet_->seqNum = seq_num_;
   packet_->timestamp = timestamp_;
 
-  EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                       &retransmitted));
+  EXPECT_EQ(kIncomplete,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
   EXPECT_TRUE(frame_out == NULL);
@@ -1694,8 +1683,8 @@
   packet_->markerBit = true;
   packet_->seqNum = seq_num_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
   CheckOutFrame(frame_out, 2 * size_, false);
@@ -1717,8 +1706,8 @@
 
   bool retransmitted = false;
   // Insert first frame (session will be complete).
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   // Insert next frame.
   seq_num_++;
@@ -1729,8 +1718,8 @@
   packet_->seqNum = seq_num_;
   packet_->timestamp = timestamp_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
@@ -1760,8 +1749,8 @@
   packet_->timestamp = timestamp_;
 
   bool retransmitted = false;
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   // Insert second frame
   seq_num_--;
@@ -1772,8 +1761,8 @@
   packet_->seqNum = seq_num_;
   packet_->timestamp = timestamp_;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
@@ -1800,12 +1789,12 @@
     packet_->seqNum = seq_num_;
 
     if (firstPacket) {
-      EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+      EXPECT_EQ(kIncomplete,
+                jitter_buffer_->InsertPacket(*packet_, &retransmitted));
       firstPacket = false;
     } else {
-      EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
-                                                          &retransmitted));
+      EXPECT_EQ(kIncomplete,
+                jitter_buffer_->InsertPacket(*packet_, &retransmitted));
     }
 
     loop++;
@@ -1819,10 +1808,8 @@
   packet_->seqNum = seq_num_;
 
   // Insert the packet -> frame recycled.
-  EXPECT_EQ(kSizeError, jitter_buffer_->InsertPacket(*packet_,
-                                                     &retransmitted));
+  EXPECT_EQ(kSizeError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   EXPECT_TRUE(NULL == DecodeCompleteFrame());
-
 }
 
 TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
@@ -1837,8 +1824,7 @@
   // Make sure the jitter doesn't request a keyframe after too much non-
   // decodable frames.
   jitter_buffer_->SetNackMode(kNack, -1, -1);
-  jitter_buffer_->SetNackSettings(kMaxNumberOfFrames,
-                                  kMaxNumberOfFrames, 0);
+  jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
 
   int loop = 0;
   seq_num_ = 65485;
@@ -1846,7 +1832,7 @@
   bool retransmitted = false;
   // Insert MAX_NUMBER_OF_FRAMES frames.
   do {
-    timestamp_ += 33*90;
+    timestamp_ += 33 * 90;
     seq_num_++;
     packet_->isFirstPacket = true;
     packet_->markerBit = true;
@@ -1859,8 +1845,8 @@
     }
 
     // Insert frame.
-    EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                             &retransmitted));
+    EXPECT_EQ(kCompleteSession,
+              jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
     loop++;
   } while (loop < kMaxNumberOfFrames);
@@ -1868,7 +1854,7 @@
   // Max number of frames inserted.
 
   // Insert one more frame.
-  timestamp_ += 33*90;
+  timestamp_ += 33 * 90;
   seq_num_++;
   packet_->isFirstPacket = true;
   packet_->markerBit = true;
@@ -1902,8 +1888,7 @@
     packet_->timestamp = timestamp_;
     packet_->frameType = kEmptyFrame;
 
-    EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_,
-                                                     &retransmitted));
+    EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
     VCMEncodedFrame* testFrame = DecodeIncompleteFrame();
     // Timestamp should never be the last TS inserted.
     if (testFrame != NULL) {
@@ -1927,8 +1912,8 @@
   packet_->markerBit = false;
   bool retransmitted = false;
 
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   seq_num_ += 2;  // Skip one packet.
   packet_->seqNum = seq_num_;
@@ -1937,8 +1922,8 @@
   packet_->completeNALU = kNaluIncomplete;
   packet_->markerBit = false;
 
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   seq_num_++;
   packet_->seqNum = seq_num_;
@@ -1947,15 +1932,15 @@
   packet_->completeNALU = kNaluEnd;
   packet_->markerBit = false;
 
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   seq_num_++;
   packet_->seqNum = seq_num_;
   packet_->completeNALU = kNaluComplete;
   packet_->markerBit = true;  // Last packet.
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   // The JB will only output (incomplete) frames if a packet belonging to a
   // subsequent frame was already inserted. Insert one packet of a subsequent
   // frame. place high timestamp so the JB would always have a next frame
@@ -1968,8 +1953,8 @@
   packet_->completeNALU = kNaluStart;
   packet_->markerBit = false;
 
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   VCMEncodedFrame* frame_out = DecodeIncompleteFrame();
 
@@ -1981,7 +1966,7 @@
 
   // Test reordered start frame + 1 lost.
   seq_num_ += 2;  // Re-order 1 frame.
-  timestamp_ += 33*90;
+  timestamp_ += 33 * 90;
   insertedLength = 0;
 
   packet_->seqNum = seq_num_;
@@ -1990,9 +1975,9 @@
   packet_->isFirstPacket = false;
   packet_->completeNALU = kNaluEnd;
   packet_->markerBit = false;
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
-  insertedLength += packet_->sizeBytes; // This packet should be decoded.
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+  insertedLength += packet_->sizeBytes;  // This packet should be decoded.
   seq_num_--;
   packet_->seqNum = seq_num_;
   packet_->timestamp = timestamp_;
@@ -2001,8 +1986,8 @@
   packet_->completeNALU = kNaluStart;
   packet_->markerBit = false;
 
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   insertedLength += packet_->sizeBytes;  // This packet should be decoded.
 
   seq_num_ += 3;  // One packet drop.
@@ -2012,8 +1997,8 @@
   packet_->isFirstPacket = false;
   packet_->completeNALU = kNaluComplete;
   packet_->markerBit = false;
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   insertedLength += packet_->sizeBytes;  // This packet should be decoded.
   seq_num_++;
   packet_->seqNum = seq_num_;
@@ -2022,8 +2007,8 @@
   packet_->isFirstPacket = false;
   packet_->completeNALU = kNaluStart;
   packet_->markerBit = false;
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   // This packet should be decoded since it's the beginning of a NAL.
   insertedLength += packet_->sizeBytes;
 
@@ -2034,8 +2019,8 @@
   packet_->isFirstPacket = false;
   packet_->completeNALU = kNaluEnd;
   packet_->markerBit = true;
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   // This packet should not be decoded because it is an incomplete NAL if it
   // is the last.
   frame_out = DecodeIncompleteFrame();
@@ -2053,8 +2038,8 @@
   emptypacket.isFirstPacket = true;
   emptypacket.completeNALU = kNaluComplete;
   emptypacket.markerBit = true;
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(emptypacket,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(emptypacket, &retransmitted));
   // This packet should not be decoded because it is an incomplete NAL if it
   // is the last.
 
@@ -2075,8 +2060,8 @@
   packet_->completeNALU = kNaluComplete;
   packet_->markerBit = false;
 
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   seq_num_++;
   emptypacket.seqNum = seq_num_;
@@ -2085,8 +2070,8 @@
   emptypacket.isFirstPacket = true;
   emptypacket.completeNALU = kNaluComplete;
   emptypacket.markerBit = true;
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(emptypacket,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(emptypacket, &retransmitted));
 
   frame_out = DecodeCompleteFrame();
   // Only last NALU is complete
@@ -2105,8 +2090,8 @@
   packet_->markerBit = true;
   bool retransmitted = false;
 
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
   VCMEncodedFrame* frame_out = DecodeCompleteFrame();
   EXPECT_TRUE(frame_out != NULL);
   jitter_buffer_->ReleaseFrame(frame_out);
@@ -2117,9 +2102,8 @@
   packet_->isFirstPacket = false;
   packet_->markerBit = false;
 
-
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeIncompleteFrame();
   EXPECT_TRUE(frame_out == NULL);
@@ -2128,8 +2112,8 @@
   packet_->timestamp += 33 * 90;
   packet_->isFirstPacket = true;
 
-  EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
-                                                            &retransmitted));
+  EXPECT_EQ(kDecodableSession,
+            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
 
   frame_out = DecodeIncompleteFrame();
   CheckOutFrame(frame_out, packet_->sizeBytes, false);
@@ -2140,8 +2124,7 @@
   // Make sure the jitter doesn't request a keyframe after too much non-
   // decodable frames.
   jitter_buffer_->SetNackMode(kNack, -1, -1);
-  jitter_buffer_->SetNackSettings(kMaxNumberOfFrames,
-                                  kMaxNumberOfFrames, 0);
+  jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
   // Insert a key frame and decode it.
   EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
   EXPECT_TRUE(DecodeCompleteFrame());
@@ -2290,8 +2273,8 @@
   // old packet.
   DropFrame(1);
   // Insert a frame which should trigger a recycle until the next key frame.
-  EXPECT_EQ(kFlushIndicator, InsertFrames(oldest_packet_to_nack_ + 1,
-                                          kVideoFrameDelta));
+  EXPECT_EQ(kFlushIndicator,
+            InsertFrames(oldest_packet_to_nack_ + 1, kVideoFrameDelta));
   EXPECT_FALSE(DecodeCompleteFrame());
 
   bool request_key_frame = false;
@@ -2382,7 +2365,7 @@
   stream_generator_->Init(0, clock_->TimeInMilliseconds());
   InsertFrame(kVideoFrameKey);
   stream_generator_->GenerateFrame(kVideoFrameDelta, 2, 0,
-                                  clock_->TimeInMilliseconds());
+                                   clock_->TimeInMilliseconds());
   stream_generator_->NextPacket(NULL);  // Drop packet.
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   EXPECT_TRUE(DecodeCompleteFrame());
@@ -2410,8 +2393,8 @@
   EXPECT_EQ(1u, nack_list.size());
   stream_generator_->PopPacket(&packet, 0);
   EXPECT_EQ(packet.seqNum, nack_list[0]);
-  EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(packet,
-                                                           &retransmitted));
+  EXPECT_EQ(kCompleteSession,
+            jitter_buffer_->InsertPacket(packet, &retransmitted));
   EXPECT_TRUE(retransmitted);
   EXPECT_TRUE(DecodeCompleteFrame());
 }
@@ -2419,7 +2402,7 @@
 TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrame) {
   stream_generator_->Init(0, clock_->TimeInMilliseconds());
   stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
-                                  clock_->TimeInMilliseconds());
+                                   clock_->TimeInMilliseconds());
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   // Drop second packet.
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
@@ -2467,7 +2450,7 @@
   // | 1 | 2 | .. | 8 | 9 | x | 11 | 12 | .. | 19 | x | 21 | .. | 100 |
   //  ----------------------------------------------------------------
   stream_generator_->GenerateFrame(kVideoFrameKey, 100, 0,
-                                  clock_->TimeInMilliseconds());
+                                   clock_->TimeInMilliseconds());
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
   EXPECT_EQ(kDecodableSession, InsertPacketAndPop(0));
   // Verify that the frame is incomplete.
@@ -2503,7 +2486,7 @@
   EXPECT_FALSE(request_key_frame);
   EXPECT_TRUE(DecodeCompleteFrame());
   stream_generator_->GenerateFrame(kVideoFrameDelta, 100, 0,
-                                  clock_->TimeInMilliseconds());
+                                   clock_->TimeInMilliseconds());
   EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
   while (stream_generator_->PacketsRemaining() > 1) {
     if (stream_generator_->NextSequenceNumber() % 10 != 0) {
@@ -2540,7 +2523,7 @@
                                    clock_->TimeInMilliseconds());
   clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
   for (int i = 0; i < 5; ++i) {
-    if (stream_generator_->NextSequenceNumber()  != 65535) {
+    if (stream_generator_->NextSequenceNumber() != 65535) {
       EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
       EXPECT_FALSE(request_key_frame);
     } else {
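
For orientation, the unit-test churn above is purely mechanical re-wrapping, and every test follows the same insert-then-decode shape: fill in the RTP-level fields on packet_, call InsertPacket() and check the returned session state (kIncomplete, kCompleteSession, kDecodableSession, kOldPacket, ...), then pull the frame and verify it. A condensed sketch of that pattern, illustrative only and not part of this CL; it reuses the TestBasicJitterBuffer fixture members shown above, and the two-packet key frame is a made-up minimal case:

TEST_F(TestBasicJitterBuffer, InsertThenDecodeSketch) {
  bool retransmitted = false;

  // First packet of a key frame: the session is not complete yet.
  packet_->frameType = kVideoFrameKey;
  packet_->isFirstPacket = true;
  packet_->markerBit = false;
  packet_->seqNum = seq_num_;
  packet_->timestamp = timestamp_;
  EXPECT_EQ(kIncomplete,
            jitter_buffer_->InsertPacket(*packet_, &retransmitted));
  EXPECT_TRUE(DecodeCompleteFrame() == NULL);

  // Second and last packet (marker bit set) completes the session.
  seq_num_++;
  packet_->isFirstPacket = false;
  packet_->markerBit = true;
  packet_->seqNum = seq_num_;
  EXPECT_EQ(kCompleteSession,
            jitter_buffer_->InsertPacket(*packet_, &retransmitted));

  VCMEncodedFrame* frame_out = DecodeCompleteFrame();
  CheckOutFrame(frame_out, 2 * size_, false);
  EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
  jitter_buffer_->ReleaseFrame(frame_out);
}
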
diff --git a/webrtc/modules/video_coding/jitter_estimator.cc b/webrtc/modules/video_coding/jitter_estimator.cc
index 1511665..8270c60 100644
--- a/webrtc/modules/video_coding/jitter_estimator.cc
+++ b/webrtc/modules/video_coding/jitter_estimator.cc
@@ -8,16 +8,18 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "webrtc/modules/video_coding/internal_defines.h"
 #include "webrtc/modules/video_coding/jitter_estimator.h"
-#include "webrtc/modules/video_coding/rtt_filter.h"
-#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/field_trial.h"
 
 #include <assert.h>
 #include <math.h>
 #include <stdlib.h>
 #include <string.h>
+#include <string>
+
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/rtt_filter.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/field_trial.h"
 
 namespace webrtc {
 
@@ -48,267 +50,243 @@
   Reset();
 }
 
-VCMJitterEstimator::~VCMJitterEstimator() {
-}
+VCMJitterEstimator::~VCMJitterEstimator() {}
 
-VCMJitterEstimator&
-VCMJitterEstimator::operator=(const VCMJitterEstimator& rhs)
-{
-    if (this != &rhs)
-    {
-        memcpy(_thetaCov, rhs._thetaCov, sizeof(_thetaCov));
-        memcpy(_Qcov, rhs._Qcov, sizeof(_Qcov));
+VCMJitterEstimator& VCMJitterEstimator::operator=(
+    const VCMJitterEstimator& rhs) {
+  if (this != &rhs) {
+    memcpy(_thetaCov, rhs._thetaCov, sizeof(_thetaCov));
+    memcpy(_Qcov, rhs._Qcov, sizeof(_Qcov));
 
-        _vcmId = rhs._vcmId;
-        _receiverId = rhs._receiverId;
-        _avgFrameSize = rhs._avgFrameSize;
-        _varFrameSize = rhs._varFrameSize;
-        _maxFrameSize = rhs._maxFrameSize;
-        _fsSum = rhs._fsSum;
-        _fsCount = rhs._fsCount;
-        _lastUpdateT = rhs._lastUpdateT;
-        _prevEstimate = rhs._prevEstimate;
-        _prevFrameSize = rhs._prevFrameSize;
-        _avgNoise = rhs._avgNoise;
-        _alphaCount = rhs._alphaCount;
-        _filterJitterEstimate = rhs._filterJitterEstimate;
-        _startupCount = rhs._startupCount;
-        _latestNackTimestamp = rhs._latestNackTimestamp;
-        _nackCount = rhs._nackCount;
-        _rttFilter = rhs._rttFilter;
-    }
-    return *this;
+    _vcmId = rhs._vcmId;
+    _receiverId = rhs._receiverId;
+    _avgFrameSize = rhs._avgFrameSize;
+    _varFrameSize = rhs._varFrameSize;
+    _maxFrameSize = rhs._maxFrameSize;
+    _fsSum = rhs._fsSum;
+    _fsCount = rhs._fsCount;
+    _lastUpdateT = rhs._lastUpdateT;
+    _prevEstimate = rhs._prevEstimate;
+    _prevFrameSize = rhs._prevFrameSize;
+    _avgNoise = rhs._avgNoise;
+    _alphaCount = rhs._alphaCount;
+    _filterJitterEstimate = rhs._filterJitterEstimate;
+    _startupCount = rhs._startupCount;
+    _latestNackTimestamp = rhs._latestNackTimestamp;
+    _nackCount = rhs._nackCount;
+    _rttFilter = rhs._rttFilter;
+  }
+  return *this;
 }
 
 // Resets the JitterEstimate
-void
-VCMJitterEstimator::Reset()
-{
-    _theta[0] = 1/(512e3/8);
-    _theta[1] = 0;
-    _varNoise = 4.0;
+void VCMJitterEstimator::Reset() {
+  _theta[0] = 1 / (512e3 / 8);
+  _theta[1] = 0;
+  _varNoise = 4.0;
 
-    _thetaCov[0][0] = 1e-4;
-    _thetaCov[1][1] = 1e2;
-    _thetaCov[0][1] = _thetaCov[1][0] = 0;
-    _Qcov[0][0] = 2.5e-10;
-    _Qcov[1][1] = 1e-10;
-    _Qcov[0][1] = _Qcov[1][0] = 0;
-    _avgFrameSize = 500;
-    _maxFrameSize = 500;
-    _varFrameSize = 100;
-    _lastUpdateT = -1;
-    _prevEstimate = -1.0;
-    _prevFrameSize = 0;
-    _avgNoise = 0.0;
-    _alphaCount = 1;
-    _filterJitterEstimate = 0.0;
-    _latestNackTimestamp = 0;
-    _nackCount = 0;
-    _fsSum = 0;
-    _fsCount = 0;
-    _startupCount = 0;
-    _rttFilter.Reset();
-    fps_counter_.Reset();
+  _thetaCov[0][0] = 1e-4;
+  _thetaCov[1][1] = 1e2;
+  _thetaCov[0][1] = _thetaCov[1][0] = 0;
+  _Qcov[0][0] = 2.5e-10;
+  _Qcov[1][1] = 1e-10;
+  _Qcov[0][1] = _Qcov[1][0] = 0;
+  _avgFrameSize = 500;
+  _maxFrameSize = 500;
+  _varFrameSize = 100;
+  _lastUpdateT = -1;
+  _prevEstimate = -1.0;
+  _prevFrameSize = 0;
+  _avgNoise = 0.0;
+  _alphaCount = 1;
+  _filterJitterEstimate = 0.0;
+  _latestNackTimestamp = 0;
+  _nackCount = 0;
+  _fsSum = 0;
+  _fsCount = 0;
+  _startupCount = 0;
+  _rttFilter.Reset();
+  fps_counter_.Reset();
 }
 
-void
-VCMJitterEstimator::ResetNackCount()
-{
-    _nackCount = 0;
+void VCMJitterEstimator::ResetNackCount() {
+  _nackCount = 0;
 }
 
 // Updates the estimates with the new measurements
-void
-VCMJitterEstimator::UpdateEstimate(int64_t frameDelayMS, uint32_t frameSizeBytes,
-                                            bool incompleteFrame /* = false */)
-{
-    if (frameSizeBytes == 0)
-    {
-        return;
+void VCMJitterEstimator::UpdateEstimate(int64_t frameDelayMS,
+                                        uint32_t frameSizeBytes,
+                                        bool incompleteFrame /* = false */) {
+  if (frameSizeBytes == 0) {
+    return;
+  }
+  int deltaFS = frameSizeBytes - _prevFrameSize;
+  if (_fsCount < kFsAccuStartupSamples) {
+    _fsSum += frameSizeBytes;
+    _fsCount++;
+  } else if (_fsCount == kFsAccuStartupSamples) {
+    // Give the frame size filter
+    _avgFrameSize = static_cast<double>(_fsSum) / static_cast<double>(_fsCount);
+    _fsCount++;
+  }
+  if (!incompleteFrame || frameSizeBytes > _avgFrameSize) {
+    double avgFrameSize = _phi * _avgFrameSize + (1 - _phi) * frameSizeBytes;
+    if (frameSizeBytes < _avgFrameSize + 2 * sqrt(_varFrameSize)) {
+      // Only update the average frame size if this sample wasn't a
+      // key frame
+      _avgFrameSize = avgFrameSize;
     }
-    int deltaFS = frameSizeBytes - _prevFrameSize;
-    if (_fsCount < kFsAccuStartupSamples)
-    {
-        _fsSum += frameSizeBytes;
-        _fsCount++;
-    }
-    else if (_fsCount == kFsAccuStartupSamples)
-    {
-        // Give the frame size filter
-        _avgFrameSize = static_cast<double>(_fsSum) /
-                        static_cast<double>(_fsCount);
-        _fsCount++;
-    }
-    if (!incompleteFrame || frameSizeBytes > _avgFrameSize)
-    {
-        double avgFrameSize = _phi * _avgFrameSize +
-                              (1 - _phi) * frameSizeBytes;
-        if (frameSizeBytes < _avgFrameSize + 2 * sqrt(_varFrameSize))
-        {
-            // Only update the average frame size if this sample wasn't a
-            // key frame
-            _avgFrameSize = avgFrameSize;
-        }
-        // Update the variance anyway since we want to capture cases where we only get
-        // key frames.
-        _varFrameSize = VCM_MAX(_phi * _varFrameSize + (1 - _phi) *
-                                (frameSizeBytes - avgFrameSize) *
-                                (frameSizeBytes - avgFrameSize), 1.0);
-    }
+    // Update the variance anyway since we want to capture cases where we only
+    // get
+    // key frames.
+    _varFrameSize = VCM_MAX(_phi * _varFrameSize +
+                                (1 - _phi) * (frameSizeBytes - avgFrameSize) *
+                                    (frameSizeBytes - avgFrameSize),
+                            1.0);
+  }
 
-    // Update max frameSize estimate
-    _maxFrameSize = VCM_MAX(_psi * _maxFrameSize, static_cast<double>(frameSizeBytes));
+  // Update max frameSize estimate
+  _maxFrameSize =
+      VCM_MAX(_psi * _maxFrameSize, static_cast<double>(frameSizeBytes));
 
-    if (_prevFrameSize == 0)
-    {
-        _prevFrameSize = frameSizeBytes;
-        return;
-    }
+  if (_prevFrameSize == 0) {
     _prevFrameSize = frameSizeBytes;
+    return;
+  }
+  _prevFrameSize = frameSizeBytes;
 
-    // Only update the Kalman filter if the sample is not considered
-    // an extreme outlier. Even if it is an extreme outlier from a
-    // delay point of view, if the frame size also is large the
-    // deviation is probably due to an incorrect line slope.
-    double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);
+  // Only update the Kalman filter if the sample is not considered
+  // an extreme outlier. Even if it is an extreme outlier from a
+  // delay point of view, if the frame size also is large the
+  // deviation is probably due to an incorrect line slope.
+  double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);
 
-    if (fabs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
-        frameSizeBytes > _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize))
-    {
-        // Update the variance of the deviation from the
-        // line given by the Kalman filter
-        EstimateRandomJitter(deviation, incompleteFrame);
-        // Prevent updating with frames which have been congested by a large
-        // frame, and therefore arrives almost at the same time as that frame.
-        // This can occur when we receive a large frame (key frame) which
-        // has been delayed. The next frame is of normal size (delta frame),
-        // and thus deltaFS will be << 0. This removes all frame samples
-        // which arrives after a key frame.
-        if ((!incompleteFrame || deviation >= 0.0) &&
-            static_cast<double>(deltaFS) > - 0.25 * _maxFrameSize)
-        {
-            // Update the Kalman filter with the new data
-            KalmanEstimateChannel(frameDelayMS, deltaFS);
-        }
+  if (fabs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
+      frameSizeBytes >
+          _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize)) {
+    // Update the variance of the deviation from the
+    // line given by the Kalman filter
+    EstimateRandomJitter(deviation, incompleteFrame);
+    // Prevent updating with frames which have been congested by a large
+    // frame, and therefore arrives almost at the same time as that frame.
+    // This can occur when we receive a large frame (key frame) which
+    // has been delayed. The next frame is of normal size (delta frame),
+    // and thus deltaFS will be << 0. This removes all frame samples
+    // which arrives after a key frame.
+    if ((!incompleteFrame || deviation >= 0.0) &&
+        static_cast<double>(deltaFS) > -0.25 * _maxFrameSize) {
+      // Update the Kalman filter with the new data
+      KalmanEstimateChannel(frameDelayMS, deltaFS);
     }
-    else
-    {
-        int nStdDev = (deviation >= 0) ? _numStdDevDelayOutlier : -_numStdDevDelayOutlier;
-        EstimateRandomJitter(nStdDev * sqrt(_varNoise), incompleteFrame);
-    }
-    // Post process the total estimated jitter
-    if (_startupCount >= kStartupDelaySamples)
-    {
-        PostProcessEstimate();
-    }
-    else
-    {
-        _startupCount++;
-    }
+  } else {
+    int nStdDev =
+        (deviation >= 0) ? _numStdDevDelayOutlier : -_numStdDevDelayOutlier;
+    EstimateRandomJitter(nStdDev * sqrt(_varNoise), incompleteFrame);
+  }
+  // Post process the total estimated jitter
+  if (_startupCount >= kStartupDelaySamples) {
+    PostProcessEstimate();
+  } else {
+    _startupCount++;
+  }
 }
 
 // Updates the nack/packet ratio
-void
-VCMJitterEstimator::FrameNacked()
-{
-    // Wait until _nackLimit retransmissions has been received,
-    // then always add ~1 RTT delay.
-    // TODO(holmer): Should we ever remove the additional delay if the
-    // the packet losses seem to have stopped? We could for instance scale
-    // the number of RTTs to add with the amount of retransmissions in a given
-    // time interval, or similar.
-    if (_nackCount < _nackLimit)
-    {
-        _nackCount++;
-    }
+void VCMJitterEstimator::FrameNacked() {
+  // Wait until _nackLimit retransmissions has been received,
+  // then always add ~1 RTT delay.
+  // TODO(holmer): Should we ever remove the additional delay if the
+  // the packet losses seem to have stopped? We could for instance scale
+  // the number of RTTs to add with the amount of retransmissions in a given
+  // time interval, or similar.
+  if (_nackCount < _nackLimit) {
+    _nackCount++;
+  }
 }
 
 // Updates Kalman estimate of the channel
 // The caller is expected to sanity check the inputs.
-void
-VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS,
-                                          int32_t deltaFSBytes)
-{
-    double Mh[2];
-    double hMh_sigma;
-    double kalmanGain[2];
-    double measureRes;
-    double t00, t01;
+void VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS,
+                                               int32_t deltaFSBytes) {
+  double Mh[2];
+  double hMh_sigma;
+  double kalmanGain[2];
+  double measureRes;
+  double t00, t01;
 
-    // Kalman filtering
+  // Kalman filtering
 
-    // Prediction
-    // M = M + Q
-    _thetaCov[0][0] += _Qcov[0][0];
-    _thetaCov[0][1] += _Qcov[0][1];
-    _thetaCov[1][0] += _Qcov[1][0];
-    _thetaCov[1][1] += _Qcov[1][1];
+  // Prediction
+  // M = M + Q
+  _thetaCov[0][0] += _Qcov[0][0];
+  _thetaCov[0][1] += _Qcov[0][1];
+  _thetaCov[1][0] += _Qcov[1][0];
+  _thetaCov[1][1] += _Qcov[1][1];
 
-    // Kalman gain
-    // K = M*h'/(sigma2n + h*M*h') = M*h'/(1 + h*M*h')
-    // h = [dFS 1]
-    // Mh = M*h'
-    // hMh_sigma = h*M*h' + R
-    Mh[0] = _thetaCov[0][0] * deltaFSBytes + _thetaCov[0][1];
-    Mh[1] = _thetaCov[1][0] * deltaFSBytes + _thetaCov[1][1];
-    // sigma weights measurements with a small deltaFS as noisy and
-    // measurements with large deltaFS as good
-    if (_maxFrameSize < 1.0)
-    {
-        return;
-    }
-    double sigma = (300.0 * exp(-fabs(static_cast<double>(deltaFSBytes)) /
-                   (1e0 * _maxFrameSize)) + 1) * sqrt(_varNoise);
-    if (sigma < 1.0)
-    {
-        sigma = 1.0;
-    }
-    hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma;
-    if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) || (hMh_sigma > -1e-9 && hMh_sigma <= 0))
-    {
-        assert(false);
-        return;
-    }
-    kalmanGain[0] = Mh[0] / hMh_sigma;
-    kalmanGain[1] = Mh[1] / hMh_sigma;
+  // Kalman gain
+  // K = M*h'/(sigma2n + h*M*h') = M*h'/(1 + h*M*h')
+  // h = [dFS 1]
+  // Mh = M*h'
+  // hMh_sigma = h*M*h' + R
+  Mh[0] = _thetaCov[0][0] * deltaFSBytes + _thetaCov[0][1];
+  Mh[1] = _thetaCov[1][0] * deltaFSBytes + _thetaCov[1][1];
+  // sigma weights measurements with a small deltaFS as noisy and
+  // measurements with large deltaFS as good
+  if (_maxFrameSize < 1.0) {
+    return;
+  }
+  double sigma = (300.0 * exp(-fabs(static_cast<double>(deltaFSBytes)) /
+                              (1e0 * _maxFrameSize)) +
+                  1) *
+                 sqrt(_varNoise);
+  if (sigma < 1.0) {
+    sigma = 1.0;
+  }
+  hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma;
+  if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) ||
+      (hMh_sigma > -1e-9 && hMh_sigma <= 0)) {
+    assert(false);
+    return;
+  }
+  kalmanGain[0] = Mh[0] / hMh_sigma;
+  kalmanGain[1] = Mh[1] / hMh_sigma;
 
-    // Correction
-    // theta = theta + K*(dT - h*theta)
-    measureRes = frameDelayMS - (deltaFSBytes * _theta[0] + _theta[1]);
-    _theta[0] += kalmanGain[0] * measureRes;
-    _theta[1] += kalmanGain[1] * measureRes;
+  // Correction
+  // theta = theta + K*(dT - h*theta)
+  measureRes = frameDelayMS - (deltaFSBytes * _theta[0] + _theta[1]);
+  _theta[0] += kalmanGain[0] * measureRes;
+  _theta[1] += kalmanGain[1] * measureRes;
 
-    if (_theta[0] < _thetaLow)
-    {
-        _theta[0] = _thetaLow;
-    }
+  if (_theta[0] < _thetaLow) {
+    _theta[0] = _thetaLow;
+  }
 
-    // M = (I - K*h)*M
-    t00 = _thetaCov[0][0];
-    t01 = _thetaCov[0][1];
-    _thetaCov[0][0] = (1 - kalmanGain[0] * deltaFSBytes) * t00 -
-                      kalmanGain[0] * _thetaCov[1][0];
-    _thetaCov[0][1] = (1 - kalmanGain[0] * deltaFSBytes) * t01 -
-                      kalmanGain[0] * _thetaCov[1][1];
-    _thetaCov[1][0] = _thetaCov[1][0] * (1 - kalmanGain[1]) -
-                      kalmanGain[1] * deltaFSBytes * t00;
-    _thetaCov[1][1] = _thetaCov[1][1] * (1 - kalmanGain[1]) -
-                      kalmanGain[1] * deltaFSBytes * t01;
+  // M = (I - K*h)*M
+  t00 = _thetaCov[0][0];
+  t01 = _thetaCov[0][1];
+  _thetaCov[0][0] = (1 - kalmanGain[0] * deltaFSBytes) * t00 -
+                    kalmanGain[0] * _thetaCov[1][0];
+  _thetaCov[0][1] = (1 - kalmanGain[0] * deltaFSBytes) * t01 -
+                    kalmanGain[0] * _thetaCov[1][1];
+  _thetaCov[1][0] = _thetaCov[1][0] * (1 - kalmanGain[1]) -
+                    kalmanGain[1] * deltaFSBytes * t00;
+  _thetaCov[1][1] = _thetaCov[1][1] * (1 - kalmanGain[1]) -
+                    kalmanGain[1] * deltaFSBytes * t01;
 
-    // Covariance matrix, must be positive semi-definite
-    assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 &&
-           _thetaCov[0][0] * _thetaCov[1][1] - _thetaCov[0][1] * _thetaCov[1][0] >= 0 &&
-           _thetaCov[0][0] >= 0);
+  // Covariance matrix, must be positive semi-definite
+  assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 &&
+         _thetaCov[0][0] * _thetaCov[1][1] -
+                 _thetaCov[0][1] * _thetaCov[1][0] >=
+             0 &&
+         _thetaCov[0][0] >= 0);
 }
 
 // Calculate difference in delay between a sample and the
 // expected delay estimated by the Kalman filter
-double
-VCMJitterEstimator::DeviationFromExpectedDelay(int64_t frameDelayMS,
-                                               int32_t deltaFSBytes) const
-{
-    return frameDelayMS - (_theta[0] * deltaFSBytes + _theta[1]);
+double VCMJitterEstimator::DeviationFromExpectedDelay(
+    int64_t frameDelayMS,
+    int32_t deltaFSBytes) const {
+  return frameDelayMS - (_theta[0] * deltaFSBytes + _theta[1]);
 }
 
 // Estimates the random jitter by calculating the variance of the
@@ -363,61 +341,45 @@
   }
 }
 
-double
-VCMJitterEstimator::NoiseThreshold() const
-{
-    double noiseThreshold = _noiseStdDevs * sqrt(_varNoise) - _noiseStdDevOffset;
-    if (noiseThreshold < 1.0)
-    {
-        noiseThreshold = 1.0;
-    }
-    return noiseThreshold;
+double VCMJitterEstimator::NoiseThreshold() const {
+  double noiseThreshold = _noiseStdDevs * sqrt(_varNoise) - _noiseStdDevOffset;
+  if (noiseThreshold < 1.0) {
+    noiseThreshold = 1.0;
+  }
+  return noiseThreshold;
 }
 
 // Calculates the current jitter estimate from the filtered estimates
-double
-VCMJitterEstimator::CalculateEstimate()
-{
-    double ret = _theta[0] * (_maxFrameSize - _avgFrameSize) + NoiseThreshold();
+double VCMJitterEstimator::CalculateEstimate() {
+  double ret = _theta[0] * (_maxFrameSize - _avgFrameSize) + NoiseThreshold();
 
-    // A very low estimate (or negative) is neglected
-    if (ret < 1.0) {
-        if (_prevEstimate <= 0.01)
-        {
-            ret = 1.0;
-        }
-        else
-        {
-            ret = _prevEstimate;
-        }
+  // A very low estimate (or negative) is neglected
+  if (ret < 1.0) {
+    if (_prevEstimate <= 0.01) {
+      ret = 1.0;
+    } else {
+      ret = _prevEstimate;
     }
-    if (ret > 10000.0) // Sanity
-    {
-        ret = 10000.0;
-    }
-    _prevEstimate = ret;
-    return ret;
+  }
+  if (ret > 10000.0) {  // Sanity
+    ret = 10000.0;
+  }
+  _prevEstimate = ret;
+  return ret;
 }
 
-void
-VCMJitterEstimator::PostProcessEstimate()
-{
-    _filterJitterEstimate = CalculateEstimate();
+void VCMJitterEstimator::PostProcessEstimate() {
+  _filterJitterEstimate = CalculateEstimate();
 }
 
-void
-VCMJitterEstimator::UpdateRtt(int64_t rttMs)
-{
-    _rttFilter.Update(rttMs);
+void VCMJitterEstimator::UpdateRtt(int64_t rttMs) {
+  _rttFilter.Update(rttMs);
 }
 
-void
-VCMJitterEstimator::UpdateMaxFrameSize(uint32_t frameSizeBytes)
-{
-    if (_maxFrameSize < frameSizeBytes)
-    {
-        _maxFrameSize = frameSizeBytes;
-    }
+void VCMJitterEstimator::UpdateMaxFrameSize(uint32_t frameSizeBytes) {
+  if (_maxFrameSize < frameSizeBytes) {
+    _maxFrameSize = frameSizeBytes;
+  }
 }
 
 // Returns the current filtered estimate if available,
@@ -478,5 +440,4 @@
   }
   return fps;
 }
-
-}
+}  // namespace webrtc
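
Two more reading aids before the header diff below, neither of which changes in this CL. First, the estimate assembled by the reformatted NoiseThreshold()/CalculateEstimate() pair above is

\[
  J = \theta_0\,(\mathit{maxFrameSize} - \mathit{avgFrameSize})
      + \max\!\bigl(1,\; n_{\sigma}\sqrt{\mathit{varNoise}} - \mathit{noiseStdDevOffset}\bigr),
\]

with $n_{\sigma}$ = _noiseStdDevs, capped at 10000 ms, and with raw values below 1 falling back to the previous estimate (or 1.0 at startup).

Second, a minimal caller sketch for the public interface declared in jitter_estimator.h (reformatted next). This is not code from the tree: the clock source, the sample values and the RTT multiplier are assumptions, and in the real pipeline the jitter buffer plays this role.

#include "webrtc/modules/video_coding/jitter_estimator.h"
#include "webrtc/system_wrappers/include/clock.h"

namespace webrtc {

// Hypothetical receive-side driver, sketched from the public interface only.
void JitterEstimatorUsageSketch() {
  VCMJitterEstimator estimator(Clock::GetRealTimeClock());

  // Per received frame: feed the inter-frame delay delta (ms) and frame size.
  int64_t frame_delay_ms = 5;        // Assumed to come from the inter-arrival filter.
  uint32_t frame_size_bytes = 1200;  // Assumed to come from the depacketizer.
  estimator.UpdateEstimate(frame_delay_ms, frame_size_bytes,
                           false /* incompleteFrame */);

  // On a retransmitted packet and on a new RTCP round-trip time, respectively.
  estimator.FrameNacked();
  estimator.UpdateRtt(80);  // 80 ms is an illustrative value.

  // Current jitter estimate in ms; the multiplier scales the RTT-dependent
  // term that is added once enough retransmissions have been observed.
  int jitter_ms = estimator.GetJitterEstimate(1.0);
  (void)jitter_ms;
}

}  // namespace webrtc
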
diff --git a/webrtc/modules/video_coding/jitter_estimator.h b/webrtc/modules/video_coding/jitter_estimator.h
index 580e114..a7b4b3e 100644
--- a/webrtc/modules/video_coding/jitter_estimator.h
+++ b/webrtc/modules/video_coding/jitter_estimator.h
@@ -15,151 +15,156 @@
 #include "webrtc/modules/video_coding/rtt_filter.h"
 #include "webrtc/typedefs.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
 class Clock;
 
-class VCMJitterEstimator
-{
-public:
-    VCMJitterEstimator(const Clock* clock,
-                       int32_t vcmId = 0,
-                       int32_t receiverId = 0);
-    virtual ~VCMJitterEstimator();
-    VCMJitterEstimator& operator=(const VCMJitterEstimator& rhs);
+class VCMJitterEstimator {
+ public:
+  VCMJitterEstimator(const Clock* clock,
+                     int32_t vcmId = 0,
+                     int32_t receiverId = 0);
+  virtual ~VCMJitterEstimator();
+  VCMJitterEstimator& operator=(const VCMJitterEstimator& rhs);
 
-    // Resets the estimate to the initial state
-    void Reset();
-    void ResetNackCount();
+  // Resets the estimate to the initial state
+  void Reset();
+  void ResetNackCount();
 
-    // Updates the jitter estimate with the new data.
-    //
-    // Input:
-    //          - frameDelay      : Delay-delta calculated by UTILDelayEstimate in milliseconds
-    //          - frameSize       : Frame size of the current frame.
-    //          - incompleteFrame : Flags if the frame is used to update the estimate before it
-    //                              was complete. Default is false.
-    void UpdateEstimate(int64_t frameDelayMS,
-                        uint32_t frameSizeBytes,
-                        bool incompleteFrame = false);
+  // Updates the jitter estimate with the new data.
+  //
+  // Input:
+  //          - frameDelay      : Delay-delta calculated by UTILDelayEstimate in
+  //          milliseconds
+  //          - frameSize       : Frame size of the current frame.
+  //          - incompleteFrame : Flags if the frame is used to update the
+  //          estimate before it
+  //                              was complete. Default is false.
+  void UpdateEstimate(int64_t frameDelayMS,
+                      uint32_t frameSizeBytes,
+                      bool incompleteFrame = false);
 
-    // Returns the current jitter estimate in milliseconds and adds
-    // also adds an RTT dependent term in cases of retransmission.
-    //  Input:
-    //          - rttMultiplier  : RTT param multiplier (when applicable).
-    //
-    // Return value                   : Jitter estimate in milliseconds
-    int GetJitterEstimate(double rttMultiplier);
+  // Returns the current jitter estimate in milliseconds and adds
+  // also adds an RTT dependent term in cases of retransmission.
+  //  Input:
+  //          - rttMultiplier  : RTT param multiplier (when applicable).
+  //
+  // Return value                   : Jitter estimate in milliseconds
+  int GetJitterEstimate(double rttMultiplier);
 
-    // Updates the nack counter.
-    void FrameNacked();
+  // Updates the nack counter.
+  void FrameNacked();
 
-    // Updates the RTT filter.
-    //
-    // Input:
-    //          - rttMs               : RTT in ms
-    void UpdateRtt(int64_t rttMs);
+  // Updates the RTT filter.
+  //
+  // Input:
+  //          - rttMs               : RTT in ms
+  void UpdateRtt(int64_t rttMs);
 
-    void UpdateMaxFrameSize(uint32_t frameSizeBytes);
+  void UpdateMaxFrameSize(uint32_t frameSizeBytes);
 
-    // A constant describing the delay from the jitter buffer
-    // to the delay on the receiving side which is not accounted
-    // for by the jitter buffer nor the decoding delay estimate.
-    static const uint32_t OPERATING_SYSTEM_JITTER = 10;
+  // A constant describing the delay from the jitter buffer
+  // to the delay on the receiving side which is not accounted
+  // for by the jitter buffer nor the decoding delay estimate.
+  static const uint32_t OPERATING_SYSTEM_JITTER = 10;
 
-protected:
-    // These are protected for better testing possibilities
-    double              _theta[2]; // Estimated line parameters (slope, offset)
-    double              _varNoise; // Variance of the time-deviation from the line
+ protected:
+  // These are protected for better testing possibilities
+  double _theta[2];  // Estimated line parameters (slope, offset)
+  double _varNoise;  // Variance of the time-deviation from the line
 
-    virtual bool LowRateExperimentEnabled();
+  virtual bool LowRateExperimentEnabled();
 
-private:
-    // Updates the Kalman filter for the line describing
-    // the frame size dependent jitter.
-    //
-    // Input:
-    //          - frameDelayMS    : Delay-delta calculated by UTILDelayEstimate in milliseconds
-    //          - deltaFSBytes    : Frame size delta, i.e.
-    //                            : frame size at time T minus frame size at time T-1
-    void KalmanEstimateChannel(int64_t frameDelayMS, int32_t deltaFSBytes);
+ private:
+  // Updates the Kalman filter for the line describing
+  // the frame size dependent jitter.
+  //
+  // Input:
+  //          - frameDelayMS    : Delay-delta calculated by UTILDelayEstimate,
+  //                              in milliseconds
+  //          - deltaFSBytes    : Frame size delta, i.e. frame size at time T
+  //                              minus frame size at time T-1
+  void KalmanEstimateChannel(int64_t frameDelayMS, int32_t deltaFSBytes);
 
-    // Updates the random jitter estimate, i.e. the variance
-    // of the time deviations from the line given by the Kalman filter.
-    //
-    // Input:
-    //          - d_dT              : The deviation from the kalman estimate
-    //          - incompleteFrame   : True if the frame used to update the estimate
-    //                                with was incomplete
-    void EstimateRandomJitter(double d_dT, bool incompleteFrame);
+  // Updates the random jitter estimate, i.e. the variance
+  // of the time deviations from the line given by the Kalman filter.
+  //
+  // Input:
+  //          - d_dT              : The deviation from the Kalman estimate
+  //          - incompleteFrame   : True if the frame used to update the
+  //                                estimate was incomplete
+  void EstimateRandomJitter(double d_dT, bool incompleteFrame);
 
-    double NoiseThreshold() const;
+  double NoiseThreshold() const;
 
-    // Calculates the current jitter estimate.
-    //
-    // Return value                 : The current jitter estimate in milliseconds
-    double CalculateEstimate();
+  // Calculates the current jitter estimate.
+  //
+  // Return value                 : The current jitter estimate in milliseconds
+  double CalculateEstimate();
 
-    // Post process the calculated estimate
-    void PostProcessEstimate();
+  // Post process the calculated estimate
+  void PostProcessEstimate();
 
-    // Calculates the difference in delay between a sample and the
-    // expected delay estimated by the Kalman filter.
-    //
-    // Input:
-    //          - frameDelayMS    : Delay-delta calculated by UTILDelayEstimate in milliseconds
-    //          - deltaFS         : Frame size delta, i.e. frame size at time
-    //                              T minus frame size at time T-1
-    //
-    // Return value                 : The difference in milliseconds
-    double DeviationFromExpectedDelay(int64_t frameDelayMS,
-                                      int32_t deltaFSBytes) const;
+  // Calculates the difference in delay between a sample and the
+  // expected delay estimated by the Kalman filter.
+  //
+  // Input:
+  //          - frameDelayMS    : Delay-delta calculated by UTILDelayEstimate,
+  //                              in milliseconds
+  //          - deltaFS         : Frame size delta, i.e. frame size at time
+  //                              T minus frame size at time T-1
+  //
+  // Return value                 : The difference in milliseconds
+  double DeviationFromExpectedDelay(int64_t frameDelayMS,
+                                    int32_t deltaFSBytes) const;
 
-    double GetFrameRate() const;
+  double GetFrameRate() const;
 
-    // Constants, filter parameters
-    int32_t         _vcmId;
-    int32_t         _receiverId;
-    const double          _phi;
-    const double          _psi;
-    const uint32_t  _alphaCountMax;
-    const double          _thetaLow;
-    const uint32_t  _nackLimit;
-    const int32_t   _numStdDevDelayOutlier;
-    const int32_t   _numStdDevFrameSizeOutlier;
-    const double          _noiseStdDevs;
-    const double          _noiseStdDevOffset;
+  // Constants, filter parameters
+  int32_t _vcmId;
+  int32_t _receiverId;
+  const double _phi;
+  const double _psi;
+  const uint32_t _alphaCountMax;
+  const double _thetaLow;
+  const uint32_t _nackLimit;
+  const int32_t _numStdDevDelayOutlier;
+  const int32_t _numStdDevFrameSizeOutlier;
+  const double _noiseStdDevs;
+  const double _noiseStdDevOffset;
 
-    double                _thetaCov[2][2]; // Estimate covariance
-    double                _Qcov[2][2];     // Process noise covariance
-    double                _avgFrameSize;   // Average frame size
-    double                _varFrameSize;   // Frame size variance
-    double                _maxFrameSize;   // Largest frame size received (descending
-                                           // with a factor _psi)
-    uint32_t        _fsSum;
-    uint32_t        _fsCount;
+  double _thetaCov[2][2];  // Estimate covariance
+  double _Qcov[2][2];      // Process noise covariance
+  double _avgFrameSize;    // Average frame size
+  double _varFrameSize;    // Frame size variance
+  double _maxFrameSize;    // Largest frame size received (descending
+                           // with a factor _psi)
+  uint32_t _fsSum;
+  uint32_t _fsCount;
 
-    int64_t         _lastUpdateT;
-    double                _prevEstimate;         // The previously returned jitter estimate
-    uint32_t        _prevFrameSize;        // Frame size of the previous frame
-    double                _avgNoise;             // Average of the random jitter
-    uint32_t        _alphaCount;
-    double                _filterJitterEstimate; // The filtered sum of jitter estimates
+  int64_t _lastUpdateT;
+  double _prevEstimate;     // The previously returned jitter estimate
+  uint32_t _prevFrameSize;  // Frame size of the previous frame
+  double _avgNoise;         // Average of the random jitter
+  uint32_t _alphaCount;
+  double _filterJitterEstimate;  // The filtered sum of jitter estimates
 
-    uint32_t        _startupCount;
+  uint32_t _startupCount;
 
-    int64_t         _latestNackTimestamp;  // Timestamp in ms when the latest nack was seen
-    uint32_t        _nackCount;            // Keeps track of the number of nacks received,
-                                                 // but never goes above _nackLimit
-    VCMRttFilter          _rttFilter;
+  int64_t _latestNackTimestamp;  // Timestamp in ms when the latest nack was
+                                 // seen.
+  uint32_t _nackCount;       // Keeps track of the number of nacks received,
+                             // but never goes above _nackLimit
+  VCMRttFilter _rttFilter;
 
-    rtc::RollingAccumulator<uint64_t> fps_counter_;
-    enum ExperimentFlag { kInit, kEnabled, kDisabled };
-    ExperimentFlag low_rate_experiment_;
-    const Clock* clock_;
+  rtc::RollingAccumulator<uint64_t> fps_counter_;
+  enum ExperimentFlag { kInit, kEnabled, kDisabled };
+  ExperimentFlag low_rate_experiment_;
+  const Clock* clock_;
 };
 
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
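For readers skimming this header, here is a minimal usage sketch of the public VCMJitterEstimator API shown above. It is illustrative only and not part of this CL: the helper name, the per-frame loop it stands in for, and the rttMultiplier value are assumptions.

#include "webrtc/modules/video_coding/jitter_estimator.h"

namespace webrtc {

// Illustrative helper (not from this CL): feeds one frame's stats into the
// estimator and reads back the jitter delay to budget on the receive side.
int CurrentJitterDelayMs(VCMJitterEstimator* estimator,
                         int64_t frameDelayMs,
                         uint32_t frameSizeBytes,
                         int64_t rttMs) {
  // Per-frame delay delta and frame size drive the Kalman-based estimate.
  estimator->UpdateEstimate(frameDelayMs, frameSizeBytes);
  // Keep the RTT filter fresh so the retransmission term stays meaningful.
  estimator->UpdateRtt(rttMs);
  // rttMultiplier scales the RTT dependent term added in cases of
  // retransmission; 1.0 is used here purely as an example value.
  return estimator->GetJitterEstimate(1.0);
}

}  // namespace webrtc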
diff --git a/webrtc/modules/video_coding/media_opt_util.cc b/webrtc/modules/video_coding/media_opt_util.cc
index 9bd7226..d57e9c8 100644
--- a/webrtc/modules/video_coding/media_opt_util.cc
+++ b/webrtc/modules/video_coding/media_opt_util.cc
@@ -10,11 +10,12 @@
 
 #include "webrtc/modules/video_coding/media_opt_util.h"
 
-#include <algorithm>
 #include <float.h>
 #include <limits.h>
 #include <math.h>
 
+#include <algorithm>
+
 #include "webrtc/modules/include/module_common_types.h"
 #include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
 #include "webrtc/modules/video_coding/include/video_coding_defines.h"
@@ -37,18 +38,14 @@
       _useUepProtectionK(false),
       _useUepProtectionD(true),
       _corrFecCost(1.0),
-      _type(kNone) {
-}
+      _type(kNone) {}
 
-VCMProtectionMethod::~VCMProtectionMethod()
-{
-    delete _qmRobustness;
+VCMProtectionMethod::~VCMProtectionMethod() {
+  delete _qmRobustness;
 }
-void
-VCMProtectionMethod::UpdateContentMetrics(const
-                                          VideoContentMetrics* contentMetrics)
-{
-    _qmRobustness->UpdateContent(contentMetrics);
+void VCMProtectionMethod::UpdateContentMetrics(
+    const VideoContentMetrics* contentMetrics) {
+  _qmRobustness->UpdateContent(contentMetrics);
 }
 
 VCMNackFecMethod::VCMNackFecMethod(int64_t lowRttNackThresholdMs,
@@ -64,51 +61,45 @@
   _type = kNackFec;
 }
 
-VCMNackFecMethod::~VCMNackFecMethod()
-{
-    //
+VCMNackFecMethod::~VCMNackFecMethod() {
+  //
 }
-bool
-VCMNackFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters)
-{
-    // Hybrid Nack FEC has three operational modes:
-    // 1. Low RTT (below kLowRttNackMs) - Nack only: Set FEC rate
-    //    (_protectionFactorD) to zero. -1 means no FEC.
-    // 2. High RTT (above _highRttNackMs) - FEC Only: Keep FEC factors.
-    //    -1 means always allow NACK.
-    // 3. Medium RTT values - Hybrid mode: We will only nack the
-    //    residual following the decoding of the FEC (refer to JB logic). FEC
-    //    delta protection factor will be adjusted based on the RTT.
+bool VCMNackFecMethod::ProtectionFactor(
+    const VCMProtectionParameters* parameters) {
+  // Hybrid Nack FEC has three operational modes:
+  // 1. Low RTT (below kLowRttNackMs) - Nack only: Set FEC rate
+  //    (_protectionFactorD) to zero. -1 means no FEC.
+  // 2. High RTT (above _highRttNackMs) - FEC Only: Keep FEC factors.
+  //    -1 means always allow NACK.
+  // 3. Medium RTT values - Hybrid mode: We will only nack the
+  //    residual following the decoding of the FEC (refer to JB logic). FEC
+  //    delta protection factor will be adjusted based on the RTT.
 
-    // Otherwise: we count on FEC; if the RTT is below a threshold, then we
-    // nack the residual, based on a decision made in the JB.
+  // Otherwise: we count on FEC; if the RTT is below a threshold, then we
+  // nack the residual, based on a decision made in the JB.
 
-    // Compute the protection factors
-    VCMFecMethod::ProtectionFactor(parameters);
-    if (_lowRttNackMs == -1 || parameters->rtt < _lowRttNackMs)
-    {
-        _protectionFactorD = 0;
-        VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
-    }
+  // Compute the protection factors
+  VCMFecMethod::ProtectionFactor(parameters);
+  if (_lowRttNackMs == -1 || parameters->rtt < _lowRttNackMs) {
+    _protectionFactorD = 0;
+    VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
 
     // When in Hybrid mode (RTT range), adjust FEC rates based on the
     // RTT (NACK effectiveness) - adjustment factor is in the range [0,1].
-    else if (_highRttNackMs == -1 || parameters->rtt < _highRttNackMs)
-    {
-        // TODO(mikhal): Disabling adjustment temporarily.
-        // uint16_t rttIndex = (uint16_t) parameters->rtt;
-        float adjustRtt = 1.0f;// (float)VCMNackFecTable[rttIndex] / 100.0f;
+  } else if (_highRttNackMs == -1 || parameters->rtt < _highRttNackMs) {
+    // TODO(mikhal): Disabling adjustment temporarily.
+    // uint16_t rttIndex = (uint16_t) parameters->rtt;
+    float adjustRtt = 1.0f;  // (float)VCMNackFecTable[rttIndex] / 100.0f;
 
-        // Adjust FEC with NACK on (for delta frame only)
-        // table depends on RTT relative to rttMax (NACK Threshold)
-        _protectionFactorD = static_cast<uint8_t>
-                            (adjustRtt *
-                             static_cast<float>(_protectionFactorD));
-        // update FEC rates after applying adjustment
-        VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
-    }
+    // Adjust FEC with NACK on (for delta frame only)
+    // table depends on RTT relative to rttMax (NACK Threshold)
+    _protectionFactorD = static_cast<uint8_t>(
+        adjustRtt * static_cast<float>(_protectionFactorD));
+    // update FEC rates after applying adjustment
+    VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
+  }
 
-    return true;
+  return true;
 }
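The three operational modes listed in the comment at the top of ProtectionFactor() can be summarized by the standalone sketch below. It is purely illustrative and not part of this CL; only the RTT thresholds and the meaning of -1 mirror the code above.

#include <stdint.h>

// Illustrative only: mirrors the RTT thresholds in
// VCMNackFecMethod::ProtectionFactor(). As in that code, a threshold of -1
// makes the corresponding branch unconditional.
enum class HybridProtectionMode { kNackOnly, kHybrid, kFecOnly };

HybridProtectionMode SelectMode(int64_t rttMs,
                                int64_t lowRttNackMs,
                                int64_t highRttNackMs) {
  if (lowRttNackMs == -1 || rttMs < lowRttNackMs)
    return HybridProtectionMode::kNackOnly;  // FEC delta rate forced to zero.
  if (highRttNackMs == -1 || rttMs < highRttNackMs)
    return HybridProtectionMode::kHybrid;  // FEC rate scaled by adjustRtt.
  return HybridProtectionMode::kFecOnly;  // Keep the computed FEC factors.
}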
 
 int VCMNackFecMethod::ComputeMaxFramesFec(
@@ -123,11 +114,13 @@
   // we will have complete frames in one RTT. Note that this is an upper
   // bound, and that the actual number of frames used for FEC is decided by the
   // RTP module based on the actual number of packets and the protection factor.
-  float base_layer_framerate = parameters->frameRate /
+  float base_layer_framerate =
+      parameters->frameRate /
       static_cast<float>(1 << (parameters->numLayers - 1));
-  int max_frames_fec = std::max(static_cast<int>(
-      2.0f * base_layer_framerate * parameters->rtt /
-      1000.0f + 0.5f), 1);
+  int max_frames_fec = std::max(
+      static_cast<int>(2.0f * base_layer_framerate * parameters->rtt / 1000.0f +
+                       0.5f),
+      1);
   // |kUpperLimitFramesFec| is the upper limit on how many frames we
   // allow any FEC to be based on.
   if (max_frames_fec > kUpperLimitFramesFec) {
@@ -155,325 +148,285 @@
   } else if (num_pixels > 640 * 480) {
     max_bytes_per_frame = kMaxBytesPerFrameForFecHigh;
   }
-  // TODO (marpan): add condition based on maximum frames used for FEC,
+  // TODO(marpan): add condition based on maximum frames used for FEC,
   // and expand condition based on frame size.
   // Max round trip time threshold in ms.
   const int64_t kMaxRttTurnOffFec = 200;
   if (estimate_bytes_per_frame < max_bytes_per_frame &&
-      parameters->numLayers < 3 &&
-      parameters->rtt < kMaxRttTurnOffFec) {
+      parameters->numLayers < 3 && parameters->rtt < kMaxRttTurnOffFec) {
     return true;
   }
   return false;
 }
 
-bool
-VCMNackFecMethod::EffectivePacketLoss(const VCMProtectionParameters* parameters)
-{
-    // Set the effective packet loss for encoder (based on FEC code).
-    // Compute the effective packet loss and residual packet loss due to FEC.
-    VCMFecMethod::EffectivePacketLoss(parameters);
-    return true;
+bool VCMNackFecMethod::EffectivePacketLoss(
+    const VCMProtectionParameters* parameters) {
+  // Set the effective packet loss for encoder (based on FEC code).
+  // Compute the effective packet loss and residual packet loss due to FEC.
+  VCMFecMethod::EffectivePacketLoss(parameters);
+  return true;
 }
 
-bool
-VCMNackFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
-{
-    ProtectionFactor(parameters);
-    EffectivePacketLoss(parameters);
-    _maxFramesFec = ComputeMaxFramesFec(parameters);
-    if (BitRateTooLowForFec(parameters)) {
-      _protectionFactorK = 0;
-      _protectionFactorD = 0;
-    }
+bool VCMNackFecMethod::UpdateParameters(
+    const VCMProtectionParameters* parameters) {
+  ProtectionFactor(parameters);
+  EffectivePacketLoss(parameters);
+  _maxFramesFec = ComputeMaxFramesFec(parameters);
+  if (BitRateTooLowForFec(parameters)) {
+    _protectionFactorK = 0;
+    _protectionFactorD = 0;
+  }
 
-    // Protection/fec rates obtained above are defined relative to total number
-    // of packets (total rate: source + fec) FEC in RTP module assumes
-    // protection factor is defined relative to source number of packets so we
-    // should convert the factor to reduce mismatch between mediaOpt's rate and
-    // the actual one
-    _protectionFactorK = VCMFecMethod::ConvertFECRate(_protectionFactorK);
-    _protectionFactorD = VCMFecMethod::ConvertFECRate(_protectionFactorD);
+  // Protection/fec rates obtained above are defined relative to total number
+  // of packets (total rate: source + fec) FEC in RTP module assumes
+  // protection factor is defined relative to source number of packets so we
+  // should convert the factor to reduce mismatch between mediaOpt's rate and
+  // the actual one
+  _protectionFactorK = VCMFecMethod::ConvertFECRate(_protectionFactorK);
+  _protectionFactorD = VCMFecMethod::ConvertFECRate(_protectionFactorD);
 
-    return true;
+  return true;
 }
 
-VCMNackMethod::VCMNackMethod():
-VCMProtectionMethod()
-{
-    _type = kNack;
+VCMNackMethod::VCMNackMethod() : VCMProtectionMethod() {
+  _type = kNack;
 }
 
-VCMNackMethod::~VCMNackMethod()
-{
-    //
+VCMNackMethod::~VCMNackMethod() {
+  //
 }
 
-bool
-VCMNackMethod::EffectivePacketLoss(const VCMProtectionParameters* parameter)
-{
-    // Effective Packet Loss, NA in current version.
-    _effectivePacketLoss = 0;
-    return true;
+bool VCMNackMethod::EffectivePacketLoss(
+    const VCMProtectionParameters* parameter) {
+  // Effective Packet Loss, NA in current version.
+  _effectivePacketLoss = 0;
+  return true;
 }
 
-bool
-VCMNackMethod::UpdateParameters(const VCMProtectionParameters* parameters)
-{
-    // Compute the effective packet loss
-    EffectivePacketLoss(parameters);
+bool VCMNackMethod::UpdateParameters(
+    const VCMProtectionParameters* parameters) {
+  // Compute the effective packet loss
+  EffectivePacketLoss(parameters);
 
-    // nackCost  = (bitRate - nackCost) * (lossPr)
-    return true;
+  // nackCost  = (bitRate - nackCost) * (lossPr)
+  return true;
 }
 
-VCMFecMethod::VCMFecMethod():
-VCMProtectionMethod()
-{
-    _type = kFec;
+VCMFecMethod::VCMFecMethod() : VCMProtectionMethod() {
+  _type = kFec;
 }
-VCMFecMethod::~VCMFecMethod()
-{
-    //
+VCMFecMethod::~VCMFecMethod() {
+  //
 }
 
-uint8_t
-VCMFecMethod::BoostCodeRateKey(uint8_t packetFrameDelta,
-                               uint8_t packetFrameKey) const
-{
-    uint8_t boostRateKey = 2;
-    // Default: ratio scales the FEC protection up for I frames
-    uint8_t ratio = 1;
+uint8_t VCMFecMethod::BoostCodeRateKey(uint8_t packetFrameDelta,
+                                       uint8_t packetFrameKey) const {
+  uint8_t boostRateKey = 2;
+  // Default: ratio scales the FEC protection up for I frames
+  uint8_t ratio = 1;
 
-    if (packetFrameDelta > 0)
-    {
-        ratio = (int8_t) (packetFrameKey / packetFrameDelta);
-    }
-    ratio = VCM_MAX(boostRateKey, ratio);
+  if (packetFrameDelta > 0) {
+    ratio = (int8_t)(packetFrameKey / packetFrameDelta);
+  }
+  ratio = VCM_MAX(boostRateKey, ratio);
 
-    return ratio;
+  return ratio;
 }
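A quick worked example of the boost computed here (the numbers are mine, not from the CL): with packetFrameDelta = 2 packets per delta frame and packetFrameKey = 10 packets per key frame, ratio = 10 / 2 = 5, and VCM_MAX(boostRateKey, ratio) keeps 5, so the key-frame table lookup in ProtectionFactor() is done at five times the effective rate used for delta frames.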
 
-uint8_t
-VCMFecMethod::ConvertFECRate(uint8_t codeRateRTP) const
-{
-    return static_cast<uint8_t> (VCM_MIN(255,(0.5 + 255.0 * codeRateRTP /
-                                      (float)(255 - codeRateRTP))));
+uint8_t VCMFecMethod::ConvertFECRate(uint8_t codeRateRTP) const {
+  return static_cast<uint8_t>(VCM_MIN(
+      255,
+      (0.5 + 255.0 * codeRateRTP / static_cast<float>(255 - codeRateRTP))));
 }
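ConvertFECRate() rescales a protection factor defined against the total packet count (source + FEC) into one defined against source packets only, roughly f_src = 255 * f_total / (255 - f_total), rounded. As a worked example (mine, not from the CL): a total-rate factor of 51, i.e. about 20% of all packets being FEC, converts to 255 * 51 / 204 + 0.5, which truncates to 64, about one FEC packet per four source packets and the same 20% share of the combined stream.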
 
 // Update FEC with protectionFactorD
-void
-VCMFecMethod::UpdateProtectionFactorD(uint8_t protectionFactorD)
-{
-    _protectionFactorD = protectionFactorD;
+void VCMFecMethod::UpdateProtectionFactorD(uint8_t protectionFactorD) {
+  _protectionFactorD = protectionFactorD;
 }
 
 // Update FEC with protectionFactorK
-void
-VCMFecMethod::UpdateProtectionFactorK(uint8_t protectionFactorK)
-{
-    _protectionFactorK = protectionFactorK;
+void VCMFecMethod::UpdateProtectionFactorK(uint8_t protectionFactorK) {
+  _protectionFactorK = protectionFactorK;
 }
 
-bool
-VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters)
-{
-    // FEC PROTECTION SETTINGS: varies with packet loss and bitrate
+bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
+  // FEC PROTECTION SETTINGS: varies with packet loss and bitrate
 
-    // No protection if (filtered) packetLoss is 0
-    uint8_t packetLoss = (uint8_t) (255 * parameters->lossPr);
-    if (packetLoss == 0)
-    {
-        _protectionFactorK = 0;
-        _protectionFactorD = 0;
-         return true;
-    }
-
-    // Parameters for FEC setting:
-    // first partition size, thresholds, table pars, spatial resoln fac.
-
-    // First partition protection: ~ 20%
-    uint8_t firstPartitionProt = (uint8_t) (255 * 0.20);
-
-    // Minimum protection level needed to generate one FEC packet for one
-    // source packet/frame (in RTP sender)
-    uint8_t minProtLevelFec = 85;
-
-    // Threshold on packetLoss and bitRrate/frameRate (=average #packets),
-    // above which we allocate protection to cover at least first partition.
-    uint8_t lossThr = 0;
-    uint8_t packetNumThr = 1;
-
-    // Parameters for range of rate index of table.
-    const uint8_t ratePar1 = 5;
-    const uint8_t ratePar2 = 49;
-
-    // Spatial resolution size, relative to a reference size.
-    float spatialSizeToRef = static_cast<float>
-                           (parameters->codecWidth * parameters->codecHeight) /
-                           (static_cast<float>(704 * 576));
-    // resolnFac: This parameter will generally increase/decrease the FEC rate
-    // (for fixed bitRate and packetLoss) based on system size.
-    // Use a smaller exponent (< 1) to control/soften system size effect.
-    const float resolnFac = 1.0 / powf(spatialSizeToRef, 0.3f);
-
-    const int bitRatePerFrame = BitsPerFrame(parameters);
-
-
-    // Average number of packets per frame (source and fec):
-    const uint8_t avgTotPackets = 1 + (uint8_t)
-                                        ((float) bitRatePerFrame * 1000.0
-                                       / (float) (8.0 * _maxPayloadSize) + 0.5);
-
-    // FEC rate parameters: for P and I frame
-    uint8_t codeRateDelta = 0;
-    uint8_t codeRateKey = 0;
-
-    // Get index for table: the FEC protection depends on an effective rate.
-    // The range on the rate index corresponds to rates (bps)
-    // from ~200k to ~8000k, for 30fps
-    const uint16_t effRateFecTable = static_cast<uint16_t>
-                                           (resolnFac * bitRatePerFrame);
-    uint8_t rateIndexTable =
-        (uint8_t) VCM_MAX(VCM_MIN((effRateFecTable - ratePar1) /
-                                         ratePar1, ratePar2), 0);
-
-    // Restrict packet loss range to 50:
-    // current tables defined only up to 50%
-    if (packetLoss >= kPacketLossMax)
-    {
-        packetLoss = kPacketLossMax - 1;
-    }
-    uint16_t indexTable = rateIndexTable * kPacketLossMax + packetLoss;
-
-    // Check on table index
-    assert(indexTable < kSizeCodeRateXORTable);
-
-    // Protection factor for P frame
-    codeRateDelta = kCodeRateXORTable[indexTable];
-
-    if (packetLoss > lossThr && avgTotPackets > packetNumThr)
-    {
-        // Set a minimum based on first partition size.
-        if (codeRateDelta < firstPartitionProt)
-        {
-            codeRateDelta = firstPartitionProt;
-        }
-    }
-
-    // Check limit on amount of protection for P frame; 50% is max.
-    if (codeRateDelta >= kPacketLossMax)
-    {
-        codeRateDelta = kPacketLossMax - 1;
-    }
-
-    float adjustFec = 1.0f;
-    // Avoid additional adjustments when layers are active.
-    // TODO(mikhal/marco): Update adjusmtent based on layer info.
-    if (parameters->numLayers == 1)
-    {
-        adjustFec = _qmRobustness->AdjustFecFactor(codeRateDelta,
-                                                   parameters->bitRate,
-                                                   parameters->frameRate,
-                                                   parameters->rtt,
-                                                   packetLoss);
-    }
-
-    codeRateDelta = static_cast<uint8_t>(codeRateDelta * adjustFec);
-
-    // For Key frame:
-    // Effectively at a higher rate, so we scale/boost the rate
-    // The boost factor may depend on several factors: ratio of packet
-    // number of I to P frames, how much protection placed on P frames, etc.
-    const uint8_t packetFrameDelta = (uint8_t)
-                                           (0.5 + parameters->packetsPerFrame);
-    const uint8_t packetFrameKey = (uint8_t)
-                                         (0.5 + parameters->packetsPerFrameKey);
-    const uint8_t boostKey = BoostCodeRateKey(packetFrameDelta,
-                                                    packetFrameKey);
-
-    rateIndexTable = (uint8_t) VCM_MAX(VCM_MIN(
-                      1 + (boostKey * effRateFecTable - ratePar1) /
-                      ratePar1,ratePar2),0);
-    uint16_t indexTableKey = rateIndexTable * kPacketLossMax + packetLoss;
-
-    indexTableKey = VCM_MIN(indexTableKey, kSizeCodeRateXORTable);
-
-    // Check on table index
-    assert(indexTableKey < kSizeCodeRateXORTable);
-
-    // Protection factor for I frame
-    codeRateKey = kCodeRateXORTable[indexTableKey];
-
-    // Boosting for Key frame.
-    int boostKeyProt = _scaleProtKey * codeRateDelta;
-    if (boostKeyProt >= kPacketLossMax)
-    {
-        boostKeyProt = kPacketLossMax - 1;
-    }
-
-    // Make sure I frame protection is at least larger than P frame protection,
-    // and at least as high as filtered packet loss.
-    codeRateKey = static_cast<uint8_t> (VCM_MAX(packetLoss,
-            VCM_MAX(boostKeyProt, codeRateKey)));
-
-    // Check limit on amount of protection for I frame: 50% is max.
-    if (codeRateKey >= kPacketLossMax)
-    {
-        codeRateKey = kPacketLossMax - 1;
-    }
-
-    _protectionFactorK = codeRateKey;
-    _protectionFactorD = codeRateDelta;
-
-    // Generally there is a rate mis-match between the FEC cost estimated
-    // in mediaOpt and the actual FEC cost sent out in RTP module.
-    // This is more significant at low rates (small # of source packets), where
-    // the granularity of the FEC decreases. In this case, non-zero protection
-    // in mediaOpt may generate 0 FEC packets in RTP sender (since actual #FEC
-    // is based on rounding off protectionFactor on actual source packet number).
-    // The correction factor (_corrFecCost) attempts to corrects this, at least
-    // for cases of low rates (small #packets) and low protection levels.
-
-    float numPacketsFl = 1.0f + ((float) bitRatePerFrame * 1000.0
-                                / (float) (8.0 * _maxPayloadSize) + 0.5);
-
-    const float estNumFecGen = 0.5f + static_cast<float> (_protectionFactorD *
-                                                         numPacketsFl / 255.0f);
-
-
-    // We reduce cost factor (which will reduce overhead for FEC and
-    // hybrid method) and not the protectionFactor.
-    _corrFecCost = 1.0f;
-    if (estNumFecGen < 1.1f && _protectionFactorD < minProtLevelFec)
-    {
-        _corrFecCost = 0.5f;
-    }
-    if (estNumFecGen < 0.9f && _protectionFactorD < minProtLevelFec)
-    {
-        _corrFecCost = 0.0f;
-    }
-
-     // TODO (marpan): Set the UEP protection on/off for Key and Delta frames
-    _useUepProtectionK = _qmRobustness->SetUepProtection(codeRateKey,
-                                                         parameters->bitRate,
-                                                         packetLoss,
-                                                         0);
-
-    _useUepProtectionD = _qmRobustness->SetUepProtection(codeRateDelta,
-                                                         parameters->bitRate,
-                                                         packetLoss,
-                                                         1);
-
-    // DONE WITH FEC PROTECTION SETTINGS
+  // No protection if (filtered) packetLoss is 0
+  uint8_t packetLoss = (uint8_t)(255 * parameters->lossPr);
+  if (packetLoss == 0) {
+    _protectionFactorK = 0;
+    _protectionFactorD = 0;
     return true;
+  }
+
+  // Parameters for FEC setting:
+  // first partition size, thresholds, table parameters, spatial resolution
+  // factor.
+
+  // First partition protection: ~ 20%
+  uint8_t firstPartitionProt = (uint8_t)(255 * 0.20);
+
+  // Minimum protection level needed to generate one FEC packet for one
+  // source packet/frame (in RTP sender)
+  uint8_t minProtLevelFec = 85;
+
+  // Threshold on packetLoss and bitRate/frameRate (=average #packets),
+  // above which we allocate protection to cover at least first partition.
+  uint8_t lossThr = 0;
+  uint8_t packetNumThr = 1;
+
+  // Parameters for range of rate index of table.
+  const uint8_t ratePar1 = 5;
+  const uint8_t ratePar2 = 49;
+
+  // Spatial resolution size, relative to a reference size.
+  float spatialSizeToRef =
+      static_cast<float>(parameters->codecWidth * parameters->codecHeight) /
+      (static_cast<float>(704 * 576));
+  // resolnFac: This parameter will generally increase/decrease the FEC rate
+  // (for fixed bitRate and packetLoss) based on system size.
+  // Use a smaller exponent (< 1) to control/soften system size effect.
+  const float resolnFac = 1.0 / powf(spatialSizeToRef, 0.3f);
+
+  const int bitRatePerFrame = BitsPerFrame(parameters);
+
+  // Average number of packets per frame (source and fec):
+  const uint8_t avgTotPackets =
+      1 + (uint8_t)(static_cast<float>(bitRatePerFrame) * 1000.0 /
+                        static_cast<float>(8.0 * _maxPayloadSize) +
+                    0.5);
+
+  // FEC rate parameters: for P and I frame
+  uint8_t codeRateDelta = 0;
+  uint8_t codeRateKey = 0;
+
+  // Get index for table: the FEC protection depends on an effective rate.
+  // The range on the rate index corresponds to rates (bps)
+  // from ~200k to ~8000k, for 30fps
+  const uint16_t effRateFecTable =
+      static_cast<uint16_t>(resolnFac * bitRatePerFrame);
+  uint8_t rateIndexTable = (uint8_t)VCM_MAX(
+      VCM_MIN((effRateFecTable - ratePar1) / ratePar1, ratePar2), 0);
+
+  // Restrict packet loss range to 50:
+  // current tables defined only up to 50%
+  if (packetLoss >= kPacketLossMax) {
+    packetLoss = kPacketLossMax - 1;
+  }
+  uint16_t indexTable = rateIndexTable * kPacketLossMax + packetLoss;
+
+  // Check on table index
+  assert(indexTable < kSizeCodeRateXORTable);
+
+  // Protection factor for P frame
+  codeRateDelta = kCodeRateXORTable[indexTable];
+
+  if (packetLoss > lossThr && avgTotPackets > packetNumThr) {
+    // Set a minimum based on first partition size.
+    if (codeRateDelta < firstPartitionProt) {
+      codeRateDelta = firstPartitionProt;
+    }
+  }
+
+  // Check limit on amount of protection for P frame; 50% is max.
+  if (codeRateDelta >= kPacketLossMax) {
+    codeRateDelta = kPacketLossMax - 1;
+  }
+
+  float adjustFec = 1.0f;
+  // Avoid additional adjustments when layers are active.
+  // TODO(mikhal/marco): Update adjustment based on layer info.
+  if (parameters->numLayers == 1) {
+    adjustFec = _qmRobustness->AdjustFecFactor(
+        codeRateDelta, parameters->bitRate, parameters->frameRate,
+        parameters->rtt, packetLoss);
+  }
+
+  codeRateDelta = static_cast<uint8_t>(codeRateDelta * adjustFec);
+
+  // For Key frame:
+  // Effectively at a higher rate, so we scale/boost the rate
+  // The boost factor may depend on several factors: ratio of packet
+  // number of I to P frames, how much protection placed on P frames, etc.
+  const uint8_t packetFrameDelta = (uint8_t)(0.5 + parameters->packetsPerFrame);
+  const uint8_t packetFrameKey =
+      (uint8_t)(0.5 + parameters->packetsPerFrameKey);
+  const uint8_t boostKey = BoostCodeRateKey(packetFrameDelta, packetFrameKey);
+
+  rateIndexTable = (uint8_t)VCM_MAX(
+      VCM_MIN(1 + (boostKey * effRateFecTable - ratePar1) / ratePar1, ratePar2),
+      0);
+  uint16_t indexTableKey = rateIndexTable * kPacketLossMax + packetLoss;
+
+  indexTableKey = VCM_MIN(indexTableKey, kSizeCodeRateXORTable);
+
+  // Check on table index
+  assert(indexTableKey < kSizeCodeRateXORTable);
+
+  // Protection factor for I frame
+  codeRateKey = kCodeRateXORTable[indexTableKey];
+
+  // Boosting for Key frame.
+  int boostKeyProt = _scaleProtKey * codeRateDelta;
+  if (boostKeyProt >= kPacketLossMax) {
+    boostKeyProt = kPacketLossMax - 1;
+  }
+
+  // Make sure I frame protection is at least larger than P frame protection,
+  // and at least as high as filtered packet loss.
+  codeRateKey = static_cast<uint8_t>(
+      VCM_MAX(packetLoss, VCM_MAX(boostKeyProt, codeRateKey)));
+
+  // Check limit on amount of protection for I frame: 50% is max.
+  if (codeRateKey >= kPacketLossMax) {
+    codeRateKey = kPacketLossMax - 1;
+  }
+
+  _protectionFactorK = codeRateKey;
+  _protectionFactorD = codeRateDelta;
+
+  // Generally there is a rate mis-match between the FEC cost estimated
+  // in mediaOpt and the actual FEC cost sent out in RTP module.
+  // This is more significant at low rates (small # of source packets), where
+  // the granularity of the FEC decreases. In this case, non-zero protection
+  // in mediaOpt may generate 0 FEC packets in RTP sender (since actual #FEC
+  // is based on rounding off protectionFactor on actual source packet number).
+  // The correction factor (_corrFecCost) attempts to correct this, at least
+  // for cases of low rates (small #packets) and low protection levels.
+
+  float numPacketsFl = 1.0f + (static_cast<float>(bitRatePerFrame) * 1000.0 /
+                                   static_cast<float>(8.0 * _maxPayloadSize) +
+                               0.5);
+
+  const float estNumFecGen =
+      0.5f + static_cast<float>(_protectionFactorD * numPacketsFl / 255.0f);
+
+  // We reduce cost factor (which will reduce overhead for FEC and
+  // hybrid method) and not the protectionFactor.
+  _corrFecCost = 1.0f;
+  if (estNumFecGen < 1.1f && _protectionFactorD < minProtLevelFec) {
+    _corrFecCost = 0.5f;
+  }
+  if (estNumFecGen < 0.9f && _protectionFactorD < minProtLevelFec) {
+    _corrFecCost = 0.0f;
+  }
+
+  // TODO(marpan): Set the UEP protection on/off for Key and Delta frames
+  _useUepProtectionK = _qmRobustness->SetUepProtection(
+      codeRateKey, parameters->bitRate, packetLoss, 0);
+
+  _useUepProtectionD = _qmRobustness->SetUepProtection(
+      codeRateDelta, parameters->bitRate, packetLoss, 1);
+
+  // DONE WITH FEC PROTECTION SETTINGS
+  return true;
 }
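The _corrFecCost correction at the end of ProtectionFactor() is easiest to see with numbers (the figures below are illustrative, not from the CL): with roughly three packets per frame (numPacketsFl ≈ 3) and _protectionFactorD = 25 (about 10% protection), estNumFecGen = 0.5 + 25 * 3 / 255 ≈ 0.79. That is below 0.9 and the protection level is below minProtLevelFec (85), so _corrFecCost drops to 0: mediaOpt stops charging FEC overhead for delta frames because the RTP sender would most likely round the FEC packet count down to zero anyway.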
 
 int VCMFecMethod::BitsPerFrame(const VCMProtectionParameters* parameters) {
   // When temporal layers are available FEC will only be applied on the base
   // layer.
   const float bitRateRatio =
-    kVp8LayerRateAlloction[parameters->numLayers - 1][0];
+      kVp8LayerRateAlloction[parameters->numLayers - 1][0];
   float frameRateRatio = powf(1 / 2.0, parameters->numLayers - 1);
   float bitRate = parameters->bitRate * bitRateRatio;
   float frameRate = parameters->frameRate * frameRateRatio;
@@ -485,64 +438,59 @@
   return static_cast<int>(adjustmentFactor * bitRate / frameRate);
 }
 
-bool
-VCMFecMethod::EffectivePacketLoss(const VCMProtectionParameters* parameters)
-{
-    // Effective packet loss to encoder is based on RPL (residual packet loss)
-    // this is a soft setting based on degree of FEC protection
-    // RPL = received/input packet loss - average_FEC_recovery
-    // note: received/input packet loss may be filtered based on FilteredLoss
+bool VCMFecMethod::EffectivePacketLoss(
+    const VCMProtectionParameters* parameters) {
+  // Effective packet loss to encoder is based on RPL (residual packet loss);
+  // this is a soft setting based on the degree of FEC protection.
+  // RPL = received/input packet loss - average_FEC_recovery
+  // note: received/input packet loss may be filtered based on FilteredLoss
 
-    // Effective Packet Loss, NA in current version.
-    _effectivePacketLoss = 0;
+  // Effective Packet Loss, NA in current version.
+  _effectivePacketLoss = 0;
 
-    return true;
+  return true;
 }
 
-bool
-VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
-{
-    // Compute the protection factor
-    ProtectionFactor(parameters);
+bool VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters) {
+  // Compute the protection factor
+  ProtectionFactor(parameters);
 
-    // Compute the effective packet loss
-    EffectivePacketLoss(parameters);
+  // Compute the effective packet loss
+  EffectivePacketLoss(parameters);
 
-    // Protection/fec rates obtained above is defined relative to total number
-    // of packets (total rate: source+fec) FEC in RTP module assumes protection
-    // factor is defined relative to source number of packets so we should
-    // convert the factor to reduce mismatch between mediaOpt suggested rate and
-    // the actual rate
-    _protectionFactorK = ConvertFECRate(_protectionFactorK);
-    _protectionFactorD = ConvertFECRate(_protectionFactorD);
+  // Protection/fec rates obtained above are defined relative to total number
+  // of packets (total rate: source+fec) FEC in RTP module assumes protection
+  // factor is defined relative to source number of packets so we should
+  // convert the factor to reduce mismatch between mediaOpt suggested rate and
+  // the actual rate
+  _protectionFactorK = ConvertFECRate(_protectionFactorK);
+  _protectionFactorD = ConvertFECRate(_protectionFactorD);
 
-    return true;
+  return true;
 }
-VCMLossProtectionLogic::VCMLossProtectionLogic(int64_t nowMs):
-_currentParameters(),
-_rtt(0),
-_lossPr(0.0f),
-_bitRate(0.0f),
-_frameRate(0.0f),
-_keyFrameSize(0.0f),
-_fecRateKey(0),
-_fecRateDelta(0),
-_lastPrUpdateT(0),
-_lossPr255(0.9999f),
-_lossPrHistory(),
-_shortMaxLossPr255(0),
-_packetsPerFrame(0.9999f),
-_packetsPerFrameKey(0.9999f),
-_codecWidth(0),
-_codecHeight(0),
-_numLayers(1)
-{
-    Reset(nowMs);
+VCMLossProtectionLogic::VCMLossProtectionLogic(int64_t nowMs)
+    : _currentParameters(),
+      _rtt(0),
+      _lossPr(0.0f),
+      _bitRate(0.0f),
+      _frameRate(0.0f),
+      _keyFrameSize(0.0f),
+      _fecRateKey(0),
+      _fecRateDelta(0),
+      _lastPrUpdateT(0),
+      _lossPr255(0.9999f),
+      _lossPrHistory(),
+      _shortMaxLossPr255(0),
+      _packetsPerFrame(0.9999f),
+      _packetsPerFrameKey(0.9999f),
+      _codecWidth(0),
+      _codecHeight(0),
+      _numLayers(1) {
+  Reset(nowMs);
 }
 
-VCMLossProtectionLogic::~VCMLossProtectionLogic()
-{
-    Release();
+VCMLossProtectionLogic::~VCMLossProtectionLogic() {
+  Release();
 }
 
 void VCMLossProtectionLogic::SetMethod(
@@ -550,7 +498,7 @@
   if (_selectedMethod && _selectedMethod->Type() == newMethodType)
     return;
 
-  switch(newMethodType) {
+  switch (newMethodType) {
     case kNack:
       _selectedMethod.reset(new VCMNackMethod());
       break;
@@ -567,92 +515,70 @@
   UpdateMethod();
 }
 
-void
-VCMLossProtectionLogic::UpdateRtt(int64_t rtt)
-{
-    _rtt = rtt;
+void VCMLossProtectionLogic::UpdateRtt(int64_t rtt) {
+  _rtt = rtt;
 }
 
-void
-VCMLossProtectionLogic::UpdateMaxLossHistory(uint8_t lossPr255,
-                                             int64_t now)
-{
-    if (_lossPrHistory[0].timeMs >= 0 &&
-        now - _lossPrHistory[0].timeMs < kLossPrShortFilterWinMs)
-    {
-        if (lossPr255 > _shortMaxLossPr255)
-        {
-            _shortMaxLossPr255 = lossPr255;
-        }
+void VCMLossProtectionLogic::UpdateMaxLossHistory(uint8_t lossPr255,
+                                                  int64_t now) {
+  if (_lossPrHistory[0].timeMs >= 0 &&
+      now - _lossPrHistory[0].timeMs < kLossPrShortFilterWinMs) {
+    if (lossPr255 > _shortMaxLossPr255) {
+      _shortMaxLossPr255 = lossPr255;
     }
-    else
-    {
-        // Only add a new value to the history once a second
-        if (_lossPrHistory[0].timeMs == -1)
-        {
-            // First, no shift
-            _shortMaxLossPr255 = lossPr255;
-        }
-        else
-        {
-            // Shift
-            for (int32_t i = (kLossPrHistorySize - 2); i >= 0; i--)
-            {
-                _lossPrHistory[i + 1].lossPr255 = _lossPrHistory[i].lossPr255;
-                _lossPrHistory[i + 1].timeMs = _lossPrHistory[i].timeMs;
-            }
-        }
-        if (_shortMaxLossPr255 == 0)
-        {
-            _shortMaxLossPr255 = lossPr255;
-        }
+  } else {
+    // Only add a new value to the history once a second
+    if (_lossPrHistory[0].timeMs == -1) {
+      // First, no shift
+      _shortMaxLossPr255 = lossPr255;
+    } else {
+      // Shift
+      for (int32_t i = (kLossPrHistorySize - 2); i >= 0; i--) {
+        _lossPrHistory[i + 1].lossPr255 = _lossPrHistory[i].lossPr255;
+        _lossPrHistory[i + 1].timeMs = _lossPrHistory[i].timeMs;
+      }
+    }
+    if (_shortMaxLossPr255 == 0) {
+      _shortMaxLossPr255 = lossPr255;
+    }
 
-        _lossPrHistory[0].lossPr255 = _shortMaxLossPr255;
-        _lossPrHistory[0].timeMs = now;
-        _shortMaxLossPr255 = 0;
-    }
+    _lossPrHistory[0].lossPr255 = _shortMaxLossPr255;
+    _lossPrHistory[0].timeMs = now;
+    _shortMaxLossPr255 = 0;
+  }
 }
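Together with MaxFilteredLossPr() below, this history implements the max-window filter referenced in media_opt_util.h: one maximum is kept per kLossPrShortFilterWinMs window (the "once a second" comment above), and kLossPrHistorySize = 10 such windows are stored, so the filter reports the largest loss seen over roughly the last ten windows.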
 
-uint8_t
-VCMLossProtectionLogic::MaxFilteredLossPr(int64_t nowMs) const
-{
-    uint8_t maxFound = _shortMaxLossPr255;
-    if (_lossPrHistory[0].timeMs == -1)
-    {
-        return maxFound;
-    }
-    for (int32_t i = 0; i < kLossPrHistorySize; i++)
-    {
-        if (_lossPrHistory[i].timeMs == -1)
-        {
-            break;
-        }
-        if (nowMs - _lossPrHistory[i].timeMs >
-            kLossPrHistorySize * kLossPrShortFilterWinMs)
-        {
-            // This sample (and all samples after this) is too old
-            break;
-        }
-        if (_lossPrHistory[i].lossPr255 > maxFound)
-        {
-            // This sample is the largest one this far into the history
-            maxFound = _lossPrHistory[i].lossPr255;
-        }
-    }
+uint8_t VCMLossProtectionLogic::MaxFilteredLossPr(int64_t nowMs) const {
+  uint8_t maxFound = _shortMaxLossPr255;
+  if (_lossPrHistory[0].timeMs == -1) {
     return maxFound;
+  }
+  for (int32_t i = 0; i < kLossPrHistorySize; i++) {
+    if (_lossPrHistory[i].timeMs == -1) {
+      break;
+    }
+    if (nowMs - _lossPrHistory[i].timeMs >
+        kLossPrHistorySize * kLossPrShortFilterWinMs) {
+      // This sample (and all samples after this) is too old
+      break;
+    }
+    if (_lossPrHistory[i].lossPr255 > maxFound) {
+      // This sample is the largest one this far into the history
+      maxFound = _lossPrHistory[i].lossPr255;
+    }
+  }
+  return maxFound;
 }
 
-uint8_t VCMLossProtectionLogic::FilteredLoss(
-    int64_t nowMs,
-    FilterPacketLossMode filter_mode,
-    uint8_t lossPr255) {
-
+uint8_t VCMLossProtectionLogic::FilteredLoss(int64_t nowMs,
+                                             FilterPacketLossMode filter_mode,
+                                             uint8_t lossPr255) {
   // Update the max window filter.
   UpdateMaxLossHistory(lossPr255, nowMs);
 
   // Update the recursive average filter.
-  _lossPr255.Apply(static_cast<float> (nowMs - _lastPrUpdateT),
-                   static_cast<float> (lossPr255));
+  _lossPr255.Apply(static_cast<float>(nowMs - _lastPrUpdateT),
+                   static_cast<float>(lossPr255));
   _lastPrUpdateT = nowMs;
 
   // Filtered loss: default is received loss (no filtering).
@@ -672,98 +598,80 @@
   return filtered_loss;
 }
 
-void
-VCMLossProtectionLogic::UpdateFilteredLossPr(uint8_t packetLossEnc)
-{
-    _lossPr = (float) packetLossEnc / (float) 255.0;
+void VCMLossProtectionLogic::UpdateFilteredLossPr(uint8_t packetLossEnc) {
+  _lossPr = static_cast<float>(packetLossEnc) / 255.0;
 }
 
-void
-VCMLossProtectionLogic::UpdateBitRate(float bitRate)
-{
-    _bitRate = bitRate;
+void VCMLossProtectionLogic::UpdateBitRate(float bitRate) {
+  _bitRate = bitRate;
 }
 
-void
-VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets, int64_t nowMs)
-{
-    _packetsPerFrame.Apply(static_cast<float>(nowMs - _lastPacketPerFrameUpdateT),
-                           nPackets);
-    _lastPacketPerFrameUpdateT = nowMs;
+void VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets,
+                                                   int64_t nowMs) {
+  _packetsPerFrame.Apply(static_cast<float>(nowMs - _lastPacketPerFrameUpdateT),
+                         nPackets);
+  _lastPacketPerFrameUpdateT = nowMs;
 }
 
-void
-VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs)
-{
-    _packetsPerFrameKey.Apply(static_cast<float>(nowMs -
-                              _lastPacketPerFrameUpdateTKey), nPackets);
-    _lastPacketPerFrameUpdateTKey = nowMs;
+void VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets,
+                                                      int64_t nowMs) {
+  _packetsPerFrameKey.Apply(
+      static_cast<float>(nowMs - _lastPacketPerFrameUpdateTKey), nPackets);
+  _lastPacketPerFrameUpdateTKey = nowMs;
 }
 
-void
-VCMLossProtectionLogic::UpdateKeyFrameSize(float keyFrameSize)
-{
-    _keyFrameSize = keyFrameSize;
+void VCMLossProtectionLogic::UpdateKeyFrameSize(float keyFrameSize) {
+  _keyFrameSize = keyFrameSize;
 }
 
-void
-VCMLossProtectionLogic::UpdateFrameSize(uint16_t width,
-                                        uint16_t height)
-{
-    _codecWidth = width;
-    _codecHeight = height;
+void VCMLossProtectionLogic::UpdateFrameSize(uint16_t width, uint16_t height) {
+  _codecWidth = width;
+  _codecHeight = height;
 }
 
 void VCMLossProtectionLogic::UpdateNumLayers(int numLayers) {
   _numLayers = (numLayers == 0) ? 1 : numLayers;
 }
 
-bool
-VCMLossProtectionLogic::UpdateMethod()
-{
-    if (!_selectedMethod)
-      return false;
-    _currentParameters.rtt = _rtt;
-    _currentParameters.lossPr = _lossPr;
-    _currentParameters.bitRate = _bitRate;
-    _currentParameters.frameRate = _frameRate; // rename actual frame rate?
-    _currentParameters.keyFrameSize = _keyFrameSize;
-    _currentParameters.fecRateDelta = _fecRateDelta;
-    _currentParameters.fecRateKey = _fecRateKey;
-    _currentParameters.packetsPerFrame = _packetsPerFrame.filtered();
-    _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.filtered();
-    _currentParameters.codecWidth = _codecWidth;
-    _currentParameters.codecHeight = _codecHeight;
-    _currentParameters.numLayers = _numLayers;
-    return _selectedMethod->UpdateParameters(&_currentParameters);
+bool VCMLossProtectionLogic::UpdateMethod() {
+  if (!_selectedMethod)
+    return false;
+  _currentParameters.rtt = _rtt;
+  _currentParameters.lossPr = _lossPr;
+  _currentParameters.bitRate = _bitRate;
+  _currentParameters.frameRate = _frameRate;  // rename actual frame rate?
+  _currentParameters.keyFrameSize = _keyFrameSize;
+  _currentParameters.fecRateDelta = _fecRateDelta;
+  _currentParameters.fecRateKey = _fecRateKey;
+  _currentParameters.packetsPerFrame = _packetsPerFrame.filtered();
+  _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.filtered();
+  _currentParameters.codecWidth = _codecWidth;
+  _currentParameters.codecHeight = _codecHeight;
+  _currentParameters.numLayers = _numLayers;
+  return _selectedMethod->UpdateParameters(&_currentParameters);
 }
 
-VCMProtectionMethod*
-VCMLossProtectionLogic::SelectedMethod() const
-{
-    return _selectedMethod.get();
+VCMProtectionMethod* VCMLossProtectionLogic::SelectedMethod() const {
+  return _selectedMethod.get();
 }
 
 VCMProtectionMethodEnum VCMLossProtectionLogic::SelectedType() const {
   return _selectedMethod ? _selectedMethod->Type() : kNone;
 }
 
-void
-VCMLossProtectionLogic::Reset(int64_t nowMs)
-{
-    _lastPrUpdateT = nowMs;
-    _lastPacketPerFrameUpdateT = nowMs;
-    _lastPacketPerFrameUpdateTKey = nowMs;
-    _lossPr255.Reset(0.9999f);
-    _packetsPerFrame.Reset(0.9999f);
-    _fecRateDelta = _fecRateKey = 0;
-    for (int32_t i = 0; i < kLossPrHistorySize; i++)
-    {
-        _lossPrHistory[i].lossPr255 = 0;
-        _lossPrHistory[i].timeMs = -1;
-    }
-    _shortMaxLossPr255 = 0;
-    Release();
+void VCMLossProtectionLogic::Reset(int64_t nowMs) {
+  _lastPrUpdateT = nowMs;
+  _lastPacketPerFrameUpdateT = nowMs;
+  _lastPacketPerFrameUpdateTKey = nowMs;
+  _lossPr255.Reset(0.9999f);
+  _packetsPerFrame.Reset(0.9999f);
+  _fecRateDelta = _fecRateKey = 0;
+  for (int32_t i = 0; i < kLossPrHistorySize; i++) {
+    _lossPrHistory[i].lossPr255 = 0;
+    _lossPrHistory[i].timeMs = -1;
+  }
+  _shortMaxLossPr255 = 0;
+  Release();
 }
 
 void VCMLossProtectionLogic::Release() {
diff --git a/webrtc/modules/video_coding/media_opt_util.h b/webrtc/modules/video_coding/media_opt_util.h
index 9be7167..a016a03 100644
--- a/webrtc/modules/video_coding/media_opt_util.h
+++ b/webrtc/modules/video_coding/media_opt_util.h
@@ -25,7 +25,7 @@
 namespace media_optimization {
 
 // Number of time periods used for (max) window filter for packet loss
-// TODO (marpan): set reasonable window size for filtered packet loss,
+// TODO(marpan): set reasonable window size for filtered packet loss,
 // adjustment should be based on logged/real data of loss stats/correlation.
 enum { kLossPrHistorySize = 10 };
 
@@ -34,331 +34,328 @@
 
 // The type of filter used on the received packet loss reports.
 enum FilterPacketLossMode {
-  kNoFilter,    // No filtering on received loss.
-  kAvgFilter,   // Recursive average filter.
-  kMaxFilter    // Max-window filter, over the time interval of:
-                // (kLossPrHistorySize * kLossPrShortFilterWinMs) ms.
+  kNoFilter,   // No filtering on received loss.
+  kAvgFilter,  // Recursive average filter.
+  kMaxFilter   // Max-window filter, over the time interval of:
+               // (kLossPrHistorySize * kLossPrShortFilterWinMs) ms.
 };
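To make the three modes above concrete, a small hypothetical helper is sketched below; the function and its inputs are illustrative, only the per-mode semantics come from the comments in this enum.

#include "webrtc/modules/video_coding/media_opt_util.h"

namespace webrtc {
namespace media_optimization {

// Illustrative only: picks the loss figure each filter mode would report,
// given the raw report, the recursive average, and the short-window maximum.
uint8_t LossForMode(FilterPacketLossMode mode,
                    uint8_t received255,
                    uint8_t avg255,
                    uint8_t window_max255) {
  switch (mode) {
    case kNoFilter:
      return received255;  // No filtering on received loss.
    case kAvgFilter:
      return avg255;  // Recursive average filter.
    case kMaxFilter:
      return window_max255;  // Max over the short-window history.
  }
  return received255;
}

}  // namespace media_optimization
}  // namespace webrtc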
 
 // Thresholds for hybrid NACK/FEC
 // common to media optimization and the jitter buffer.
 const int64_t kLowRttNackMs = 20;
 
-struct VCMProtectionParameters
-{
-    VCMProtectionParameters() : rtt(0), lossPr(0.0f), bitRate(0.0f),
-        packetsPerFrame(0.0f), packetsPerFrameKey(0.0f), frameRate(0.0f),
-        keyFrameSize(0.0f), fecRateDelta(0), fecRateKey(0),
-        codecWidth(0), codecHeight(0),
-        numLayers(1)
-        {}
+struct VCMProtectionParameters {
+  VCMProtectionParameters()
+      : rtt(0),
+        lossPr(0.0f),
+        bitRate(0.0f),
+        packetsPerFrame(0.0f),
+        packetsPerFrameKey(0.0f),
+        frameRate(0.0f),
+        keyFrameSize(0.0f),
+        fecRateDelta(0),
+        fecRateKey(0),
+        codecWidth(0),
+        codecHeight(0),
+        numLayers(1) {}
 
-    int64_t             rtt;
-    float               lossPr;
-    float               bitRate;
-    float               packetsPerFrame;
-    float               packetsPerFrameKey;
-    float               frameRate;
-    float               keyFrameSize;
-    uint8_t       fecRateDelta;
-    uint8_t       fecRateKey;
-    uint16_t      codecWidth;
-    uint16_t      codecHeight;
-    int                 numLayers;
+  int64_t rtt;
+  float lossPr;
+  float bitRate;
+  float packetsPerFrame;
+  float packetsPerFrameKey;
+  float frameRate;
+  float keyFrameSize;
+  uint8_t fecRateDelta;
+  uint8_t fecRateKey;
+  uint16_t codecWidth;
+  uint16_t codecHeight;
+  int numLayers;
 };
 
-
 /******************************/
 /* VCMProtectionMethod class  */
 /******************************/
 
-enum VCMProtectionMethodEnum
-{
-    kNack,
-    kFec,
-    kNackFec,
-    kNone
+enum VCMProtectionMethodEnum { kNack, kFec, kNackFec, kNone };
+
+class VCMLossProbabilitySample {
+ public:
+  VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {}
+
+  uint8_t lossPr255;
+  int64_t timeMs;
 };
 
-class VCMLossProbabilitySample
-{
-public:
-    VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {};
+class VCMProtectionMethod {
+ public:
+  VCMProtectionMethod();
+  virtual ~VCMProtectionMethod();
 
-    uint8_t     lossPr255;
-    int64_t     timeMs;
+  // Updates the efficiency of the method using the parameters provided
+  //
+  // Input:
+  //         - parameters         : Parameters used to calculate efficiency
+  //
+  // Return value                 : True if this method is recommended in
+  //                                the given conditions.
+  virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;
+
+  // Returns the protection type
+  //
+  // Return value                 : The protection type
+  enum VCMProtectionMethodEnum Type() const { return _type; }
+
+  // Returns the effective packet loss for ER, required by this protection
+  // method
+  //
+  // Return value                 : Required effective packet loss
+  virtual uint8_t RequiredPacketLossER() { return _effectivePacketLoss; }
+
+  // Extracts the FEC protection factor for Key frame, required by this
+  // protection method
+  //
+  // Return value                 : Required protectionFactor for Key frame
+  virtual uint8_t RequiredProtectionFactorK() { return _protectionFactorK; }
+
+  // Extracts the FEC protection factor for Delta frame, required by this
+  // protection method
+  //
+  // Return value                 : Required protectionFactor for delta frame
+  virtual uint8_t RequiredProtectionFactorD() { return _protectionFactorD; }
+
+  // Extracts whether the FEC Unequal protection (UEP) is used for Key frame.
+  //
+  // Return value                 : Required Unequal protection on/off state.
+  virtual bool RequiredUepProtectionK() { return _useUepProtectionK; }
+
+  // Extracts whether the FEC Unequal protection (UEP) is used for Delta
+  // frame.
+  //
+  // Return value                 : Required Unequal protection on/off state.
+  virtual bool RequiredUepProtectionD() { return _useUepProtectionD; }
+
+  virtual int MaxFramesFec() const { return 1; }
+
+  // Updates content metrics
+  void UpdateContentMetrics(const VideoContentMetrics* contentMetrics);
+
+ protected:
+  uint8_t _effectivePacketLoss;
+  uint8_t _protectionFactorK;
+  uint8_t _protectionFactorD;
+  // Estimation of residual loss after the FEC
+  float _scaleProtKey;
+  int32_t _maxPayloadSize;
+
+  VCMQmRobustness* _qmRobustness;
+  bool _useUepProtectionK;
+  bool _useUepProtectionD;
+  float _corrFecCost;
+  enum VCMProtectionMethodEnum _type;
 };
 
-
-class VCMProtectionMethod
-{
-public:
-    VCMProtectionMethod();
-    virtual ~VCMProtectionMethod();
-
-    // Updates the efficiency of the method using the parameters provided
-    //
-    // Input:
-    //         - parameters         : Parameters used to calculate efficiency
-    //
-    // Return value                 : True if this method is recommended in
-    //                                the given conditions.
-    virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;
-
-    // Returns the protection type
-    //
-    // Return value                 : The protection type
-    enum VCMProtectionMethodEnum Type() const { return _type; }
-
-    // Returns the effective packet loss for ER, required by this protection method
-    //
-    // Return value                 : Required effective packet loss
-    virtual uint8_t RequiredPacketLossER() { return _effectivePacketLoss; }
-
-    // Extracts the FEC protection factor for Key frame, required by this protection method
-    //
-    // Return value                 : Required protectionFactor for Key frame
-    virtual uint8_t RequiredProtectionFactorK() { return _protectionFactorK; }
-
-    // Extracts the FEC protection factor for Delta frame, required by this protection method
-    //
-    // Return value                 : Required protectionFactor for delta frame
-    virtual uint8_t RequiredProtectionFactorD() { return _protectionFactorD; }
-
-    // Extracts whether the FEC Unequal protection (UEP) is used for Key frame.
-    //
-    // Return value                 : Required Unequal protection on/off state.
-    virtual bool RequiredUepProtectionK() { return _useUepProtectionK; }
-
-    // Extracts whether the the FEC Unequal protection (UEP) is used for Delta frame.
-    //
-    // Return value                 : Required Unequal protection on/off state.
-    virtual bool RequiredUepProtectionD() { return _useUepProtectionD; }
-
-    virtual int MaxFramesFec() const { return 1; }
-
-    // Updates content metrics
-    void UpdateContentMetrics(const VideoContentMetrics* contentMetrics);
-
-protected:
-
-    uint8_t                        _effectivePacketLoss;
-    uint8_t                        _protectionFactorK;
-    uint8_t                        _protectionFactorD;
-    // Estimation of residual loss after the FEC
-    float                                _scaleProtKey;
-    int32_t                        _maxPayloadSize;
-
-    VCMQmRobustness*                     _qmRobustness;
-    bool                                 _useUepProtectionK;
-    bool                                 _useUepProtectionD;
-    float                                _corrFecCost;
-    enum VCMProtectionMethodEnum         _type;
+class VCMNackMethod : public VCMProtectionMethod {
+ public:
+  VCMNackMethod();
+  virtual ~VCMNackMethod();
+  virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
+  // Get the effective packet loss
+  bool EffectivePacketLoss(const VCMProtectionParameters* parameter);
 };
 
-class VCMNackMethod : public VCMProtectionMethod
-{
-public:
-    VCMNackMethod();
-    virtual ~VCMNackMethod();
-    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
-    // Get the effective packet loss
-    bool EffectivePacketLoss(const VCMProtectionParameters* parameter);
+class VCMFecMethod : public VCMProtectionMethod {
+ public:
+  VCMFecMethod();
+  virtual ~VCMFecMethod();
+  virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
+  // Get the effective packet loss for ER
+  bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
+  // Get the FEC protection factors
+  bool ProtectionFactor(const VCMProtectionParameters* parameters);
+  // Get the boost for key frame protection
+  uint8_t BoostCodeRateKey(uint8_t packetFrameDelta,
+                           uint8_t packetFrameKey) const;
+  // Convert the rates: defined relative to total# packets or source# packets
+  uint8_t ConvertFECRate(uint8_t codeRate) const;
+  // Get the average effective recovery from FEC: for random loss model
+  float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
+  // Update FEC with protectionFactorD
+  void UpdateProtectionFactorD(uint8_t protectionFactorD);
+  // Update FEC with protectionFactorK
+  void UpdateProtectionFactorK(uint8_t protectionFactorK);
+  // Compute the bits per frame. Account for temporal layers when applicable.
+  int BitsPerFrame(const VCMProtectionParameters* parameters);
+
+ protected:
+  enum { kUpperLimitFramesFec = 6 };
+  // Threshold values for the bytes/frame and round trip time, below which we
+  // may turn off FEC, depending on |_numLayers| and |_maxFramesFec|.
+  // Max bytes/frame for VGA, corresponds to ~140k at 25fps.
+  enum { kMaxBytesPerFrameForFec = 700 };
+  // Max bytes/frame for CIF and lower: corresponds to ~80k at 25fps.
+  enum { kMaxBytesPerFrameForFecLow = 400 };
+  // Max bytes/frame for frame size larger than VGA, ~200k at 25fps.
+  enum { kMaxBytesPerFrameForFecHigh = 1000 };
 };
 
-class VCMFecMethod : public VCMProtectionMethod
-{
-public:
-    VCMFecMethod();
-    virtual ~VCMFecMethod();
-    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
-    // Get the effective packet loss for ER
-    bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
-    // Get the FEC protection factors
-    bool ProtectionFactor(const VCMProtectionParameters* parameters);
-    // Get the boost for key frame protection
-    uint8_t BoostCodeRateKey(uint8_t packetFrameDelta,
-                                   uint8_t packetFrameKey) const;
-    // Convert the rates: defined relative to total# packets or source# packets
-    uint8_t ConvertFECRate(uint8_t codeRate) const;
-    // Get the average effective recovery from FEC: for random loss model
-    float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
-    // Update FEC with protectionFactorD
-    void UpdateProtectionFactorD(uint8_t protectionFactorD);
-    // Update FEC with protectionFactorK
-    void UpdateProtectionFactorK(uint8_t protectionFactorK);
-    // Compute the bits per frame. Account for temporal layers when applicable.
-    int BitsPerFrame(const VCMProtectionParameters* parameters);
+class VCMNackFecMethod : public VCMFecMethod {
+ public:
+  VCMNackFecMethod(int64_t lowRttNackThresholdMs,
+                   int64_t highRttNackThresholdMs);
+  virtual ~VCMNackFecMethod();
+  virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
+  // Get the effective packet loss for ER
+  bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
+  // Get the protection factors
+  bool ProtectionFactor(const VCMProtectionParameters* parameters);
+  // Get the max number of frames the FEC is allowed to be based on.
+  int MaxFramesFec() const;
+  // Turn off the FEC based on low bitrate and other factors.
+  bool BitRateTooLowForFec(const VCMProtectionParameters* parameters);
 
-protected:
-    enum { kUpperLimitFramesFec = 6 };
-    // Thresholds values for the bytes/frame and round trip time, below which we
-    // may turn off FEC, depending on |_numLayers| and |_maxFramesFec|.
-    // Max bytes/frame for VGA, corresponds to ~140k at 25fps.
-    enum { kMaxBytesPerFrameForFec = 700 };
-    // Max bytes/frame for CIF and lower: corresponds to ~80k at 25fps.
-    enum { kMaxBytesPerFrameForFecLow = 400 };
-    // Max bytes/frame for frame size larger than VGA, ~200k at 25fps.
-    enum { kMaxBytesPerFrameForFecHigh = 1000 };
+ private:
+  int ComputeMaxFramesFec(const VCMProtectionParameters* parameters);
+
+  int64_t _lowRttNackMs;
+  int64_t _highRttNackMs;
+  int _maxFramesFec;
 };
 
+class VCMLossProtectionLogic {
+ public:
+  explicit VCMLossProtectionLogic(int64_t nowMs);
+  ~VCMLossProtectionLogic();
 
-class VCMNackFecMethod : public VCMFecMethod
-{
-public:
-    VCMNackFecMethod(int64_t lowRttNackThresholdMs,
-                     int64_t highRttNackThresholdMs);
-    virtual ~VCMNackFecMethod();
-    virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
-    // Get the effective packet loss for ER
-    bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
-    // Get the protection factors
-    bool ProtectionFactor(const VCMProtectionParameters* parameters);
-    // Get the max number of frames the FEC is allowed to be based on.
-    int MaxFramesFec() const;
-    // Turn off the FEC based on low bitrate and other factors.
-    bool BitRateTooLowForFec(const VCMProtectionParameters* parameters);
-private:
-    int ComputeMaxFramesFec(const VCMProtectionParameters* parameters);
+  // Set the protection method to be used
+  //
+  // Input:
+  //        - newMethodType    : New requested protection method type. If one
+  //                           is already set, it will be deleted and replaced
+  void SetMethod(VCMProtectionMethodEnum newMethodType);
 
-    int64_t _lowRttNackMs;
-    int64_t _highRttNackMs;
-    int _maxFramesFec;
-};
+  // Update the round-trip time
+  //
+  // Input:
+  //          - rtt           : Round-trip time in milliseconds.
+  void UpdateRtt(int64_t rtt);
 
-class VCMLossProtectionLogic
-{
-public:
-    VCMLossProtectionLogic(int64_t nowMs);
-    ~VCMLossProtectionLogic();
+  // Update the filtered packet loss.
+  //
+  // Input:
+  //          - packetLossEnc :  The reported packet loss filtered
+  //                             (max window or average)
+  void UpdateFilteredLossPr(uint8_t packetLossEnc);
 
-    // Set the protection method to be used
-    //
-    // Input:
-    //        - newMethodType    : New requested protection method type. If one
-    //                           is already set, it will be deleted and replaced
-    void SetMethod(VCMProtectionMethodEnum newMethodType);
+  // Update the current target bit rate.
+  //
+  // Input:
+  //          - bitRate          : The current target bit rate in kbits/s
+  void UpdateBitRate(float bitRate);
 
-    // Update the round-trip time
-    //
-    // Input:
-    //          - rtt           : Round-trip time in seconds.
-    void UpdateRtt(int64_t rtt);
+  // Update the number of packets per frame estimate, for delta frames
+  //
+  // Input:
+  //          - nPackets         : Number of packets in the latest sent frame.
+  void UpdatePacketsPerFrame(float nPackets, int64_t nowMs);
 
-    // Update the filtered packet loss.
-    //
-    // Input:
-    //          - packetLossEnc :  The reported packet loss filtered
-    //                             (max window or average)
-    void UpdateFilteredLossPr(uint8_t packetLossEnc);
+  // Update the number of packets per frame estimate, for key frames
+  //
+  // Input:
+  //          - nPackets         : Number of packets in the latest sent frame.
+  void UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs);
 
-    // Update the current target bit rate.
-    //
-    // Input:
-    //          - bitRate          : The current target bit rate in kbits/s
-    void UpdateBitRate(float bitRate);
+  // Update the keyFrameSize estimate
+  //
+  // Input:
+  //          - keyFrameSize     : The size of the latest sent key frame.
+  void UpdateKeyFrameSize(float keyFrameSize);
 
-    // Update the number of packets per frame estimate, for delta frames
-    //
-    // Input:
-    //          - nPackets         : Number of packets in the latest sent frame.
-    void UpdatePacketsPerFrame(float nPackets, int64_t nowMs);
+  // Update the frame rate
+  //
+  // Input:
+  //          - frameRate        : The current target frame rate.
+  void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }
 
-   // Update the number of packets per frame estimate, for key frames
-    //
-    // Input:
-    //          - nPackets         : umber of packets in the latest sent frame.
-    void UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs);
+  // Update the frame size
+  //
+  // Input:
+  //          - width        : The codec frame width.
+  //          - height       : The codec frame height.
+  void UpdateFrameSize(uint16_t width, uint16_t height);
 
-    // Update the keyFrameSize estimate
-    //
-    // Input:
-    //          - keyFrameSize     : The size of the latest sent key frame.
-    void UpdateKeyFrameSize(float keyFrameSize);
+  // Update the number of active layers
+  //
+  // Input:
+  //          - numLayers    : Number of layers used.
+  void UpdateNumLayers(int numLayers);
 
-    // Update the frame rate
-    //
-    // Input:
-    //          - frameRate        : The current target frame rate.
-    void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }
+  // The amount of packet loss to cover for with FEC.
+  //
+  // Input:
+  //          - fecRateKey      : Packet loss to cover for with FEC when
+  //                              sending key frames.
+  //          - fecRateDelta    : Packet loss to cover for with FEC when
+  //                              sending delta frames.
+  void UpdateFECRates(uint8_t fecRateKey, uint8_t fecRateDelta) {
+    _fecRateKey = fecRateKey;
+    _fecRateDelta = fecRateDelta;
+  }
 
-    // Update the frame size
-    //
-    // Input:
-    //          - width        : The codec frame width.
-    //          - height       : The codec frame height.
-    void UpdateFrameSize(uint16_t width, uint16_t height);
+  // Update the protection methods with the current VCMProtectionParameters
+  // and set the requested protection settings.
+  // Return value     : Returns true on update
+  bool UpdateMethod();
 
-    // Update the number of active layers
-    //
-    // Input:
-    //          - numLayers    : Number of layers used.
-    void UpdateNumLayers(int numLayers);
+  // Returns the method currently selected.
+  //
+  // Return value                 : The protection method currently selected.
+  VCMProtectionMethod* SelectedMethod() const;
 
-    // The amount of packet loss to cover for with FEC.
-    //
-    // Input:
-    //          - fecRateKey      : Packet loss to cover for with FEC when
-    //                              sending key frames.
-    //          - fecRateDelta    : Packet loss to cover for with FEC when
-    //                              sending delta frames.
-    void UpdateFECRates(uint8_t fecRateKey, uint8_t fecRateDelta)
-                       { _fecRateKey = fecRateKey;
-                         _fecRateDelta = fecRateDelta; }
+  // Return the protection type of the currently selected method
+  VCMProtectionMethodEnum SelectedType() const;
 
-    // Update the protection methods with the current VCMProtectionParameters
-    // and set the requested protection settings.
-    // Return value     : Returns true on update
-    bool UpdateMethod();
+  // Updates the filtered loss for the average and max window packet loss,
+  // and returns the filtered loss probability in the interval [0, 255].
+  // The returned filtered loss value depends on the parameter |filter_mode|.
+  // The input parameter |lossPr255| is the received packet loss.
 
-    // Returns the method currently selected.
-    //
-    // Return value                 : The protection method currently selected.
-    VCMProtectionMethod* SelectedMethod() const;
+  // Return value                 : The filtered loss probability
+  uint8_t FilteredLoss(int64_t nowMs,
+                       FilterPacketLossMode filter_mode,
+                       uint8_t lossPr255);
 
-    // Return the protection type of the currently selected method
-    VCMProtectionMethodEnum SelectedType() const;
+  void Reset(int64_t nowMs);
 
-    // Updates the filtered loss for the average and max window packet loss,
-    // and returns the filtered loss probability in the interval [0, 255].
-    // The returned filtered loss value depends on the parameter |filter_mode|.
-    // The input parameter |lossPr255| is the received packet loss.
+  void Release();
 
-    // Return value                 : The filtered loss probability
-    uint8_t FilteredLoss(int64_t nowMs, FilterPacketLossMode filter_mode,
-                               uint8_t lossPr255);
-
-    void Reset(int64_t nowMs);
-
-    void Release();
-
-private:
-    // Sets the available loss protection methods.
-    void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
-    uint8_t MaxFilteredLossPr(int64_t nowMs) const;
-    rtc::scoped_ptr<VCMProtectionMethod> _selectedMethod;
-    VCMProtectionParameters _currentParameters;
-    int64_t _rtt;
-    float _lossPr;
-    float _bitRate;
-    float _frameRate;
-    float _keyFrameSize;
-    uint8_t _fecRateKey;
-    uint8_t _fecRateDelta;
-    int64_t _lastPrUpdateT;
-    int64_t _lastPacketPerFrameUpdateT;
-    int64_t _lastPacketPerFrameUpdateTKey;
-    rtc::ExpFilter _lossPr255;
-    VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
-    uint8_t _shortMaxLossPr255;
-    rtc::ExpFilter _packetsPerFrame;
-    rtc::ExpFilter _packetsPerFrameKey;
-    uint16_t _codecWidth;
-    uint16_t _codecHeight;
-    int _numLayers;
+ private:
+  // Sets the available loss protection methods.
+  void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
+  uint8_t MaxFilteredLossPr(int64_t nowMs) const;
+  rtc::scoped_ptr<VCMProtectionMethod> _selectedMethod;
+  VCMProtectionParameters _currentParameters;
+  int64_t _rtt;
+  float _lossPr;
+  float _bitRate;
+  float _frameRate;
+  float _keyFrameSize;
+  uint8_t _fecRateKey;
+  uint8_t _fecRateDelta;
+  int64_t _lastPrUpdateT;
+  int64_t _lastPacketPerFrameUpdateT;
+  int64_t _lastPacketPerFrameUpdateTKey;
+  rtc::ExpFilter _lossPr255;
+  VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
+  uint8_t _shortMaxLossPr255;
+  rtc::ExpFilter _packetsPerFrame;
+  rtc::ExpFilter _packetsPerFrameKey;
+  uint16_t _codecWidth;
+  uint16_t _codecHeight;
+  int _numLayers;
 };
 
 }  // namespace media_optimization
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
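
The header above only declares the protection interface; as a rough, illustrative sketch of how a caller drives VCMLossProtectionLogic (not taken from this CL: the kNackFec enumerator is assumed from earlier in media_opt_util.h, and all numeric values are placeholders):

// Sketch only. Assumes the VCMProtectionMethodEnum value kNackFec and a
// caller-supplied clock in milliseconds; numeric values are placeholders.
#include "webrtc/modules/video_coding/media_opt_util.h"

void ConfigureProtectionSketch(int64_t now_ms) {
  webrtc::media_optimization::VCMLossProtectionLogic logic(now_ms);
  logic.SetMethod(webrtc::media_optimization::kNackFec);
  // Feed the current channel/encoder state.
  logic.UpdateRtt(120);             // Round-trip time in ms.
  logic.UpdateBitRate(500.0f);      // Target bit rate in kbits/s.
  logic.UpdateFrameRate(30.0f);     // Target frame rate.
  logic.UpdateFrameSize(640, 480);  // Codec frame size.
  logic.UpdateNumLayers(1);
  // Recompute the protection settings and read them back.
  if (logic.UpdateMethod()) {
    webrtc::media_optimization::VCMProtectionMethod* method =
        logic.SelectedMethod();
    logic.UpdateFECRates(method->RequiredProtectionFactorK(),
                         method->RequiredProtectionFactorD());
  }
}
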
diff --git a/webrtc/modules/video_coding/media_optimization.cc b/webrtc/modules/video_coding/media_optimization.cc
index 3ebebef..a234a06 100644
--- a/webrtc/modules/video_coding/media_optimization.cc
+++ b/webrtc/modules/video_coding/media_optimization.cc
@@ -53,11 +53,9 @@
   key_fec_params.fec_mask_type = kFecMaskRandom;
 
   // TODO(Marco): Pass FEC protection values per layer.
-  video_protection_callback->ProtectionRequest(&delta_fec_params,
-                                               &key_fec_params,
-                                               video_rate_bps,
-                                               nack_overhead_rate_bps,
-                                               fec_overhead_rate_bps);
+  video_protection_callback->ProtectionRequest(
+      &delta_fec_params, &key_fec_params, video_rate_bps,
+      nack_overhead_rate_bps, fec_overhead_rate_bps);
 }
 }  // namespace
 
@@ -115,8 +113,8 @@
 
 void MediaOptimization::Reset() {
   CriticalSectionScoped lock(crit_sect_.get());
-  SetEncodingDataInternal(
-      kVideoCodecUnknown, 0, 0, 0, 0, 0, 0, max_payload_size_);
+  SetEncodingDataInternal(kVideoCodecUnknown, 0, 0, 0, 0, 0, 0,
+                          max_payload_size_);
   memset(incoming_frame_times_, -1, sizeof(incoming_frame_times_));
   incoming_frame_rate_ = 0.0;
   frame_dropper_->Reset();
@@ -149,14 +147,8 @@
                                         int num_layers,
                                         int32_t mtu) {
   CriticalSectionScoped lock(crit_sect_.get());
-  SetEncodingDataInternal(send_codec_type,
-                          max_bit_rate,
-                          frame_rate,
-                          target_bitrate,
-                          width,
-                          height,
-                          num_layers,
-                          mtu);
+  SetEncodingDataInternal(send_codec_type, max_bit_rate, frame_rate,
+                          target_bitrate, width, height, num_layers, mtu);
 }
 
 void MediaOptimization::SetEncodingDataInternal(VideoCodecType send_codec_type,
@@ -190,11 +182,8 @@
   codec_height_ = height;
   num_layers_ = (num_layers <= 1) ? 1 : num_layers;  // Can also be zero.
   max_payload_size_ = mtu;
-  qm_resolution_->Initialize(target_bitrate_kbps,
-                             user_frame_rate_,
-                             codec_width_,
-                             codec_height_,
-                             num_layers_);
+  qm_resolution_->Initialize(target_bitrate_kbps, user_frame_rate_,
+                             codec_width_, codec_height_, num_layers_);
 }
 
 uint32_t MediaOptimization::SetTargetRates(
@@ -256,10 +245,8 @@
     // overhead data actually transmitted (including headers) the last
     // second.
     if (protection_callback) {
-      UpdateProtectionCallback(selected_method,
-                               &sent_video_rate_bps,
-                               &sent_nack_rate_bps,
-                               &sent_fec_rate_bps,
+      UpdateProtectionCallback(selected_method, &sent_video_rate_bps,
+                               &sent_nack_rate_bps, &sent_fec_rate_bps,
                                protection_callback);
     }
     uint32_t sent_total_rate_bps =
@@ -296,10 +283,8 @@
 
   if (enable_qm_ && qmsettings_callback) {
     // Update QM with rates.
-    qm_resolution_->UpdateRates(target_video_bitrate_kbps,
-                                sent_video_rate_kbps,
-                                incoming_frame_rate_,
-                                fraction_lost_);
+    qm_resolution_->UpdateRates(target_video_bitrate_kbps, sent_video_rate_kbps,
+                                incoming_frame_rate_, fraction_lost_);
     // Check for QM selection.
     bool select_qm = CheckStatusForQMchange();
     if (select_qm) {
@@ -514,8 +499,7 @@
   }
   size_t framesize_sum = 0;
   for (FrameSampleList::iterator it = encoded_frame_samples_.begin();
-       it != encoded_frame_samples_.end();
-       ++it) {
+       it != encoded_frame_samples_.end(); ++it) {
     framesize_sum += it->size_bytes;
   }
   float denom = static_cast<float>(
@@ -565,7 +549,8 @@
   }
 
   LOG(LS_INFO) << "Media optimizer requests the video resolution to be changed "
-                  "to " << qm->codec_width << "x" << qm->codec_height << "@"
+                  "to "
+               << qm->codec_width << "x" << qm->codec_height << "@"
                << qm->frame_rate;
 
   // Update VPM with new target frame rate and frame size.
@@ -574,11 +559,11 @@
   // will vary/fluctuate, and since we don't want to change the state of the
   // VPM frame dropper, unless a temporal action was selected, we use the
   // quantity |qm->frame_rate| for updating.
-  video_qmsettings_callback->SetVideoQMSettings(
-      qm->frame_rate, codec_width_, codec_height_);
+  video_qmsettings_callback->SetVideoQMSettings(qm->frame_rate, codec_width_,
+                                                codec_height_);
   content_->UpdateFrameRate(qm->frame_rate);
-  qm_resolution_->UpdateCodecParameters(
-      qm->frame_rate, codec_width_, codec_height_);
+  qm_resolution_->UpdateCodecParameters(qm->frame_rate, codec_width_,
+                                        codec_height_);
   return true;
 }
 
diff --git a/webrtc/modules/video_coding/media_optimization.h b/webrtc/modules/video_coding/media_optimization.h
index 7bbdd37..54389bf 100644
--- a/webrtc/modules/video_coding/media_optimization.h
+++ b/webrtc/modules/video_coding/media_optimization.h
@@ -85,15 +85,9 @@
   uint32_t SentBitRate();
 
  private:
-  enum {
-    kFrameCountHistorySize = 90
-  };
-  enum {
-    kFrameHistoryWinMs = 2000
-  };
-  enum {
-    kBitrateAverageWinMs = 1000
-  };
+  enum { kFrameCountHistorySize = 90 };
+  enum { kFrameHistoryWinMs = 2000 };
+  enum { kBitrateAverageWinMs = 1000 };
 
   struct EncodedFrameSample;
   typedef std::list<EncodedFrameSample> FrameSampleList;
diff --git a/webrtc/modules/video_coding/media_optimization_unittest.cc b/webrtc/modules/video_coding/media_optimization_unittest.cc
index f8bc533..3f8ac5d 100644
--- a/webrtc/modules/video_coding/media_optimization_unittest.cc
+++ b/webrtc/modules/video_coding/media_optimization_unittest.cc
@@ -51,7 +51,6 @@
   uint32_t next_timestamp_;
 };
 
-
 TEST_F(TestMediaOptimization, VerifyMuting) {
   // Enable video suspension with these limits.
   // Suspend the video when the rate is below 50 kbps and resume when it gets
diff --git a/webrtc/modules/video_coding/nack_fec_tables.h b/webrtc/modules/video_coding/nack_fec_tables.h
index 54265ce..f9f5ad9 100644
--- a/webrtc/modules/video_coding/nack_fec_tables.h
+++ b/webrtc/modules/video_coding/nack_fec_tables.h
@@ -11,116 +11,21 @@
 #ifndef WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_
 #define WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_
 
-namespace webrtc
-{
+namespace webrtc {
 
 // Table for adjusting FEC rate for NACK/FEC protection method
 // Table values are built as a sigmoid function, ranging from 0 to 100, based on
 // the HybridNackTH values defined in media_opt_util.h.
 const uint16_t VCMNackFecTable[100] = {
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-1,
-1,
-1,
-1,
-1,
-2,
-2,
-2,
-3,
-3,
-4,
-5,
-6,
-7,
-9,
-10,
-12,
-15,
-18,
-21,
-24,
-28,
-32,
-37,
-41,
-46,
-51,
-56,
-61,
-66,
-70,
-74,
-78,
-81,
-84,
-86,
-89,
-90,
-92,
-93,
-95,
-95,
-96,
-97,
-97,
-98,
-98,
-99,
-99,
-99,
-99,
-99,
-99,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-
+    0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   1,   1,   1,   1,
+    1,   2,   2,   2,   3,   3,   4,   5,   6,   7,   9,   10,  12,  15,  18,
+    21,  24,  28,  32,  37,  41,  46,  51,  56,  61,  66,  70,  74,  78,  81,
+    84,  86,  89,  90,  92,  93,  95,  95,  96,  97,  97,  98,  98,  99,  99,
+    99,  99,  99,  99,  100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+    100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+    100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
 };
 
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_
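
The re-flowed table above is consumed by the hybrid NACK/FEC method. A rough sketch of the intended lookup follows; the value is a sigmoid weight in [0, 100] applied to a base FEC rate, and the way the index is derived from the loss/RTT state is simplified and assumed here (the real logic lives in media_opt_util.cc):

// Sketch only: treats VCMNackFecTable[index] as a percentage that scales a
// base FEC protection factor. The clamping of the index is an assumption for
// illustration.
#include <stdint.h>

#include "webrtc/modules/video_coding/nack_fec_tables.h"

uint8_t ScaledFecRateSketch(uint8_t base_fec_rate, int index) {
  if (index < 0)
    index = 0;
  if (index > 99)
    index = 99;
  return static_cast<uint8_t>(base_fec_rate *
                              webrtc::VCMNackFecTable[index] / 100);
}
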
diff --git a/webrtc/modules/video_coding/packet.cc b/webrtc/modules/video_coding/packet.cc
index 282215d..e25de2e 100644
--- a/webrtc/modules/video_coding/packet.cc
+++ b/webrtc/modules/video_coding/packet.cc
@@ -8,11 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "webrtc/modules/include/module_common_types.h"
 #include "webrtc/modules/video_coding/packet.h"
 
 #include <assert.h>
 
+#include "webrtc/modules/include/module_common_types.h"
+
 namespace webrtc {
 
 VCMPacket::VCMPacket()
@@ -34,49 +35,47 @@
 
 VCMPacket::VCMPacket(const uint8_t* ptr,
                      const size_t size,
-                     const WebRtcRTPHeader& rtpHeader) :
-    payloadType(rtpHeader.header.payloadType),
-    timestamp(rtpHeader.header.timestamp),
-    ntp_time_ms_(rtpHeader.ntp_time_ms),
-    seqNum(rtpHeader.header.sequenceNumber),
-    dataPtr(ptr),
-    sizeBytes(size),
-    markerBit(rtpHeader.header.markerBit),
+                     const WebRtcRTPHeader& rtpHeader)
+    : payloadType(rtpHeader.header.payloadType),
+      timestamp(rtpHeader.header.timestamp),
+      ntp_time_ms_(rtpHeader.ntp_time_ms),
+      seqNum(rtpHeader.header.sequenceNumber),
+      dataPtr(ptr),
+      sizeBytes(size),
+      markerBit(rtpHeader.header.markerBit),
 
-    frameType(rtpHeader.frameType),
-    codec(kVideoCodecUnknown),
-    isFirstPacket(rtpHeader.type.Video.isFirstPacket),
-    completeNALU(kNaluComplete),
-    insertStartCode(false),
-    width(rtpHeader.type.Video.width),
-    height(rtpHeader.type.Video.height),
-    codecSpecificHeader(rtpHeader.type.Video)
-{
-    CopyCodecSpecifics(rtpHeader.type.Video);
+      frameType(rtpHeader.frameType),
+      codec(kVideoCodecUnknown),
+      isFirstPacket(rtpHeader.type.Video.isFirstPacket),
+      completeNALU(kNaluComplete),
+      insertStartCode(false),
+      width(rtpHeader.type.Video.width),
+      height(rtpHeader.type.Video.height),
+      codecSpecificHeader(rtpHeader.type.Video) {
+  CopyCodecSpecifics(rtpHeader.type.Video);
 }
 
 VCMPacket::VCMPacket(const uint8_t* ptr,
                      size_t size,
                      uint16_t seq,
                      uint32_t ts,
-                     bool mBit) :
-    payloadType(0),
-    timestamp(ts),
-    ntp_time_ms_(0),
-    seqNum(seq),
-    dataPtr(ptr),
-    sizeBytes(size),
-    markerBit(mBit),
+                     bool mBit)
+    : payloadType(0),
+      timestamp(ts),
+      ntp_time_ms_(0),
+      seqNum(seq),
+      dataPtr(ptr),
+      sizeBytes(size),
+      markerBit(mBit),
 
-    frameType(kVideoFrameDelta),
-    codec(kVideoCodecUnknown),
-    isFirstPacket(false),
-    completeNALU(kNaluComplete),
-    insertStartCode(false),
-    width(0),
-    height(0),
-    codecSpecificHeader()
-{}
+      frameType(kVideoFrameDelta),
+      codec(kVideoCodecUnknown),
+      isFirstPacket(false),
+      completeNALU(kNaluComplete),
+      insertStartCode(false),
+      width(0),
+      height(0),
+      codecSpecificHeader() {}
 
 void VCMPacket::Reset() {
   payloadType = 0;
diff --git a/webrtc/modules/video_coding/packet.h b/webrtc/modules/video_coding/packet.h
index 9f00e8e..b77c1df 100644
--- a/webrtc/modules/video_coding/packet.h
+++ b/webrtc/modules/video_coding/packet.h
@@ -18,42 +18,42 @@
 namespace webrtc {
 
 class VCMPacket {
-public:
-    VCMPacket();
-    VCMPacket(const uint8_t* ptr,
-              const size_t size,
-              const WebRtcRTPHeader& rtpHeader);
-    VCMPacket(const uint8_t* ptr,
-              size_t size,
-              uint16_t seqNum,
-              uint32_t timestamp,
-              bool markerBit);
+ public:
+  VCMPacket();
+  VCMPacket(const uint8_t* ptr,
+            const size_t size,
+            const WebRtcRTPHeader& rtpHeader);
+  VCMPacket(const uint8_t* ptr,
+            size_t size,
+            uint16_t seqNum,
+            uint32_t timestamp,
+            bool markerBit);
 
-    void Reset();
+  void Reset();
 
-    uint8_t           payloadType;
-    uint32_t          timestamp;
-    // NTP time of the capture time in local timebase in milliseconds.
-    int64_t ntp_time_ms_;
-    uint16_t          seqNum;
-    const uint8_t*    dataPtr;
-    size_t          sizeBytes;
-    bool                    markerBit;
+  uint8_t payloadType;
+  uint32_t timestamp;
+  // NTP time of the capture time in local timebase in milliseconds.
+  int64_t ntp_time_ms_;
+  uint16_t seqNum;
+  const uint8_t* dataPtr;
+  size_t sizeBytes;
+  bool markerBit;
 
-    FrameType               frameType;
-    VideoCodecType          codec;
+  FrameType frameType;
+  VideoCodecType codec;
 
-    bool isFirstPacket;                 // Is this first packet in a frame.
-    VCMNaluCompleteness completeNALU;   // Default is kNaluIncomplete.
-    bool insertStartCode;               // True if a start code should be inserted before this
-                                        // packet.
-    int width;
-    int height;
-    RTPVideoHeader codecSpecificHeader;
+  bool isFirstPacket;                // Is this the first packet in a frame.
+  VCMNaluCompleteness completeNALU;  // Default is kNaluIncomplete.
+  bool insertStartCode;  // True if a start code should be inserted before this
+                         // packet.
+  int width;
+  int height;
+  RTPVideoHeader codecSpecificHeader;
 
-protected:
-    void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
+ protected:
+  void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
 };
 
 }  // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
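
For reference, a minimal sketch of building a VCMPacket with the five-argument constructor declared above; the payload constants are made-up placeholders:

// Sketch only: constructs a VCMPacket from a raw payload using the
// (ptr, size, seqNum, timestamp, markerBit) constructor. Defaulted fields
// (frameType, codec, ...) are set as shown in packet.cc above.
#include <stddef.h>
#include <stdint.h>

#include "webrtc/modules/video_coding/packet.h"

webrtc::VCMPacket MakePacketSketch(const uint8_t* payload, size_t length) {
  const uint16_t kSeqNum = 1234;      // Placeholder sequence number.
  const uint32_t kTimestamp = 90000;  // Placeholder RTP timestamp.
  const bool kMarkerBit = true;       // Last packet of the frame.
  return webrtc::VCMPacket(payload, length, kSeqNum, kTimestamp, kMarkerBit);
}
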
diff --git a/webrtc/modules/video_coding/qm_select.cc b/webrtc/modules/video_coding/qm_select.cc
index fb261fe..9da42bb 100644
--- a/webrtc/modules/video_coding/qm_select.cc
+++ b/webrtc/modules/video_coding/qm_select.cc
@@ -36,8 +36,7 @@
   ResetQM();
 }
 
-VCMQmMethod::~VCMQmMethod() {
-}
+VCMQmMethod::~VCMQmMethod() {}
 
 void VCMQmMethod::ResetQM() {
   aspect_ratio_ = 1.0f;
@@ -52,7 +51,7 @@
   return content_class_ = 3 * motion_.level + spatial_.level;
 }
 
-void VCMQmMethod::UpdateContent(const VideoContentMetrics*  contentMetrics) {
+void VCMQmMethod::UpdateContent(const VideoContentMetrics* contentMetrics) {
   content_metrics_ = contentMetrics;
 }
 
@@ -64,7 +63,7 @@
   if (motion_.value < kLowMotionNfd) {
     motion_.level = kLow;
   } else if (motion_.value > kHighMotionNfd) {
-    motion_.level  = kHigh;
+    motion_.level = kHigh;
   } else {
     motion_.level = kDefault;
   }
@@ -75,7 +74,7 @@
   float spatial_err_h = 0.0;
   float spatial_err_v = 0.0;
   if (content_metrics_) {
-    spatial_err =  content_metrics_->spatial_pred_err;
+    spatial_err = content_metrics_->spatial_pred_err;
     spatial_err_h = content_metrics_->spatial_pred_err_h;
     spatial_err_v = content_metrics_->spatial_pred_err_v;
   }
@@ -94,8 +93,7 @@
   }
 }
 
-ImageType VCMQmMethod::GetImageType(uint16_t width,
-                                    uint16_t height) {
+ImageType VCMQmMethod::GetImageType(uint16_t width, uint16_t height) {
   // Get the image type for the encoder frame size.
   uint32_t image_size = width * height;
   if (image_size == kSizeOfImageType[kQCIF]) {
@@ -142,7 +140,7 @@
   } else if (avg_framerate <= kMiddleFrameRate) {
     return kFrameRateMiddle1;
   } else if (avg_framerate <= kHighFrameRate) {
-     return kFrameRateMiddle2;
+    return kFrameRateMiddle2;
   } else {
     return kFrameRateHigh;
   }
@@ -150,8 +148,7 @@
 
 // RESOLUTION CLASS
 
-VCMQmResolution::VCMQmResolution()
-    :  qm_(new VCMResolutionScale()) {
+VCMQmResolution::VCMQmResolution() : qm_(new VCMResolutionScale()) {
   Reset();
 }
 
@@ -174,7 +171,7 @@
 
 void VCMQmResolution::ResetDownSamplingState() {
   state_dec_factor_spatial_ = 1.0;
-  state_dec_factor_temporal_  = 1.0;
+  state_dec_factor_temporal_ = 1.0;
   for (int i = 0; i < kDownActionHistorySize; i++) {
     down_action_history_[i].spatial = kNoChangeSpatial;
     down_action_history_[i].temporal = kNoChangeTemporal;
@@ -225,11 +222,12 @@
   buffer_level_ = kInitBufferLevel * target_bitrate_;
   // Per-frame bandwidth.
   per_frame_bandwidth_ = target_bitrate_ / user_framerate;
-  init_  = true;
+  init_ = true;
   return VCM_OK;
 }
 
-void VCMQmResolution::UpdateCodecParameters(float frame_rate, uint16_t width,
+void VCMQmResolution::UpdateCodecParameters(float frame_rate,
+                                            uint16_t width,
                                             uint16_t height) {
   width_ = width;
   height_ = height;
@@ -283,12 +281,12 @@
 
   // Update with the current new target and frame rate:
   // these values are ones the encoder will use for the current/next ~1sec.
-  target_bitrate_ =  target_bitrate;
+  target_bitrate_ = target_bitrate;
   incoming_framerate_ = incoming_framerate;
   sum_incoming_framerate_ += incoming_framerate_;
   // Update the per_frame_bandwidth:
   // this is the per_frame_bw for the current/next ~1sec.
-  per_frame_bandwidth_  = 0.0f;
+  per_frame_bandwidth_ = 0.0f;
   if (incoming_framerate_ > 0.0f) {
     per_frame_bandwidth_ = target_bitrate_ / incoming_framerate_;
   }
@@ -313,7 +311,7 @@
   }
   if (content_metrics_ == NULL) {
     Reset();
-    *qm =  qm_;
+    *qm = qm_;
     return VCM_OK;
   }
 
@@ -376,31 +374,31 @@
   avg_rate_mismatch_sgn_ = 0.0f;
   avg_packet_loss_ = 0.0f;
   if (frame_cnt_ > 0) {
-    avg_ratio_buffer_low_ = static_cast<float>(low_buffer_cnt_) /
-        static_cast<float>(frame_cnt_);
+    avg_ratio_buffer_low_ =
+        static_cast<float>(low_buffer_cnt_) / static_cast<float>(frame_cnt_);
   }
   if (update_rate_cnt_ > 0) {
-    avg_rate_mismatch_ = static_cast<float>(sum_rate_MM_) /
-        static_cast<float>(update_rate_cnt_);
+    avg_rate_mismatch_ =
+        static_cast<float>(sum_rate_MM_) / static_cast<float>(update_rate_cnt_);
     avg_rate_mismatch_sgn_ = static_cast<float>(sum_rate_MM_sgn_) /
-        static_cast<float>(update_rate_cnt_);
+                             static_cast<float>(update_rate_cnt_);
     avg_target_rate_ = static_cast<float>(sum_target_rate_) /
-        static_cast<float>(update_rate_cnt_);
+                       static_cast<float>(update_rate_cnt_);
     avg_incoming_framerate_ = static_cast<float>(sum_incoming_framerate_) /
-        static_cast<float>(update_rate_cnt_);
-    avg_packet_loss_ =  static_cast<float>(sum_packet_loss_) /
-        static_cast<float>(update_rate_cnt_);
+                              static_cast<float>(update_rate_cnt_);
+    avg_packet_loss_ = static_cast<float>(sum_packet_loss_) /
+                       static_cast<float>(update_rate_cnt_);
   }
   // For selection we may want to weight some quantities more heavily
   // with the current (i.e., next ~1sec) rate values.
-  avg_target_rate_ = kWeightRate * avg_target_rate_ +
-      (1.0 - kWeightRate) * target_bitrate_;
+  avg_target_rate_ =
+      kWeightRate * avg_target_rate_ + (1.0 - kWeightRate) * target_bitrate_;
   avg_incoming_framerate_ = kWeightRate * avg_incoming_framerate_ +
-      (1.0 - kWeightRate) * incoming_framerate_;
+                            (1.0 - kWeightRate) * incoming_framerate_;
   // Use base layer frame rate for temporal layers: this will favor spatial.
   assert(num_layers_ > 0);
-  framerate_level_ = FrameRateLevel(
-      avg_incoming_framerate_ / static_cast<float>(1 << (num_layers_ - 1)));
+  framerate_level_ = FrameRateLevel(avg_incoming_framerate_ /
+                                    static_cast<float>(1 << (num_layers_ - 1)));
 }
 
 void VCMQmResolution::ComputeEncoderState() {
@@ -412,7 +410,7 @@
   // 2) rate mis-match is high, and consistent over-shooting by encoder.
   if ((avg_ratio_buffer_low_ > kMaxBufferLow) ||
       ((avg_rate_mismatch_ > kMaxRateMisMatch) &&
-          (avg_rate_mismatch_sgn_ < -kRateOverShoot))) {
+       (avg_rate_mismatch_sgn_ < -kRateOverShoot))) {
     encoder_state_ = kStressedEncoding;
   }
   // Assign easy state if:
@@ -435,9 +433,9 @@
   // Modify the fac_width/height for this case.
   if (down_action_history_[0].spatial == kOneQuarterSpatialUniform) {
     fac_width = kFactorWidthSpatial[kOneQuarterSpatialUniform] /
-        kFactorWidthSpatial[kOneHalfSpatialUniform];
+                kFactorWidthSpatial[kOneHalfSpatialUniform];
     fac_height = kFactorHeightSpatial[kOneQuarterSpatialUniform] /
-        kFactorHeightSpatial[kOneHalfSpatialUniform];
+                 kFactorHeightSpatial[kOneHalfSpatialUniform];
   }
 
   // Check if we should go up both spatially and temporally.
@@ -459,8 +457,8 @@
                                               kTransRateScaleUpSpatial);
   }
   if (down_action_history_[0].temporal != kNoChangeTemporal) {
-    selected_up_temporal = ConditionForGoingUp(1.0f, 1.0f, fac_temp,
-                                               kTransRateScaleUpTemp);
+    selected_up_temporal =
+        ConditionForGoingUp(1.0f, 1.0f, fac_temp, kTransRateScaleUpTemp);
   }
   if (selected_up_spatial && !selected_up_temporal) {
     action_.spatial = down_action_history_[0].spatial;
@@ -484,13 +482,13 @@
                                           float fac_height,
                                           float fac_temp,
                                           float scale_fac) {
-  float estimated_transition_rate_up = GetTransitionRate(fac_width, fac_height,
-                                                         fac_temp, scale_fac);
+  float estimated_transition_rate_up =
+      GetTransitionRate(fac_width, fac_height, fac_temp, scale_fac);
   // Go back up if:
   // 1) target rate is above threshold and current encoder state is stable, or
   // 2) encoder state is easy (encoder is significantly under-shooting target).
   if (((avg_target_rate_ > estimated_transition_rate_up) &&
-      (encoder_state_ == kStableEncoding)) ||
+       (encoder_state_ == kStableEncoding)) ||
       (encoder_state_ == kEasyEncoding)) {
     return true;
   } else {
@@ -505,7 +503,7 @@
   // Resolution reduction if:
   // (1) target rate is below transition rate, or
   // (2) encoder is in stressed state and target rate below a max threshold.
-  if ((avg_target_rate_ < estimated_transition_rate_down ) ||
+  if ((avg_target_rate_ < estimated_transition_rate_down) ||
       (encoder_state_ == kStressedEncoding && avg_target_rate_ < max_rate)) {
     // Get the down-sampling action: based on content class, and how low
     // average target rate is relative to transition rate.
@@ -529,9 +527,7 @@
         action_.spatial = kNoChangeSpatial;
         break;
       }
-      default: {
-        assert(false);
-      }
+      default: { assert(false); }
     }
     switch (temp_fact) {
       case 3: {
@@ -546,9 +542,7 @@
         action_.temporal = kNoChangeTemporal;
         break;
       }
-      default: {
-        assert(false);
-      }
+      default: { assert(false); }
     }
     // Only allow for one action (spatial or temporal) at a given time.
     assert(action_.temporal == kNoChangeTemporal ||
@@ -572,9 +566,9 @@
                                          float fac_height,
                                          float fac_temp,
                                          float scale_fac) {
-  ImageType image_type = GetImageType(
-      static_cast<uint16_t>(fac_width * width_),
-      static_cast<uint16_t>(fac_height * height_));
+  ImageType image_type =
+      GetImageType(static_cast<uint16_t>(fac_width * width_),
+                   static_cast<uint16_t>(fac_height * height_));
 
   FrameRateLevelClass framerate_level =
       FrameRateLevel(fac_temp * avg_incoming_framerate_);
@@ -589,13 +583,13 @@
   // Nominal values based on image format (frame size and frame rate).
   float max_rate = kFrameRateFac[framerate_level] * kMaxRateQm[image_type];
 
-  uint8_t image_class = image_type > kVGA ? 1: 0;
+  uint8_t image_class = image_type > kVGA ? 1 : 0;
   uint8_t table_index = image_class * 9 + content_class_;
   // Scale factor for down-sampling transition threshold:
   // factor based on the content class and the image size.
   float scaleTransRate = kScaleTransRateQm[table_index];
   // Threshold bitrate for resolution action.
-  return static_cast<float> (scale_fac * scaleTransRate * max_rate);
+  return static_cast<float>(scale_fac * scaleTransRate * max_rate);
 }
 
 void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
@@ -605,9 +599,9 @@
     // If last spatial action was 1/2x1/2, we undo it in two steps, so the
     // spatial scale factor in this first step is modified as (4.0/3.0 / 2.0).
     if (action_.spatial == kOneQuarterSpatialUniform) {
-      qm_->spatial_width_fact =
-          1.0f * kFactorWidthSpatial[kOneHalfSpatialUniform] /
-          kFactorWidthSpatial[kOneQuarterSpatialUniform];
+      qm_->spatial_width_fact = 1.0f *
+                                kFactorWidthSpatial[kOneHalfSpatialUniform] /
+                                kFactorWidthSpatial[kOneQuarterSpatialUniform];
       qm_->spatial_height_fact =
           1.0f * kFactorHeightSpatial[kOneHalfSpatialUniform] /
           kFactorHeightSpatial[kOneQuarterSpatialUniform];
@@ -628,17 +622,18 @@
   }
   UpdateCodecResolution();
   state_dec_factor_spatial_ = state_dec_factor_spatial_ *
-      qm_->spatial_width_fact * qm_->spatial_height_fact;
+                              qm_->spatial_width_fact *
+                              qm_->spatial_height_fact;
   state_dec_factor_temporal_ = state_dec_factor_temporal_ * qm_->temporal_fact;
 }
 
-void  VCMQmResolution::UpdateCodecResolution() {
+void VCMQmResolution::UpdateCodecResolution() {
   if (action_.spatial != kNoChangeSpatial) {
     qm_->change_resolution_spatial = true;
-    qm_->codec_width = static_cast<uint16_t>(width_ /
-                                             qm_->spatial_width_fact + 0.5f);
-    qm_->codec_height = static_cast<uint16_t>(height_ /
-                                              qm_->spatial_height_fact + 0.5f);
+    qm_->codec_width =
+        static_cast<uint16_t>(width_ / qm_->spatial_width_fact + 0.5f);
+    qm_->codec_height =
+        static_cast<uint16_t>(height_ / qm_->spatial_height_fact + 0.5f);
     // Size should not exceed native sizes.
     assert(qm_->codec_width <= native_width_);
     assert(qm_->codec_height <= native_height_);
@@ -662,8 +657,9 @@
 }
 
 uint8_t VCMQmResolution::RateClass(float transition_rate) {
-  return avg_target_rate_ < (kFacLowRate * transition_rate) ? 0:
-  (avg_target_rate_ >= transition_rate ? 2 : 1);
+  return avg_target_rate_ < (kFacLowRate * transition_rate)
+             ? 0
+             : (avg_target_rate_ >= transition_rate ? 2 : 1);
 }
 
 // TODO(marpan): Would be better to capture these frame rate adjustments by
@@ -698,15 +694,14 @@
   }
   // Never use temporal action if number of temporal layers is above 2.
   if (num_layers_ > 2) {
-    if (action_.temporal !=  kNoChangeTemporal) {
+    if (action_.temporal != kNoChangeTemporal) {
       action_.spatial = kOneHalfSpatialUniform;
     }
     action_.temporal = kNoChangeTemporal;
   }
   // If spatial action was selected, we need to make sure the frame sizes
   // are multiples of two. Otherwise switch to 2/3 temporal.
-  if (action_.spatial != kNoChangeSpatial &&
-      !EvenFrameSize()) {
+  if (action_.spatial != kNoChangeSpatial && !EvenFrameSize()) {
     action_.spatial = kNoChangeSpatial;
     // Only one action (spatial or temporal) is allowed at a given time, so need
     // to check whether temporal action is currently selected.
@@ -722,35 +717,36 @@
     bool found = false;
     int isel = kDownActionHistorySize;
     for (int i = 0; i < kDownActionHistorySize; ++i) {
-      if (down_action_history_[i].spatial ==  kOneHalfSpatialUniform) {
+      if (down_action_history_[i].spatial == kOneHalfSpatialUniform) {
         isel = i;
         found = true;
         break;
       }
     }
     if (found) {
-       action_.spatial = kOneQuarterSpatialUniform;
-       state_dec_factor_spatial_ = state_dec_factor_spatial_ /
-           (kFactorWidthSpatial[kOneHalfSpatialUniform] *
-            kFactorHeightSpatial[kOneHalfSpatialUniform]);
-       // Check if switching to 1/2x1/2 (=1/4) spatial is allowed.
-       ConstrainAmountOfDownSampling();
-       if (action_.spatial == kNoChangeSpatial) {
-         // Not allowed. Go back to 3/4x3/4 spatial.
-         action_.spatial = kOneHalfSpatialUniform;
-         state_dec_factor_spatial_ = state_dec_factor_spatial_ *
-             kFactorWidthSpatial[kOneHalfSpatialUniform] *
-             kFactorHeightSpatial[kOneHalfSpatialUniform];
-       } else {
-         // Switching is allowed. Remove 3/4x3/4 from the history, and update
-         // the frame size.
-         for (int i = isel; i < kDownActionHistorySize - 1; ++i) {
-           down_action_history_[i].spatial =
-               down_action_history_[i + 1].spatial;
-         }
-         width_ = width_ * kFactorWidthSpatial[kOneHalfSpatialUniform];
-         height_ = height_ * kFactorHeightSpatial[kOneHalfSpatialUniform];
-       }
+      action_.spatial = kOneQuarterSpatialUniform;
+      state_dec_factor_spatial_ =
+          state_dec_factor_spatial_ /
+          (kFactorWidthSpatial[kOneHalfSpatialUniform] *
+           kFactorHeightSpatial[kOneHalfSpatialUniform]);
+      // Check if switching to 1/2x1/2 (=1/4) spatial is allowed.
+      ConstrainAmountOfDownSampling();
+      if (action_.spatial == kNoChangeSpatial) {
+        // Not allowed. Go back to 3/4x3/4 spatial.
+        action_.spatial = kOneHalfSpatialUniform;
+        state_dec_factor_spatial_ =
+            state_dec_factor_spatial_ *
+            kFactorWidthSpatial[kOneHalfSpatialUniform] *
+            kFactorHeightSpatial[kOneHalfSpatialUniform];
+      } else {
+        // Switching is allowed. Remove 3/4x3/4 from the history, and update
+        // the frame size.
+        for (int i = isel; i < kDownActionHistorySize - 1; ++i) {
+          down_action_history_[i].spatial = down_action_history_[i + 1].spatial;
+        }
+        width_ = width_ * kFactorWidthSpatial[kOneHalfSpatialUniform];
+        height_ = height_ * kFactorHeightSpatial[kOneHalfSpatialUniform];
+      }
     }
   }
 }
@@ -815,8 +811,8 @@
   float spatial_width_fact = kFactorWidthSpatial[action_.spatial];
   float spatial_height_fact = kFactorHeightSpatial[action_.spatial];
   float temporal_fact = kFactorTemporal[action_.temporal];
-  float new_dec_factor_spatial = state_dec_factor_spatial_ *
-      spatial_width_fact * spatial_height_fact;
+  float new_dec_factor_spatial =
+      state_dec_factor_spatial_ * spatial_width_fact * spatial_height_fact;
   float new_dec_factor_temp = state_dec_factor_temporal_ * temporal_fact;
 
   // No spatial sampling if current frame size is too small, or if the
@@ -908,8 +904,7 @@
   Reset();
 }
 
-VCMQmRobustness::~VCMQmRobustness() {
-}
+VCMQmRobustness::~VCMQmRobustness() {}
 
 void VCMQmRobustness::Reset() {
   prev_total_rate_ = 0.0f;
@@ -928,7 +923,7 @@
                                        int64_t rtt_time,
                                        uint8_t packet_loss) {
   // Default: no adjustment
-  float adjust_fec =  1.0f;
+  float adjust_fec = 1.0f;
   if (content_metrics_ == NULL) {
     return adjust_fec;
   }
@@ -955,4 +950,4 @@
   // Default.
   return false;
 }
-}  // namespace
+}  // namespace webrtc
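
A minimal sketch of how VCMQmResolution is driven, mirroring the call order visible in this file and in qm_select_unittest.cc later in this CL (Initialize, UpdateCodecParameters, UpdateContent, UpdateRates, SelectResolution); all numeric values are placeholders:

// Sketch only; numeric values are placeholders.
#include "webrtc/modules/video_coding/qm_select.h"

void SelectResolutionSketch(const webrtc::VideoContentMetrics* metrics) {
  webrtc::VCMQmResolution qm_resolution;
  // Target bit rate (kbps), user frame rate, width, height, num layers.
  qm_resolution.Initialize(300.0f, 30.0f, 640, 480, 1);
  qm_resolution.UpdateCodecParameters(30.0f, 640, 480);
  qm_resolution.UpdateContent(metrics);
  // Target rate, sent rate (kbps), incoming frame rate, fraction lost.
  qm_resolution.UpdateRates(300.0f, 290.0f, 30.0f, 0);
  webrtc::VCMResolutionScale* qm_scale = NULL;
  if (qm_resolution.SelectResolution(&qm_scale) == 0 &&
      qm_scale->change_resolution_spatial) {
    // qm_scale->codec_width and qm_scale->codec_height now hold the
    // suggested down-sampled frame size.
  }
}
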
diff --git a/webrtc/modules/video_coding/qm_select.h b/webrtc/modules/video_coding/qm_select.h
index 079e7f8..764b5ed 100644
--- a/webrtc/modules/video_coding/qm_select.h
+++ b/webrtc/modules/video_coding/qm_select.h
@@ -30,8 +30,7 @@
         spatial_height_fact(1.0f),
         temporal_fact(1.0f),
         change_resolution_spatial(false),
-        change_resolution_temporal(false) {
-  }
+        change_resolution_temporal(false) {}
   uint16_t codec_width;
   uint16_t codec_height;
   float frame_rate;
@@ -43,20 +42,20 @@
 };
 
 enum ImageType {
-  kQCIF = 0,            // 176x144
-  kHCIF,                // 264x216 = half(~3/4x3/4) CIF.
-  kQVGA,                // 320x240 = quarter VGA.
-  kCIF,                 // 352x288
-  kHVGA,                // 480x360 = half(~3/4x3/4) VGA.
-  kVGA,                 // 640x480
-  kQFULLHD,             // 960x540 = quarter FULLHD, and half(~3/4x3/4) WHD.
-  kWHD,                 // 1280x720
-  kFULLHD,              // 1920x1080
+  kQCIF = 0,  // 176x144
+  kHCIF,      // 264x216 = half(~3/4x3/4) CIF.
+  kQVGA,      // 320x240 = quarter VGA.
+  kCIF,       // 352x288
+  kHVGA,      // 480x360 = half(~3/4x3/4) VGA.
+  kVGA,       // 640x480
+  kQFULLHD,   // 960x540 = quarter FULLHD, and half(~3/4x3/4) WHD.
+  kWHD,       // 1280x720
+  kFULLHD,    // 1920x1080
   kNumImageTypes
 };
 
-const uint32_t kSizeOfImageType[kNumImageTypes] =
-{ 25344, 57024, 76800, 101376, 172800, 307200, 518400, 921600, 2073600 };
+const uint32_t kSizeOfImageType[kNumImageTypes] = {
+    25344, 57024, 76800, 101376, 172800, 307200, 518400, 921600, 2073600};
 
 enum FrameRateLevelClass {
   kFrameRateLow,
@@ -65,17 +64,10 @@
   kFrameRateHigh
 };
 
-enum ContentLevelClass {
-  kLow,
-  kHigh,
-  kDefault
-};
+enum ContentLevelClass { kLow, kHigh, kDefault };
 
 struct VCMContFeature {
-  VCMContFeature()
-      : value(0.0f),
-        level(kDefault) {
-  }
+  VCMContFeature() : value(0.0f), level(kDefault) {}
   void Reset() {
     value = 0.0f;
     level = kDefault;
@@ -84,43 +76,34 @@
   ContentLevelClass level;
 };
 
-enum UpDownAction {
-  kUpResolution,
-  kDownResolution
-};
+enum UpDownAction { kUpResolution, kDownResolution };
 
 enum SpatialAction {
   kNoChangeSpatial,
-  kOneHalfSpatialUniform,        // 3/4 x 3/4: 9/6 ~1/2 pixel reduction.
-  kOneQuarterSpatialUniform,     // 1/2 x 1/2: 1/4 pixel reduction.
+  kOneHalfSpatialUniform,     // 3/4 x 3/4: 9/16 ~1/2 pixel reduction.
+  kOneQuarterSpatialUniform,  // 1/2 x 1/2: 1/4 pixel reduction.
   kNumModesSpatial
 };
 
 enum TemporalAction {
   kNoChangeTemporal,
-  kTwoThirdsTemporal,     // 2/3 frame rate reduction
-  kOneHalfTemporal,       // 1/2 frame rate reduction
+  kTwoThirdsTemporal,  // 2/3 frame rate reduction
+  kOneHalfTemporal,    // 1/2 frame rate reduction
   kNumModesTemporal
 };
 
 struct ResolutionAction {
-  ResolutionAction()
-      : spatial(kNoChangeSpatial),
-        temporal(kNoChangeTemporal) {
-  }
+  ResolutionAction() : spatial(kNoChangeSpatial), temporal(kNoChangeTemporal) {}
   SpatialAction spatial;
   TemporalAction temporal;
 };
 
 // Down-sampling factors for spatial (width and height), and temporal.
-const float kFactorWidthSpatial[kNumModesSpatial] =
-    { 1.0f, 4.0f / 3.0f, 2.0f };
+const float kFactorWidthSpatial[kNumModesSpatial] = {1.0f, 4.0f / 3.0f, 2.0f};
 
-const float kFactorHeightSpatial[kNumModesSpatial] =
-    { 1.0f, 4.0f / 3.0f, 2.0f };
+const float kFactorHeightSpatial[kNumModesSpatial] = {1.0f, 4.0f / 3.0f, 2.0f};
 
-const float kFactorTemporal[kNumModesTemporal] =
-    { 1.0f, 1.5f, 2.0f };
+const float kFactorTemporal[kNumModesTemporal] = {1.0f, 1.5f, 2.0f};
 
 enum EncoderState {
   kStableEncoding,    // Low rate mis-match, stable buffer levels.
@@ -297,7 +280,7 @@
   // Select the directional (1x2 or 2x1) spatial down-sampling action.
   void SelectSpatialDirectionMode(float transition_rate);
 
-  enum { kDownActionHistorySize = 10};
+  enum { kDownActionHistorySize = 10 };
 
   VCMResolutionScale* qm_;
   // Encoder rate control parameters.
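
The kSizeOfImageType table re-flowed above is what VCMQmMethod::GetImageType() (see qm_select.cc earlier in this CL) matches width * height against. A tiny sketch covering only the exact-match case:

// Sketch only: 640 * 480 = 307200 pixels equals kSizeOfImageType[kVGA], so
// GetImageType(640, 480) classifies the frame as kVGA. Sizes that fall
// between table entries are handled by extra logic in GetImageType that is
// not reproduced here.
#include <stdint.h>

#include "webrtc/modules/video_coding/qm_select.h"

bool IsExactlyVgaSketch(uint16_t width, uint16_t height) {
  return static_cast<uint32_t>(width) * height ==
         webrtc::kSizeOfImageType[webrtc::kVGA];
}
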
diff --git a/webrtc/modules/video_coding/qm_select_data.h b/webrtc/modules/video_coding/qm_select_data.h
index 3f7028a..49190ef 100644
--- a/webrtc/modules/video_coding/qm_select_data.h
+++ b/webrtc/modules/video_coding/qm_select_data.h
@@ -69,36 +69,36 @@
 
 // Frame rate scale for maximum transition rate.
 const float kFrameRateFac[4] = {
-    0.5f,    // Low
-    0.7f,    // Middle level 1
-    0.85f,   // Middle level 2
-    1.0f,    // High
+    0.5f,   // Low
+    0.7f,   // Middle level 1
+    0.85f,  // Middle level 2
+    1.0f,   // High
 };
 
 // Scale for transitional rate: based on content class
 // motion=L/H/D,spatial==L/H/D: for low, high, middle levels
 const float kScaleTransRateQm[18] = {
     // VGA and lower
-    0.40f,       // L, L
-    0.50f,       // L, H
-    0.40f,       // L, D
-    0.60f,       // H ,L
-    0.60f,       // H, H
-    0.60f,       // H, D
-    0.50f,       // D, L
-    0.50f,       // D, D
-    0.50f,       // D, H
+    0.40f,  // L, L
+    0.50f,  // L, H
+    0.40f,  // L, D
+    0.60f,  // H, L
+    0.60f,  // H, H
+    0.60f,  // H, D
+    0.50f,  // D, L
+    0.50f,  // D, D
+    0.50f,  // D, H
 
     // over VGA
-    0.40f,       // L, L
-    0.50f,       // L, H
-    0.40f,       // L, D
+    0.60f,  // H, L
-    0.60f,       // H, H
-    0.60f,       // H, D
-    0.50f,       // D, L
-    0.50f,       // D, D
-    0.50f,       // D, H
+    0.40f,  // L, L
+    0.50f,  // L, H
+    0.40f,  // L, D
+    0.60f,  // H ,L
+    0.60f,  // H, H
+    0.60f,  // H, D
+    0.50f,  // D, L
+    0.50f,  // D, D
+    0.50f,  // D, H
 };
 
 // Threshold on the target rate relative to transitional rate.
@@ -108,73 +108,73 @@
 // motion=L/H/D,spatial==L/H/D, for low, high, middle levels;
 // rate = 0/1/2, for target rate state relative to transition rate.
 const uint8_t kSpatialAction[27] = {
-// rateClass = 0:
-    1,       // L, L
-    1,       // L, H
-    1,       // L, D
-    4,       // H ,L
-    1,       // H, H
-    4,       // H, D
-    4,       // D, L
-    1,       // D, H
-    2,       // D, D
+    // rateClass = 0:
+    1,  // L, L
+    1,  // L, H
+    1,  // L, D
+    4,  // H, L
+    1,  // H, H
+    4,  // H, D
+    4,  // D, L
+    1,  // D, H
+    2,  // D, D
 
-// rateClass = 1:
-    1,       // L, L
-    1,       // L, H
-    1,       // L, D
-    2,       // H ,L
-    1,       // H, H
-    2,       // H, D
-    2,       // D, L
-    1,       // D, H
-    2,       // D, D
+    // rateClass = 1:
+    1,  // L, L
+    1,  // L, H
+    1,  // L, D
+    2,  // H, L
+    1,  // H, H
+    2,  // H, D
+    2,  // D, L
+    1,  // D, H
+    2,  // D, D
 
-// rateClass = 2:
-    1,       // L, L
-    1,       // L, H
-    1,       // L, D
-    2,       // H ,L
-    1,       // H, H
-    2,       // H, D
-    2,       // D, L
-    1,       // D, H
-    2,       // D, D
+    // rateClass = 2:
+    1,  // L, L
+    1,  // L, H
+    1,  // L, D
+    2,  // H, L
+    1,  // H, H
+    2,  // H, D
+    2,  // D, L
+    1,  // D, H
+    2,  // D, D
 };
 
 const uint8_t kTemporalAction[27] = {
-// rateClass = 0:
-    3,       // L, L
-    2,       // L, H
-    2,       // L, D
-    1,       // H ,L
-    3,       // H, H
-    1,       // H, D
-    1,       // D, L
-    2,       // D, H
-    1,       // D, D
+    // rateClass = 0:
+    3,  // L, L
+    2,  // L, H
+    2,  // L, D
+    1,  // H, L
+    3,  // H, H
+    1,  // H, D
+    1,  // D, L
+    2,  // D, H
+    1,  // D, D
 
-// rateClass = 1:
-    3,       // L, L
-    3,       // L, H
-    3,       // L, D
-    1,       // H ,L
-    3,       // H, H
-    1,       // H, D
-    1,       // D, L
-    3,       // D, H
-    1,       // D, D
+    // rateClass = 1:
+    3,  // L, L
+    3,  // L, H
+    3,  // L, D
+    1,  // H, L
+    3,  // H, H
+    1,  // H, D
+    1,  // D, L
+    3,  // D, H
+    1,  // D, D
 
-// rateClass = 2:
-    1,       // L, L
-    3,       // L, H
-    3,       // L, D
-    1,       // H ,L
-    3,       // H, H
-    1,       // H, D
-    1,       // D, L
-    3,       // D, H
-    1,       // D, D
+    // rateClass = 2:
+    1,  // L, L
+    3,  // L, H
+    3,  // L, D
+    1,  // H, L
+    3,  // H, H
+    1,  // H, D
+    1,  // D, L
+    3,  // D, H
+    1,  // D, D
 };
 
 // Control the total amount of down-sampling allowed.
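
The 18-entry kScaleTransRateQm table above is indexed by image class and content class: qm_select.cc earlier in this CL computes content_class_ = 3 * motion_.level + spatial_.level and table_index = image_class * 9 + content_class_, where image_class is 1 for formats above VGA and 0 otherwise. A worked sketch:

// Sketch only. With ContentLevelClass {kLow = 0, kHigh = 1, kDefault = 2},
// high motion and low spatial level give content class 3 * 1 + 0 = 3, so a
// VGA-or-smaller frame (image class 0) uses kScaleTransRateQm[3] = 0.60f.
#include "webrtc/modules/video_coding/qm_select.h"
#include "webrtc/modules/video_coding/qm_select_data.h"

float TransRateScaleSketch(webrtc::ContentLevelClass motion,
                           webrtc::ContentLevelClass spatial,
                           bool above_vga) {
  int content_class = 3 * motion + spatial;
  int image_class = above_vga ? 1 : 0;
  return webrtc::kScaleTransRateQm[image_class * 9 + content_class];
}
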
diff --git a/webrtc/modules/video_coding/qm_select_unittest.cc b/webrtc/modules/video_coding/qm_select_unittest.cc
index 61a12ef..f8542ec 100644
--- a/webrtc/modules/video_coding/qm_select_unittest.cc
+++ b/webrtc/modules/video_coding/qm_select_unittest.cc
@@ -32,10 +32,9 @@
 class QmSelectTest : public ::testing::Test {
  protected:
   QmSelectTest()
-      :  qm_resolution_(new VCMQmResolution()),
-         content_metrics_(new VideoContentMetrics()),
-         qm_scale_(NULL) {
-  }
+      : qm_resolution_(new VCMQmResolution()),
+        content_metrics_(new VideoContentMetrics()),
+        qm_scale_(NULL) {}
   VCMQmResolution* qm_resolution_;
   VideoContentMetrics* content_metrics_;
   VCMResolutionScale* qm_scale_;
@@ -87,8 +86,8 @@
   qm_resolution_->UpdateContent(content_metrics);
   // Content metrics are NULL: Expect success and no down-sampling action.
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0, 1.0, 1.0, 640, 480,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0, 1.0, 1.0, 640, 480, 30.0f));
 }
 
 // TODO(marpan): Add a test for number of temporal layers > 1.
@@ -118,8 +117,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(0, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
 }
 
 // Rate is well below transition, down-sampling action is taken,
@@ -149,40 +148,40 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
 
   qm_resolution_->ResetDownSamplingState();
   // Low motion, low spatial: 2/3 temporal is expected.
   UpdateQmContentData(kTemporalLow, kSpatialLow, kSpatialLow, kSpatialLow);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(0, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
-                                      20.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
 
   qm_resolution_->ResetDownSamplingState();
   // Medium motion, low spatial: 2x2 spatial expected.
   UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
 
   qm_resolution_->ResetDownSamplingState();
   // High motion, high spatial: 2/3 temporal expected.
   UpdateQmContentData(kTemporalHigh, kSpatialHigh, kSpatialHigh, kSpatialHigh);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(4, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
-                                      20.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
 
   qm_resolution_->ResetDownSamplingState();
   // Low motion, high spatial: 1/2 temporal expected.
   UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
-                                      15.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
 
   qm_resolution_->ResetDownSamplingState();
   // Medium motion, high spatial: 1/2 temporal expected.
@@ -190,8 +189,8 @@
                       kSpatialHigh);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
-                                      15.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
 
   qm_resolution_->ResetDownSamplingState();
   // High motion, medium spatial: 2x2 spatial expected.
@@ -200,8 +199,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
   // Target frame rate for frame dropper should be the same as previous == 15.
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
 
   qm_resolution_->ResetDownSamplingState();
   // Low motion, medium spatial: high frame rate, so 1/2 temporal expected.
@@ -209,8 +208,8 @@
                       kSpatialMedium);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(2, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
-                                      15.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
 
   qm_resolution_->ResetDownSamplingState();
   // Medium motion, medium spatial: high frame rate, so 2/3 temporal expected.
@@ -218,8 +217,8 @@
                       kSpatialMedium);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(8, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
-                                      20.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
 }
 
 // Rate mis-match is high, and we have over-shooting.
@@ -249,16 +248,16 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
-                                      1.0f, 480, 360, 30.0f));
+  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+                                      480, 360, 30.0f));
 
   qm_resolution_->ResetDownSamplingState();
   // Low motion, high spatial
   UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
-                                      20.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
 }
 
 // Rate mis-match is high, target rate is below max for down-sampling,
@@ -288,16 +287,16 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
 
   qm_resolution_->ResetDownSamplingState();
   // Low motion, high spatial
   UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
 }
 
 // Buffer is underflowing, and target rate is below max for down-sampling,
@@ -332,16 +331,16 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
-                                      1.0f, 480, 360, 30.0f));
+  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+                                      480, 360, 30.0f));
 
   qm_resolution_->ResetDownSamplingState();
   // Low motion, high spatial
   UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
-                                      20.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
 }
 
 // Target rate is below max for down-sampling, but buffer level is stable,
@@ -376,16 +375,16 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
 
   qm_resolution_->ResetDownSamplingState();
   // Low motion, high spatial
   UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
 }
 
 // Very low rate, but no spatial down-sampling below some size (QCIF).
@@ -414,8 +413,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 176, 144,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 176, 144, 30.0f));
 }
 
 // Very low rate, but no frame reduction below some frame_rate (8fps).
@@ -445,8 +444,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(2, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
-                                      8.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 8.0f));
 }
 
 // Two stages: spatial down-sample and then back up spatially,
@@ -468,7 +467,7 @@
   int incoming_frame_rate[] = {30, 30, 30};
   uint8_t fraction_lost[] = {10, 10, 10};
   UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                    fraction_lost, 3);
+                   fraction_lost, 3);
 
   // Update content: motion level, and 3 spatial prediction errors.
   // High motion, low spatial.
@@ -476,8 +475,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
 
   // Reset and go up in rate: expected to go back up, in 2 stages of 3/4.
   qm_resolution_->ResetRates();
@@ -493,8 +492,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
   float scale = (4.0f / 3.0f) / 2.0f;
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360, 30.0f));
 
   qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
   EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -522,7 +521,7 @@
   int incoming_frame_rate[] = {30, 30, 30};
   uint8_t fraction_lost[] = {10, 10, 10};
   UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                    fraction_lost, 3);
+                   fraction_lost, 3);
 
   // Update content: motion level, and 3 spatial prediction errors.
   // High motion, low spatial.
@@ -530,8 +529,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
 
   // Reset rates and simulate under-shooting scenario: expect to go back up.
   // Goes up spatially in two stages for 1/2x1/2 down-sampling.
@@ -548,8 +547,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
   float scale = (4.0f / 3.0f) / 2.0f;
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360, 30.0f));
 
   qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
   EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -577,7 +576,7 @@
   int incoming_frame_rate[] = {30, 30, 30};
   uint8_t fraction_lost[] = {10, 10, 10};
   UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                    fraction_lost, 3);
+                   fraction_lost, 3);
 
   // Update content: motion level, and 3 spatial prediction errors.
   // High motion, low spatial.
@@ -585,8 +584,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
 
   // Reset and simulate large rate mis-match: expect no action to go back up.
   qm_resolution_->ResetRates();
@@ -601,8 +600,8 @@
                    fraction_lost2, 5);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 320, 240,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 320, 240, 30.0f));
 }
 
 // Two stages: temporally down-sample and then back up temporally,
@@ -632,8 +631,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
-                                      15.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
 
   // Reset rates and go up in rate: expect to go back up.
   qm_resolution_->ResetRates();
@@ -646,8 +645,8 @@
                    fraction_lost2, 5);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480, 30.0f));
 }
 
 // Two stages: temporal down-sample and then back up temporally, since encoder
@@ -669,7 +668,7 @@
   int incoming_frame_rate[] = {30, 30, 30};
   uint8_t fraction_lost[] = {10, 10, 10};
   UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
-                    fraction_lost, 3);
+                   fraction_lost, 3);
 
   // Update content: motion level, and 3 spatial prediction errors.
   // Low motion, high spatial.
@@ -677,8 +676,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
-                                      15.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
 
   // Reset rates and simulate under-shooting scenario: expect to go back up.
   qm_resolution_->ResetRates();
@@ -691,8 +690,8 @@
                    fraction_lost2, 5);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480, 30.0f));
 }
 
 // Two stages: temporal down-sample and then no action to go up,
@@ -736,8 +735,8 @@
                    fraction_lost2, 5);
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
-                                      15.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 15.0f));
 }
 // 3 stages: spatial down-sample, followed by temporal down-sample,
 // and then go up to full state, as encoding rate has increased.
@@ -766,8 +765,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
 
   // Change content data: expect temporal down-sample.
   qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
@@ -780,7 +779,7 @@
   int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
   uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
   UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
-                    fraction_lost2, 5);
+                   fraction_lost2, 5);
 
   // Update content: motion level, and 3 spatial prediction errors.
   // Low motion, high spatial.
@@ -788,8 +787,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240,
-                                      20.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
 
   // Reset rates and go high up in rate: expect to go back up both spatial
   // and temporally. The 1/2x1/2 spatial is undone in two stages.
@@ -806,8 +805,8 @@
   EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
   float scale = (4.0f / 3.0f) / 2.0f;
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f,
-                                      480, 360, 30.0f));
+  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, 480,
+                                      360, 30.0f));
 
   qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
   EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -842,8 +841,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 640, 360,
-              30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 640, 360, 30.0f));
 
   // Reset and lower rates to get another spatial action (3/4x3/4).
   // Lower the frame rate for spatial to be selected again.
@@ -865,8 +864,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
-                                      1.0f, 480, 270, 10.0f));
+  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+                                      480, 270, 10.0f));
 
   // Reset and go to very low rate: no action should be taken,
   // we went down too much already.
@@ -883,8 +882,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 480, 270,
-                                      10.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 480, 270, 10.0f));
 }
 
 // Multiple down-sampling stages and then undo all of them.
@@ -917,8 +916,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
-                                      1.0f, 480, 360, 30.0f));
+  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+                                      480, 360, 30.0f));
   // Go down 2/3 temporal.
   qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
   EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -936,8 +935,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360,
-                                      20.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360, 20.5f));
 
   // Go down 3/4x3/4 spatial:
   qm_resolution_->UpdateCodecParameters(20.0f, 480, 360);
@@ -947,7 +946,7 @@
   int incoming_frame_rate3[] = {20, 20, 20, 20, 20};
   uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
   UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
-                    fraction_lost3, 5);
+                   fraction_lost3, 5);
 
   // Update content: motion level, and 3 spatial prediction errors.
   // High motion, low spatial.
@@ -957,8 +956,8 @@
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
   // The two spatial actions of 3/4x3/4 are converted to 1/2x1/2,
   // so scale factor is 2.0.
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
-                                      20.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 20.0f));
 
   // Reset rates and go high up in rate: expect to go up:
   // 1/2x1/2 spatial and 1/2 temporally.
@@ -1018,8 +1017,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
 
   // Go down 2/3 temporal.
   qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
@@ -1039,8 +1038,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240,
-                                      20.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
 
   // Go up 2/3 temporally.
   qm_resolution_->UpdateCodecParameters(20.0f, 320, 240);
@@ -1076,8 +1075,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240,
-                                      20.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
 
   // Go up spatial and temporal. Spatial undoing is done in 2 stages.
   qm_resolution_->UpdateCodecParameters(20.5f, 320, 240);
@@ -1092,8 +1091,8 @@
 
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   float scale = (4.0f / 3.0f) / 2.0f;
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f,
-                                      480, 360, 30.0f));
+  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, 480,
+                                      360, 30.0f));
 
   qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
   EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -1131,8 +1130,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
-                                      1.0f, 480, 360, 30.0f));
+  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+                                      480, 360, 30.0f));
 
   // Go down 2/3 temporal.
   qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
@@ -1151,8 +1150,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360,
-                                      20.5f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360, 20.5f));
 
   // Go up 2/3 temporal.
   qm_resolution_->UpdateCodecParameters(20.5f, 480, 360);
@@ -1184,8 +1183,8 @@
 
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f,
-                                      1.0f, 640, 480, 30.0f));
+  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
+                                      640, 480, 30.0f));
 }
 
 // Two stages of 3/4x3/4 converted to one stage of 1/2x1/2.
@@ -1215,8 +1214,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
-                                      1.0f, 480, 360, 30.0f));
+  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+                                      480, 360, 30.0f));
 
   // Set rates to go down another 3/4 spatial. Should be converted to 1/2.
   qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
@@ -1235,8 +1234,8 @@
   EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
   EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
   EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
-  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
-                                      30.0f));
+  EXPECT_TRUE(
+      IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
 }
 
 void QmSelectTest::InitQmNativeData(float initial_bit_rate,
@@ -1244,11 +1243,9 @@
                                     int native_width,
                                     int native_height,
                                     int num_layers) {
-  EXPECT_EQ(0, qm_resolution_->Initialize(initial_bit_rate,
-                                          user_frame_rate,
-                                          native_width,
-                                          native_height,
-                                          num_layers));
+  EXPECT_EQ(
+      0, qm_resolution_->Initialize(initial_bit_rate, user_frame_rate,
+                                    native_width, native_height, num_layers));
 }
 
 void QmSelectTest::UpdateQmContentData(float motion_metric,
@@ -1281,8 +1278,7 @@
     float encoder_sent_rate_update = encoder_sent_rate[i];
     float incoming_frame_rate_update = incoming_frame_rate[i];
     uint8_t fraction_lost_update = fraction_lost[i];
-    qm_resolution_->UpdateRates(target_rate_update,
-                                encoder_sent_rate_update,
+    qm_resolution_->UpdateRates(target_rate_update, encoder_sent_rate_update,
                                 incoming_frame_rate_update,
                                 fraction_lost_update);
   }
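
The expectations in the tests above encode spatial down-sampling as fractional scale factors: 2.0 maps 640x480 to 320x240, 4/3 maps 640x480 to 480x360, two 3/4x3/4 steps get folded into a single 1/2x1/2 step, and undoing a 1/2x1/2 step happens in two stages, the first with scale (4/3)/2. The standalone sketch below just reproduces that arithmetic; the dimension math (new dimension = old dimension / scale) is inferred from the test expectations, not lifted from qm_select itself.

#include <cstdio>

// Apply a spatial scale factor the way the tests read it: a factor above 1
// shrinks the frame, a factor below 1 grows it back.
void ApplyScale(float scale, int* width, int* height) {
  *width = static_cast<int>(*width / scale + 0.5f);
  *height = static_cast<int>(*height / scale + 0.5f);
}

int main() {
  int w = 640, h = 480;

  ApplyScale(2.0f, &w, &h);  // 1/2x1/2 down-sample: 320x240.
  std::printf("after 2.0:     %dx%d\n", w, h);

  // Undo the 1/2x1/2 step in two stages; the first stage uses
  // scale = (4/3) / 2 = 2/3, i.e. 320x240 -> 480x360.
  ApplyScale((4.0f / 3.0f) / 2.0f, &w, &h);
  std::printf("after (4/3)/2: %dx%d\n", w, h);

  // The second stage of 3/4 restores the native 640x480.
  ApplyScale(3.0f / 4.0f, &w, &h);
  std::printf("after 3/4:     %dx%d\n", w, h);
  return 0;
}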
diff --git a/webrtc/modules/video_coding/receiver.cc b/webrtc/modules/video_coding/receiver.cc
index 91cdd5e..fa2a2dc 100644
--- a/webrtc/modules/video_coding/receiver.cc
+++ b/webrtc/modules/video_coding/receiver.cc
@@ -14,6 +14,7 @@
 
 #include <cstdlib>
 #include <utility>
+#include <vector>
 
 #include "webrtc/base/logging.h"
 #include "webrtc/base/trace_event.h"
@@ -72,8 +73,8 @@
   // Insert the packet into the jitter buffer. The packet can either be empty or
   // contain media at this point.
   bool retransmitted = false;
-  const VCMFrameBufferEnum ret = jitter_buffer_.InsertPacket(packet,
-                                                             &retransmitted);
+  const VCMFrameBufferEnum ret =
+      jitter_buffer_.InsertPacket(packet, &retransmitted);
   if (ret == kOldPacket) {
     return VCM_OK;
   } else if (ret == kFlushIndicator) {
@@ -96,13 +97,13 @@
 }
 
 VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
-                                               int64_t& next_render_time_ms,
+                                               int64_t* next_render_time_ms,
                                                bool prefer_late_decoding) {
   const int64_t start_time_ms = clock_->TimeInMilliseconds();
   uint32_t frame_timestamp = 0;
   // Exhaust wait time to get a complete frame for decoding.
-  bool found_frame = jitter_buffer_.NextCompleteTimestamp(
-      max_wait_time_ms, &frame_timestamp);
+  bool found_frame =
+      jitter_buffer_.NextCompleteTimestamp(max_wait_time_ms, &frame_timestamp);
 
   if (!found_frame)
     found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(&frame_timestamp);
@@ -114,14 +115,14 @@
   timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());
   const int64_t now_ms = clock_->TimeInMilliseconds();
   timing_->UpdateCurrentDelay(frame_timestamp);
-  next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms);
+  *next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms);
   // Check render timing.
   bool timing_error = false;
   // Assume that render timing errors are due to changes in the video stream.
-  if (next_render_time_ms < 0) {
+  if (*next_render_time_ms < 0) {
     timing_error = true;
-  } else if (std::abs(next_render_time_ms - now_ms) > max_video_delay_ms_) {
-    int frame_delay = static_cast<int>(std::abs(next_render_time_ms - now_ms));
+  } else if (std::abs(*next_render_time_ms - now_ms) > max_video_delay_ms_) {
+    int frame_delay = static_cast<int>(std::abs(*next_render_time_ms - now_ms));
     LOG(LS_WARNING) << "A frame about to be decoded is out of the configured "
                     << "delay bounds (" << frame_delay << " > "
                     << max_video_delay_ms_
@@ -143,12 +144,13 @@
 
   if (prefer_late_decoding) {
     // Decode frame as close as possible to the render timestamp.
-    const int32_t available_wait_time = max_wait_time_ms -
+    const int32_t available_wait_time =
+        max_wait_time_ms -
         static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
-    uint16_t new_max_wait_time = static_cast<uint16_t>(
-        VCM_MAX(available_wait_time, 0));
+    uint16_t new_max_wait_time =
+        static_cast<uint16_t>(VCM_MAX(available_wait_time, 0));
     uint32_t wait_time_ms = timing_->MaxWaitingTime(
-        next_render_time_ms, clock_->TimeInMilliseconds());
+        *next_render_time_ms, clock_->TimeInMilliseconds());
     if (new_max_wait_time < wait_time_ms) {
       // We're not allowed to wait until the frame is supposed to be rendered,
       // waiting as long as we're allowed to avoid busy looping, and then return
@@ -165,9 +167,9 @@
   if (frame == NULL) {
     return NULL;
   }
-  frame->SetRenderTime(next_render_time_ms);
-  TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(),
-                          "SetRenderTS", "render_time", next_render_time_ms);
+  frame->SetRenderTime(*next_render_time_ms);
+  TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(), "SetRenderTS",
+                          "render_time", *next_render_time_ms);
   if (!frame->Complete()) {
     // Update stats for incomplete frames.
     bool retransmitted = false;
@@ -187,8 +189,7 @@
   jitter_buffer_.ReleaseFrame(frame);
 }
 
-void VCMReceiver::ReceiveStatistics(uint32_t* bitrate,
-                                    uint32_t* framerate) {
+void VCMReceiver::ReceiveStatistics(uint32_t* bitrate, uint32_t* framerate) {
   assert(bitrate);
   assert(framerate);
   jitter_buffer_.IncomingRateStatistics(framerate, bitrate);
@@ -210,8 +211,7 @@
 void VCMReceiver::SetNackSettings(size_t max_nack_list_size,
                                   int max_packet_age_to_nack,
                                   int max_incomplete_time_ms) {
-  jitter_buffer_.SetNackSettings(max_nack_list_size,
-                                 max_packet_age_to_nack,
+  jitter_buffer_.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
                                  max_incomplete_time_ms);
 }
 
diff --git a/webrtc/modules/video_coding/receiver.h b/webrtc/modules/video_coding/receiver.h
index a30d16c..ff0eef8 100644
--- a/webrtc/modules/video_coding/receiver.h
+++ b/webrtc/modules/video_coding/receiver.h
@@ -11,6 +11,8 @@
 #ifndef WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
 #define WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
 
+#include <vector>
+
 #include "webrtc/modules/video_coding/jitter_buffer.h"
 #include "webrtc/modules/video_coding/packet.h"
 #include "webrtc/modules/video_coding/timing.h"
@@ -25,9 +27,7 @@
 
 class VCMReceiver {
  public:
-  VCMReceiver(VCMTiming* timing,
-              Clock* clock,
-              EventFactory* event_factory);
+  VCMReceiver(VCMTiming* timing, Clock* clock, EventFactory* event_factory);
 
   // Using this constructor, you can specify a different event factory for the
   // jitter buffer. Useful for unit tests when you want to simulate incoming
@@ -46,7 +46,7 @@
                        uint16_t frame_width,
                        uint16_t frame_height);
   VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
-                                    int64_t& next_render_time_ms,
+                                    int64_t* next_render_time_ms,
                                     bool prefer_late_decoding);
   void ReleaseFrame(VCMEncodedFrame* frame);
   void ReceiveStatistics(uint32_t* bitrate, uint32_t* framerate);
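
The header change above switches next_render_time_ms from a non-const reference to a pointer, the form the style checker wants for output parameters. A minimal caller-side sketch of the new shape of the call, mirroring what the updated receiver unit tests do; FakeFrame and the free function below are stand-ins, not the real VCMReceiver API.

#include <cstdint>
#include <cstdio>

struct FakeFrame {};

// Same shape as VCMReceiver::FrameForDecoding after the change: the render
// time comes back through a pointer argument.
FakeFrame* FrameForDecoding(uint16_t max_wait_time_ms,
                            int64_t* next_render_time_ms,
                            bool prefer_late_decoding) {
  (void)max_wait_time_ms;
  (void)prefer_late_decoding;
  *next_render_time_ms = 1234;  // Placeholder render timestamp.
  static FakeFrame frame;
  return &frame;
}

int main() {
  // The caller now passes the address of a local, as in the updated tests:
  //   receiver_.FrameForDecoding(0, &render_time_ms, false);
  int64_t render_time_ms = 0;
  FakeFrame* frame = FrameForDecoding(0, &render_time_ms, false);
  std::printf("frame %p, render time %lld ms\n", static_cast<void*>(frame),
              static_cast<long long>(render_time_ms));
  return 0;
}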
diff --git a/webrtc/modules/video_coding/receiver_unittest.cc b/webrtc/modules/video_coding/receiver_unittest.cc
index d51b004..1f3a144 100644
--- a/webrtc/modules/video_coding/receiver_unittest.cc
+++ b/webrtc/modules/video_coding/receiver_unittest.cc
@@ -11,6 +11,7 @@
 
 #include <list>
 #include <queue>
+#include <vector>
 
 #include "testing/gtest/include/gtest/gtest.h"
 #include "webrtc/base/checks.h"
@@ -34,14 +35,11 @@
       : clock_(new SimulatedClock(0)),
         timing_(clock_.get()),
         receiver_(&timing_, clock_.get(), &event_factory_) {
-
-    stream_generator_.reset(new
-        StreamGenerator(0, clock_->TimeInMilliseconds()));
+    stream_generator_.reset(
+        new StreamGenerator(0, clock_->TimeInMilliseconds()));
   }
 
-  virtual void SetUp() {
-    receiver_.Reset();
-  }
+  virtual void SetUp() { receiver_.Reset(); }
 
   int32_t InsertPacket(int index) {
     VCMPacket packet;
@@ -79,7 +77,7 @@
   bool DecodeNextFrame() {
     int64_t render_time_ms = 0;
     VCMEncodedFrame* frame =
-        receiver_.FrameForDecoding(0, render_time_ms, false);
+        receiver_.FrameForDecoding(0, &render_time_ms, false);
     if (!frame)
       return false;
     receiver_.ReleaseFrame(frame);
@@ -116,7 +114,7 @@
     EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
   }
   EXPECT_EQ((kNumOfFrames - 1) * kDefaultFramePeriodMs,
-      receiver_.RenderBufferSizeMs());
+            receiver_.RenderBufferSizeMs());
 }
 
 TEST_F(TestVCMReceiver, RenderBufferSize_NotAllComplete) {
@@ -132,7 +130,7 @@
     EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
   }
   EXPECT_EQ((num_of_frames - 1) * kDefaultFramePeriodMs,
-      receiver_.RenderBufferSizeMs());
+            receiver_.RenderBufferSizeMs());
 }
 
 TEST_F(TestVCMReceiver, RenderBufferSize_NoKeyFrame) {
@@ -143,7 +141,7 @@
   }
   int64_t next_render_time_ms = 0;
   VCMEncodedFrame* frame =
-      receiver_.FrameForDecoding(10, next_render_time_ms, false);
+      receiver_.FrameForDecoding(10, &next_render_time_ms, false);
   EXPECT_TRUE(frame == NULL);
   receiver_.ReleaseFrame(frame);
   EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
@@ -161,7 +159,7 @@
   const int kMaxNonDecodableDuration = 500;
   const int kMinDelayMs = 500;
   receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
-      kMaxNonDecodableDuration);
+                            kMaxNonDecodableDuration);
   EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs);
@@ -178,7 +176,7 @@
   const int kMaxPacketAgeToNack = 1000;
   const int kMaxNonDecodableDuration = 500;
   receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
-      kMaxNonDecodableDuration);
+                            kMaxNonDecodableDuration);
   const int kNumFrames = kDefaultFrameRate * kMaxNonDecodableDuration / 1000;
   for (int i = 0; i < kNumFrames; ++i) {
     EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
@@ -194,24 +192,23 @@
   const size_t kMaxNackListSize = 1000;
   const int kMaxPacketAgeToNack = 1000;
   const int kMaxNonDecodableDuration = 500;
-  const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
-      kMaxNonDecodableDuration + 500) / 1000;
+  const int kMaxNonDecodableDurationFrames =
+      (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
   const int kMinDelayMs = 500;
   receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
-      kMaxNonDecodableDuration);
+                            kMaxNonDecodableDuration);
   receiver_.SetMinReceiverDelay(kMinDelayMs);
   int64_t key_frame_inserted = clock_->TimeInMilliseconds();
   EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
   // Insert an incomplete frame.
   EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
   // Insert enough frames to have too long non-decodable sequence.
-  for (int i = 0; i < kMaxNonDecodableDurationFrames;
-       ++i) {
+  for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
     EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
   }
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
-      key_frame_inserted);
+                                  key_frame_inserted);
   EXPECT_TRUE(DecodeNextFrame());
   // Make sure we get a key frame request.
   bool request_key_frame = false;
@@ -225,11 +222,11 @@
   const size_t kMaxNackListSize = 1000;
   const int kMaxPacketAgeToNack = 1000;
   const int kMaxNonDecodableDuration = 500;
-  const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
-      kMaxNonDecodableDuration + 500) / 1000;
+  const int kMaxNonDecodableDurationFrames =
+      (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
   const int kMinDelayMs = 500;
   receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
-      kMaxNonDecodableDuration);
+                            kMaxNonDecodableDuration);
   receiver_.SetMinReceiverDelay(kMinDelayMs);
   int64_t key_frame_inserted = clock_->TimeInMilliseconds();
   EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
@@ -237,13 +234,12 @@
   EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
   // Insert all but one frame to not trigger a key frame request due to
   // too long duration of non-decodable frames.
-  for (int i = 0; i < kMaxNonDecodableDurationFrames - 1;
-       ++i) {
+  for (int i = 0; i < kMaxNonDecodableDurationFrames - 1; ++i) {
     EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
   }
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
-      key_frame_inserted);
+                                  key_frame_inserted);
   EXPECT_TRUE(DecodeNextFrame());
   // Make sure we don't get a key frame request since we haven't generated
   // enough frames.
@@ -258,25 +254,24 @@
   const size_t kMaxNackListSize = 1000;
   const int kMaxPacketAgeToNack = 1000;
   const int kMaxNonDecodableDuration = 500;
-  const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
-      kMaxNonDecodableDuration + 500) / 1000;
+  const int kMaxNonDecodableDurationFrames =
+      (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
   const int kMinDelayMs = 500;
   receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
-      kMaxNonDecodableDuration);
+                            kMaxNonDecodableDuration);
   receiver_.SetMinReceiverDelay(kMinDelayMs);
   int64_t key_frame_inserted = clock_->TimeInMilliseconds();
   EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
   // Insert enough frames to have too long non-decodable sequence, except that
   // we don't have any losses.
-  for (int i = 0; i < kMaxNonDecodableDurationFrames;
-       ++i) {
+  for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
     EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
   }
   // Insert an incomplete frame.
   EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
-      key_frame_inserted);
+                                  key_frame_inserted);
   EXPECT_TRUE(DecodeNextFrame());
   // Make sure we don't get a key frame request since the non-decodable duration
   // is only one frame.
@@ -291,25 +286,24 @@
   const size_t kMaxNackListSize = 1000;
   const int kMaxPacketAgeToNack = 1000;
   const int kMaxNonDecodableDuration = 500;
-  const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
-      kMaxNonDecodableDuration + 500) / 1000;
+  const int kMaxNonDecodableDurationFrames =
+      (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
   const int kMinDelayMs = 500;
   receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
-      kMaxNonDecodableDuration);
+                            kMaxNonDecodableDuration);
   receiver_.SetMinReceiverDelay(kMinDelayMs);
   int64_t key_frame_inserted = clock_->TimeInMilliseconds();
   EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
   // Insert an incomplete frame.
   EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
   // Insert enough frames to have too long non-decodable sequence.
-  for (int i = 0; i < kMaxNonDecodableDurationFrames;
-       ++i) {
+  for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
     EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
   }
   EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
   // Advance time until it's time to decode the key frame.
   clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
-      key_frame_inserted);
+                                  key_frame_inserted);
   EXPECT_TRUE(DecodeNextFrame());
   // Make sure we don't get a key frame request since we have a key frame
   // in the list.
@@ -340,7 +334,7 @@
   // Return true if some frame arrives between now and now+|milliseconds|.
   bool AdvanceTimeMilliseconds(int64_t milliseconds, bool stop_on_frame) {
     return AdvanceTimeMicroseconds(milliseconds * 1000, stop_on_frame);
-  };
+  }
 
   bool AdvanceTimeMicroseconds(int64_t microseconds, bool stop_on_frame) {
     int64_t start_time = TimeInMicroseconds();
@@ -364,7 +358,7 @@
       SimulatedClock::AdvanceTimeMicroseconds(end_time - TimeInMicroseconds());
     }
     return frame_injected;
-  };
+  }
 
   // Input timestamps are in unit Milliseconds.
   // And |arrive_timestamps| must be positive and in increasing order.
@@ -431,7 +425,7 @@
 
   bool Set() override { return true; }
 
-  EventTypeWrapper Wait(unsigned long max_time) override {
+  EventTypeWrapper Wait(unsigned long max_time) override {  // NOLINT
     if (clock_->AdvanceTimeMilliseconds(max_time, stop_on_frame_) &&
         stop_on_frame_) {
       return EventTypeWrapper::kEventSignaled;
@@ -447,7 +441,6 @@
 
 class VCMReceiverTimingTest : public ::testing::Test {
  protected:
-
   VCMReceiverTimingTest()
 
       : clock_(&stream_generator_, &receiver_),
@@ -460,7 +453,6 @@
             rtc::scoped_ptr<EventWrapper>(
                 new FrameInjectEvent(&clock_, true))) {}
 
-
   virtual void SetUp() { receiver_.Reset(); }
 
   SimulatedClockWithFrames clock_;
@@ -506,7 +498,7 @@
   while (num_frames_return < kNumFrames) {
     int64_t start_time = clock_.TimeInMilliseconds();
     VCMEncodedFrame* frame =
-        receiver_.FrameForDecoding(kMaxWaitTime, next_render_time, false);
+        receiver_.FrameForDecoding(kMaxWaitTime, &next_render_time, false);
     int64_t end_time = clock_.TimeInMilliseconds();
 
     // In any case the FrameForDecoding should not wait longer than
@@ -566,9 +558,8 @@
   while (num_frames_return < kNumFrames) {
     int64_t start_time = clock_.TimeInMilliseconds();
 
-    VCMEncodedFrame* frame =
-        receiver_.FrameForDecoding(kMaxWaitTime, next_render_time,
-                                   prefer_late_decoding);
+    VCMEncodedFrame* frame = receiver_.FrameForDecoding(
+        kMaxWaitTime, &next_render_time, prefer_late_decoding);
     int64_t end_time = clock_.TimeInMilliseconds();
     if (frame) {
       EXPECT_EQ(frame->RenderTimeMs() - max_decode_ms - render_delay_ms,
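
Several tests above size their frame loops with (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000, which is integer division rounded to the nearest frame rather than truncated. A quick check of that arithmetic; the 30 fps value is assumed here for illustration only, kDefaultFrameRate itself is defined elsewhere in the test code.

#include <cstdio>

int main() {
  const int kAssumedDefaultFrameRate = 30;   // Assumption for this example.
  const int kMaxNonDecodableDuration = 500;  // ms, as in the tests.

  const int truncated =
      kAssumedDefaultFrameRate * kMaxNonDecodableDuration / 1000;
  const int rounded =
      (kAssumedDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
  std::printf("30 fps: truncated %d frames, rounded %d frames\n", truncated,
              rounded);

  // At 29 fps the two forms differ: 14500 / 1000 truncates to 14, while
  // adding 500 first rounds 14.5 up to 15.
  std::printf("29 fps: truncated %d, rounded %d\n", 29 * 500 / 1000,
              (29 * 500 + 500) / 1000);
  return 0;
}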
diff --git a/webrtc/modules/video_coding/rtt_filter.cc b/webrtc/modules/video_coding/rtt_filter.cc
index 30a6946..742f70f 100644
--- a/webrtc/modules/video_coding/rtt_filter.cc
+++ b/webrtc/modules/video_coding/rtt_filter.cc
@@ -8,13 +8,14 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "webrtc/modules/video_coding/internal_defines.h"
 #include "webrtc/modules/video_coding/rtt_filter.h"
 
 #include <math.h>
 #include <stdlib.h>
 #include <string.h>
 
+#include "webrtc/modules/video_coding/internal_defines.h"
+
 namespace webrtc {
 
 VCMRttFilter::VCMRttFilter()
@@ -22,181 +23,143 @@
       _jumpStdDevs(2.5),
       _driftStdDevs(3.5),
       _detectThreshold(kMaxDriftJumpCount) {
-    Reset();
+  Reset();
 }
 
-VCMRttFilter&
-VCMRttFilter::operator=(const VCMRttFilter& rhs)
-{
-    if (this != &rhs)
-    {
-        _gotNonZeroUpdate = rhs._gotNonZeroUpdate;
-        _avgRtt = rhs._avgRtt;
-        _varRtt = rhs._varRtt;
-        _maxRtt = rhs._maxRtt;
-        _filtFactCount = rhs._filtFactCount;
-        _jumpCount = rhs._jumpCount;
-        _driftCount = rhs._driftCount;
-        memcpy(_jumpBuf, rhs._jumpBuf, sizeof(_jumpBuf));
-        memcpy(_driftBuf, rhs._driftBuf, sizeof(_driftBuf));
+VCMRttFilter& VCMRttFilter::operator=(const VCMRttFilter& rhs) {
+  if (this != &rhs) {
+    _gotNonZeroUpdate = rhs._gotNonZeroUpdate;
+    _avgRtt = rhs._avgRtt;
+    _varRtt = rhs._varRtt;
+    _maxRtt = rhs._maxRtt;
+    _filtFactCount = rhs._filtFactCount;
+    _jumpCount = rhs._jumpCount;
+    _driftCount = rhs._driftCount;
+    memcpy(_jumpBuf, rhs._jumpBuf, sizeof(_jumpBuf));
+    memcpy(_driftBuf, rhs._driftBuf, sizeof(_driftBuf));
+  }
+  return *this;
+}
+
+void VCMRttFilter::Reset() {
+  _gotNonZeroUpdate = false;
+  _avgRtt = 0;
+  _varRtt = 0;
+  _maxRtt = 0;
+  _filtFactCount = 1;
+  _jumpCount = 0;
+  _driftCount = 0;
+  memset(_jumpBuf, 0, kMaxDriftJumpCount);
+  memset(_driftBuf, 0, kMaxDriftJumpCount);
+}
+
+void VCMRttFilter::Update(int64_t rttMs) {
+  if (!_gotNonZeroUpdate) {
+    if (rttMs == 0) {
+      return;
     }
-    return *this;
+    _gotNonZeroUpdate = true;
+  }
+
+  // Sanity check
+  if (rttMs > 3000) {
+    rttMs = 3000;
+  }
+
+  double filtFactor = 0;
+  if (_filtFactCount > 1) {
+    filtFactor = static_cast<double>(_filtFactCount - 1) / _filtFactCount;
+  }
+  _filtFactCount++;
+  if (_filtFactCount > _filtFactMax) {
+    // This prevents filtFactor from going above
+    // (_filtFactMax - 1) / _filtFactMax,
+    // e.g., _filtFactMax = 50 => filtFactor = 49/50 = 0.98
+    _filtFactCount = _filtFactMax;
+  }
+  double oldAvg = _avgRtt;
+  double oldVar = _varRtt;
+  _avgRtt = filtFactor * _avgRtt + (1 - filtFactor) * rttMs;
+  _varRtt = filtFactor * _varRtt +
+            (1 - filtFactor) * (rttMs - _avgRtt) * (rttMs - _avgRtt);
+  _maxRtt = VCM_MAX(rttMs, _maxRtt);
+  if (!JumpDetection(rttMs) || !DriftDetection(rttMs)) {
+    // In some cases we don't want to update the statistics
+    _avgRtt = oldAvg;
+    _varRtt = oldVar;
+  }
 }
 
-void
-VCMRttFilter::Reset()
-{
-    _gotNonZeroUpdate = false;
-    _avgRtt = 0;
-    _varRtt = 0;
-    _maxRtt = 0;
-    _filtFactCount = 1;
+bool VCMRttFilter::JumpDetection(int64_t rttMs) {
+  double diffFromAvg = _avgRtt - rttMs;
+  if (fabs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt)) {
+    int diffSign = (diffFromAvg >= 0) ? 1 : -1;
+    int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
+    if (diffSign != jumpCountSign) {
+      // Since the signs differ the samples currently
+      // in the buffer is useless as they represent a
+      // jump in a different direction.
+      _jumpCount = 0;
+    }
+    if (abs(_jumpCount) < kMaxDriftJumpCount) {
+      // Update the buffer used for the short time
+      // statistics.
+      // The sign of the diff is used for updating the counter since
+      // we want to use the same buffer for keeping track of when
+      // the RTT jumps down and up.
+      _jumpBuf[abs(_jumpCount)] = rttMs;
+      _jumpCount += diffSign;
+    }
+    if (abs(_jumpCount) >= _detectThreshold) {
+      // Detected an RTT jump
+      ShortRttFilter(_jumpBuf, abs(_jumpCount));
+      _filtFactCount = _detectThreshold + 1;
+      _jumpCount = 0;
+    } else {
+      return false;
+    }
+  } else {
     _jumpCount = 0;
+  }
+  return true;
+}
+
+bool VCMRttFilter::DriftDetection(int64_t rttMs) {
+  if (_maxRtt - _avgRtt > _driftStdDevs * sqrt(_varRtt)) {
+    if (_driftCount < kMaxDriftJumpCount) {
+      // Update the buffer used for the short time
+      // statistics.
+      _driftBuf[_driftCount] = rttMs;
+      _driftCount++;
+    }
+    if (_driftCount >= _detectThreshold) {
+      // Detected an RTT drift
+      ShortRttFilter(_driftBuf, _driftCount);
+      _filtFactCount = _detectThreshold + 1;
+      _driftCount = 0;
+    }
+  } else {
     _driftCount = 0;
-    memset(_jumpBuf, 0, kMaxDriftJumpCount);
-    memset(_driftBuf, 0, kMaxDriftJumpCount);
+  }
+  return true;
 }
 
-void
-VCMRttFilter::Update(int64_t rttMs)
-{
-    if (!_gotNonZeroUpdate)
-    {
-        if (rttMs == 0)
-        {
-            return;
-        }
-        _gotNonZeroUpdate = true;
+void VCMRttFilter::ShortRttFilter(int64_t* buf, uint32_t length) {
+  if (length == 0) {
+    return;
+  }
+  _maxRtt = 0;
+  _avgRtt = 0;
+  for (uint32_t i = 0; i < length; i++) {
+    if (buf[i] > _maxRtt) {
+      _maxRtt = buf[i];
     }
-
-    // Sanity check
-    if (rttMs > 3000)
-    {
-        rttMs = 3000;
-    }
-
-    double filtFactor = 0;
-    if (_filtFactCount > 1)
-    {
-        filtFactor = static_cast<double>(_filtFactCount - 1) / _filtFactCount;
-    }
-    _filtFactCount++;
-    if (_filtFactCount > _filtFactMax)
-    {
-        // This prevents filtFactor from going above
-        // (_filtFactMax - 1) / _filtFactMax,
-        // e.g., _filtFactMax = 50 => filtFactor = 49/50 = 0.98
-        _filtFactCount = _filtFactMax;
-    }
-    double oldAvg = _avgRtt;
-    double oldVar = _varRtt;
-    _avgRtt = filtFactor * _avgRtt + (1 - filtFactor) * rttMs;
-    _varRtt = filtFactor * _varRtt + (1 - filtFactor) *
-                (rttMs - _avgRtt) * (rttMs - _avgRtt);
-    _maxRtt = VCM_MAX(rttMs, _maxRtt);
-    if (!JumpDetection(rttMs) || !DriftDetection(rttMs))
-    {
-        // In some cases we don't want to update the statistics
-        _avgRtt = oldAvg;
-        _varRtt = oldVar;
-    }
+    _avgRtt += buf[i];
+  }
+  _avgRtt = _avgRtt / static_cast<double>(length);
 }
 
-bool
-VCMRttFilter::JumpDetection(int64_t rttMs)
-{
-    double diffFromAvg = _avgRtt - rttMs;
-    if (fabs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt))
-    {
-        int diffSign = (diffFromAvg >= 0) ? 1 : -1;
-        int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
-        if (diffSign != jumpCountSign)
-        {
-            // Since the signs differ the samples currently
-            // in the buffer is useless as they represent a
-            // jump in a different direction.
-            _jumpCount = 0;
-        }
-        if (abs(_jumpCount) < kMaxDriftJumpCount)
-        {
-            // Update the buffer used for the short time
-            // statistics.
-            // The sign of the diff is used for updating the counter since
-            // we want to use the same buffer for keeping track of when
-            // the RTT jumps down and up.
-            _jumpBuf[abs(_jumpCount)] = rttMs;
-            _jumpCount += diffSign;
-        }
-        if (abs(_jumpCount) >= _detectThreshold)
-        {
-            // Detected an RTT jump
-            ShortRttFilter(_jumpBuf, abs(_jumpCount));
-            _filtFactCount = _detectThreshold + 1;
-            _jumpCount = 0;
-        }
-        else
-        {
-            return false;
-        }
-    }
-    else
-    {
-        _jumpCount = 0;
-    }
-    return true;
+int64_t VCMRttFilter::RttMs() const {
+  return static_cast<int64_t>(_maxRtt + 0.5);
 }
-
-bool
-VCMRttFilter::DriftDetection(int64_t rttMs)
-{
-    if (_maxRtt - _avgRtt > _driftStdDevs * sqrt(_varRtt))
-    {
-        if (_driftCount < kMaxDriftJumpCount)
-        {
-            // Update the buffer used for the short time
-            // statistics.
-            _driftBuf[_driftCount] = rttMs;
-            _driftCount++;
-        }
-        if (_driftCount >= _detectThreshold)
-        {
-            // Detected an RTT drift
-            ShortRttFilter(_driftBuf, _driftCount);
-            _filtFactCount = _detectThreshold + 1;
-            _driftCount = 0;
-        }
-    }
-    else
-    {
-        _driftCount = 0;
-    }
-    return true;
-}
-
-void
-VCMRttFilter::ShortRttFilter(int64_t* buf, uint32_t length)
-{
-    if (length == 0)
-    {
-        return;
-    }
-    _maxRtt = 0;
-    _avgRtt = 0;
-    for (uint32_t i=0; i < length; i++)
-    {
-        if (buf[i] > _maxRtt)
-        {
-            _maxRtt = buf[i];
-        }
-        _avgRtt += buf[i];
-    }
-    _avgRtt = _avgRtt / static_cast<double>(length);
-}
-
-int64_t
-VCMRttFilter::RttMs() const
-{
-    return static_cast<int64_t>(_maxRtt + 0.5);
-}
-
-}
+}  // namespace webrtc
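For context on the logic being reformatted in rtt_filter.cc: VCMRttFilter keeps an exponentially weighted average and variance of the reported RTT, clamps each sample to 3000 ms, and tracks the maximum; the jump and drift detectors roll the average and variance back and re-seed the statistics from a short sample buffer (ShortRttFilter) whenever the RTT level shifts. The stand-alone sketch below shows only the smoothing step; the struct, its names, and its initial count are illustrative rather than WebRTC code, and the cap of 50 is taken from the "_filtFactMax = 50 => filtFactor = 49/50 = 0.98" example comment above.

#include <algorithm>
#include <cstdint>

struct RttSmoother {
  double avg = 0.0;
  double var = 0.0;
  int64_t max = 0;
  uint32_t count = 1;  // Illustrative start; the real filter seeds this in Reset().
  static constexpr uint32_t kCountMax = 50;

  void Update(int64_t rtt_ms) {
    rtt_ms = std::min<int64_t>(rtt_ms, 3000);  // Same sanity clamp as above.
    double f = 0.0;
    if (count > 1)
      f = static_cast<double>(count - 1) / count;
    // Capping the count keeps f <= (kCountMax - 1) / kCountMax, so each new
    // sample always retains at least 1 / kCountMax weight.
    ++count;
    if (count > kCountMax)
      count = kCountMax;
    avg = f * avg + (1 - f) * rtt_ms;
    var = f * var + (1 - f) * (rtt_ms - avg) * (rtt_ms - avg);
    max = std::max<int64_t>(rtt_ms, max);
  }
};

With the (count - 1) / count weight the filter behaves like a plain running mean until the count reaches its cap, after which it becomes a fixed-coefficient exponential filter, which is what the 49/50 = 0.98 example in the removed comment describes.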
diff --git a/webrtc/modules/video_coding/rtt_filter.h b/webrtc/modules/video_coding/rtt_filter.h
index 9e14a1a..f5de532 100644
--- a/webrtc/modules/video_coding/rtt_filter.h
+++ b/webrtc/modules/video_coding/rtt_filter.h
@@ -13,56 +13,54 @@
 
 #include "webrtc/typedefs.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
-class VCMRttFilter
-{
-public:
-    VCMRttFilter();
+class VCMRttFilter {
+ public:
+  VCMRttFilter();
 
-    VCMRttFilter& operator=(const VCMRttFilter& rhs);
+  VCMRttFilter& operator=(const VCMRttFilter& rhs);
 
-    // Resets the filter.
-    void Reset();
-    // Updates the filter with a new sample.
-    void Update(int64_t rttMs);
-    // A getter function for the current RTT level in ms.
-    int64_t RttMs() const;
+  // Resets the filter.
+  void Reset();
+  // Updates the filter with a new sample.
+  void Update(int64_t rttMs);
+  // A getter function for the current RTT level in ms.
+  int64_t RttMs() const;
 
-private:
-    // The size of the drift and jump memory buffers
-    // and thus also the detection threshold for these
-    // detectors in number of samples.
-    enum { kMaxDriftJumpCount = 5 };
-    // Detects RTT jumps by comparing the difference between
-    // samples and average to the standard deviation.
-    // Returns true if the long time statistics should be updated
-    // and false otherwise
-    bool JumpDetection(int64_t rttMs);
-    // Detects RTT drifts by comparing the difference between
-    // max and average to the standard deviation.
-    // Returns true if the long time statistics should be updated
-    // and false otherwise
-    bool DriftDetection(int64_t rttMs);
-    // Computes the short time average and maximum of the vector buf.
-    void ShortRttFilter(int64_t* buf, uint32_t length);
+ private:
+  // The size of the drift and jump memory buffers
+  // and thus also the detection threshold for these
+  // detectors in number of samples.
+  enum { kMaxDriftJumpCount = 5 };
+  // Detects RTT jumps by comparing the difference between
+  // samples and average to the standard deviation.
+  // Returns true if the long-term statistics should be updated
+  // and false otherwise.
+  bool JumpDetection(int64_t rttMs);
+  // Detects RTT drifts by comparing the difference between
+  // max and average to the standard deviation.
+  // Returns true if the long-term statistics should be updated
+  // and false otherwise.
+  bool DriftDetection(int64_t rttMs);
+  // Computes the short time average and maximum of the vector buf.
+  void ShortRttFilter(int64_t* buf, uint32_t length);
 
-    bool                  _gotNonZeroUpdate;
-    double                _avgRtt;
-    double                _varRtt;
-    int64_t         _maxRtt;
-    uint32_t        _filtFactCount;
-    const uint32_t  _filtFactMax;
-    const double          _jumpStdDevs;
-    const double          _driftStdDevs;
-    int32_t         _jumpCount;
-    int32_t         _driftCount;
-    const int32_t   _detectThreshold;
-    int64_t         _jumpBuf[kMaxDriftJumpCount];
-    int64_t         _driftBuf[kMaxDriftJumpCount];
+  bool _gotNonZeroUpdate;
+  double _avgRtt;
+  double _varRtt;
+  int64_t _maxRtt;
+  uint32_t _filtFactCount;
+  const uint32_t _filtFactMax;
+  const double _jumpStdDevs;
+  const double _driftStdDevs;
+  int32_t _jumpCount;
+  int32_t _driftCount;
+  const int32_t _detectThreshold;
+  int64_t _jumpBuf[kMaxDriftJumpCount];
+  int64_t _driftBuf[kMaxDriftJumpCount];
 };
 
 }  // namespace webrtc
 
-#endif // WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
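The header above boils down to a three-call interface: Reset() clears the statistics, Update() absorbs one RTT sample, and RttMs() reports the filtered value, which per rtt_filter.cc above is the tracked maximum rounded to the nearest millisecond. A hypothetical caller (function and parameter names are ours, not WebRTC's) could look like:

#include "webrtc/modules/video_coding/rtt_filter.h"

// Feed one round-trip-time report into the filter and read back the current
// filtered estimate.
int64_t OnRttReport(webrtc::VCMRttFilter* filter, int64_t raw_rtt_ms) {
  filter->Update(raw_rtt_ms);  // Jump/drift detection happens inside Update().
  return filter->RttMs();      // Filtered estimate (tracked maximum), in ms.
}

Judging from the _gotNonZeroUpdate member, zero-valued samples appear to be ignored until the first non-zero report arrives, so a caller does not need to screen those out itself.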
diff --git a/webrtc/modules/video_coding/session_info.cc b/webrtc/modules/video_coding/session_info.cc
index bd204de..8701098 100644
--- a/webrtc/modules/video_coding/session_info.cc
+++ b/webrtc/modules/video_coding/session_info.cc
@@ -32,8 +32,7 @@
       empty_seq_num_low_(-1),
       empty_seq_num_high_(-1),
       first_packet_seq_num_(-1),
-      last_packet_seq_num_(-1) {
-}
+      last_packet_seq_num_(-1) {}
 
 void VCMSessionInfo::UpdateDataPointers(const uint8_t* old_base_ptr,
                                         const uint8_t* new_base_ptr) {
@@ -88,8 +87,8 @@
   if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
     return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
   } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
-    return
-        packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
+    return packets_.front()
+        .codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
   } else {
     return false;
   }
@@ -193,9 +192,7 @@
     while (nalu_ptr < packet_buffer + packet.sizeBytes) {
       size_t length = BufferToUWord16(nalu_ptr);
       nalu_ptr += kLengthFieldLength;
-      frame_buffer_ptr += Insert(nalu_ptr,
-                                 length,
-                                 packet.insertStartCode,
+      frame_buffer_ptr += Insert(nalu_ptr, length, packet.insertStartCode,
                                  const_cast<uint8_t*>(frame_buffer_ptr));
       nalu_ptr += length;
     }
@@ -203,14 +200,12 @@
     return packet.sizeBytes;
   }
   ShiftSubsequentPackets(
-      packet_it,
-      packet.sizeBytes +
-          (packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
+      packet_it, packet.sizeBytes +
+                     (packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
 
-  packet.sizeBytes = Insert(packet_buffer,
-                            packet.sizeBytes,
-                            packet.insertStartCode,
-                            const_cast<uint8_t*>(packet.dataPtr));
+  packet.sizeBytes =
+      Insert(packet_buffer, packet.sizeBytes, packet.insertStartCode,
+             const_cast<uint8_t*>(packet.dataPtr));
   return packet.sizeBytes;
 }
 
@@ -223,8 +218,7 @@
     memcpy(frame_buffer, startCode, kH264StartCodeLengthBytes);
   }
   memcpy(frame_buffer + (insert_start_code ? kH264StartCodeLengthBytes : 0),
-         buffer,
-         length);
+         buffer, length);
   length += (insert_start_code ? kH264StartCodeLengthBytes : 0);
 
   return length;
@@ -276,13 +270,12 @@
   // thresholds.
   const float kLowPacketPercentageThreshold = 0.2f;
   const float kHighPacketPercentageThreshold = 0.8f;
-  if (frame_data.rtt_ms < kRttThreshold
-      || frame_type_ == kVideoFrameKey
-      || !HaveFirstPacket()
-      || (NumPackets() <= kHighPacketPercentageThreshold
-                          * frame_data.rolling_average_packets_per_frame
-          && NumPackets() > kLowPacketPercentageThreshold
-                            * frame_data.rolling_average_packets_per_frame))
+  if (frame_data.rtt_ms < kRttThreshold || frame_type_ == kVideoFrameKey ||
+      !HaveFirstPacket() ||
+      (NumPackets() <= kHighPacketPercentageThreshold *
+                           frame_data.rolling_average_packets_per_frame &&
+       NumPackets() > kLowPacketPercentageThreshold *
+                          frame_data.rolling_average_packets_per_frame))
     return;
 
   decodable_ = true;
@@ -308,7 +301,7 @@
   // Find the end of the NAL unit.
   for (; packet_it != packets_.end(); ++packet_it) {
     if (((*packet_it).completeNALU == kNaluComplete &&
-        (*packet_it).sizeBytes > 0) ||
+         (*packet_it).sizeBytes > 0) ||
         // Found next NALU.
         (*packet_it).completeNALU == kNaluStart)
       return --packet_it;
@@ -348,7 +341,7 @@
   memset(fragmentation->fragmentationLength, 0,
          kMaxVP8Partitions * sizeof(size_t));
   if (packets_.empty())
-      return new_length;
+    return new_length;
   PacketIterator it = FindNextPartitionBeginning(packets_.begin());
   while (it != packets_.end()) {
     const int partition_id =
@@ -371,7 +364,7 @@
   // Set all empty fragments to start where the previous fragment ends,
   // and have zero length.
   if (fragmentation->fragmentationLength[0] == 0)
-      fragmentation->fragmentationOffset[0] = 0;
+    fragmentation->fragmentationOffset[0] = 0;
   for (int i = 1; i < fragmentation->fragmentationVectorSize; ++i) {
     if (fragmentation->fragmentationLength[i] == 0)
       fragmentation->fragmentationOffset[i] =
@@ -379,7 +372,7 @@
           fragmentation->fragmentationLength[i - 1];
     assert(i == 0 ||
            fragmentation->fragmentationOffset[i] >=
-           fragmentation->fragmentationOffset[i - 1]);
+               fragmentation->fragmentationOffset[i - 1]);
   }
   assert(new_length <= frame_buffer_length);
   return new_length;
@@ -424,8 +417,8 @@
   // If the two iterators are pointing to the same packet they are considered
   // to be in sequence.
   return (packet_it == prev_packet_it ||
-      (static_cast<uint16_t>((*prev_packet_it).seqNum + 1) ==
-          (*packet_it).seqNum));
+          (static_cast<uint16_t>((*prev_packet_it).seqNum + 1) ==
+           (*packet_it).seqNum));
 }
 
 size_t VCMSessionInfo::MakeDecodable() {
@@ -435,8 +428,7 @@
   }
   PacketIterator it = packets_.begin();
   // Make sure we remove the first NAL unit if it's not decodable.
-  if ((*it).completeNALU == kNaluIncomplete ||
-      (*it).completeNALU == kNaluEnd) {
+  if ((*it).completeNALU == kNaluIncomplete || (*it).completeNALU == kNaluEnd) {
     PacketIterator nalu_end = FindNaluEnd(it);
     return_length += DeletePacketData(it, nalu_end);
     it = nalu_end;
@@ -445,7 +437,7 @@
   // Take care of the rest of the NAL units.
   for (; it != packets_.end(); ++it) {
     bool start_of_nalu = ((*it).completeNALU == kNaluStart ||
-        (*it).completeNALU == kNaluComplete);
+                          (*it).completeNALU == kNaluComplete);
     if (!start_of_nalu && !InSequence(it, prev_it)) {
       // Found a sequence number gap due to packet loss.
       PacketIterator nalu_end = FindNaluEnd(it);
@@ -463,18 +455,15 @@
   decodable_ = false;
 }
 
-bool
-VCMSessionInfo::HaveFirstPacket() const {
+bool VCMSessionInfo::HaveFirstPacket() const {
   return !packets_.empty() && (first_packet_seq_num_ != -1);
 }
 
-bool
-VCMSessionInfo::HaveLastPacket() const {
+bool VCMSessionInfo::HaveLastPacket() const {
   return !packets_.empty() && (last_packet_seq_num_ != -1);
 }
 
-bool
-VCMSessionInfo::session_nack() const {
+bool VCMSessionInfo::session_nack() const {
   return session_nack_;
 }
 
@@ -502,8 +491,8 @@
       break;
 
   // Check for duplicate packets.
-  if (rit != packets_.rend() &&
-      (*rit).seqNum == packet.seqNum && (*rit).sizeBytes > 0)
+  if (rit != packets_.rend() && (*rit).seqNum == packet.seqNum &&
+      (*rit).sizeBytes > 0)
     return -2;
 
   if (packet.codec == kVideoCodecH264) {
@@ -572,8 +561,8 @@
     empty_seq_num_high_ = seq_num;
   else
     empty_seq_num_high_ = LatestSequenceNumber(seq_num, empty_seq_num_high_);
-  if (empty_seq_num_low_ == -1 || IsNewerSequenceNumber(empty_seq_num_low_,
-                                                        seq_num))
+  if (empty_seq_num_low_ == -1 ||
+      IsNewerSequenceNumber(empty_seq_num_low_, seq_num))
     empty_seq_num_low_ = seq_num;
 }
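One detail worth noting in the session_info.cc changes above: InSequence() decides whether two packets are consecutive using 16-bit wrap-around arithmetic rather than a plain "+ 1" comparison, so a packet numbered 0x0000 still counts as following 0xFFFF. A minimal stand-alone illustration (the function name is ours, not WebRTC's):

#include <cstdint>

// Mirrors the comparison in VCMSessionInfo::InSequence() above: the previous
// sequence number is incremented modulo 2^16 before comparing, so the wrap
// from 0xFFFF to 0x0000 is still treated as "in sequence".
bool FollowsDirectly(uint16_t prev_seq, uint16_t seq) {
  return static_cast<uint16_t>(prev_seq + 1) == seq;
}

// FollowsDirectly(0xFFFF, 0x0000) == true; FollowsDirectly(10, 12) == false.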
 
diff --git a/webrtc/modules/video_coding/session_info.h b/webrtc/modules/video_coding/session_info.h
index b2f1df0..e9ff251 100644
--- a/webrtc/modules/video_coding/session_info.h
+++ b/webrtc/modules/video_coding/session_info.h
@@ -116,8 +116,7 @@
   PacketIterator FindPartitionEnd(PacketIterator it) const;
   static bool InSequence(const PacketIterator& it,
                          const PacketIterator& prev_it);
-  size_t InsertBuffer(uint8_t* frame_buffer,
-                      PacketIterator packetIterator);
+  size_t InsertBuffer(uint8_t* frame_buffer, PacketIterator packetIterator);
   size_t Insert(const uint8_t* buffer,
                 size_t length,
                 bool insert_start_code,
@@ -126,8 +125,7 @@
   PacketIterator FindNaluEnd(PacketIterator packet_iter) const;
   // Deletes the data of all packets between |start| and |end|, inclusively.
   // Note that this function doesn't delete the actual packets.
-  size_t DeletePacketData(PacketIterator start,
-                          PacketIterator end);
+  size_t DeletePacketData(PacketIterator start, PacketIterator end);
   void UpdateCompleteSession();
 
   // When enabled, determine if session is decodable, i.e. incomplete but
diff --git a/webrtc/modules/video_coding/session_info_unittest.cc b/webrtc/modules/video_coding/session_info_unittest.cc
index 4906048..4019d63 100644
--- a/webrtc/modules/video_coding/session_info_unittest.cc
+++ b/webrtc/modules/video_coding/session_info_unittest.cc
@@ -81,7 +81,7 @@
               fragmentation_.fragmentationLength[partition_id]);
     for (int i = 0; i < packets_expected; ++i) {
       size_t packet_index = fragmentation_.fragmentationOffset[partition_id] +
-          i * packet_buffer_size();
+                            i * packet_buffer_size();
       if (packet_index + packet_buffer_size() > frame_buffer_size())
         return false;
       VerifyPacket(frame_buffer_ + packet_index, start_value + i);
@@ -122,8 +122,7 @@
     memset(seq_num_list_, 0, sizeof(seq_num_list_));
   }
 
-  void BuildSeqNumList(uint16_t low,
-                       uint16_t high) {
+  void BuildSeqNumList(uint16_t low, uint16_t high) {
     size_t i = 0;
     while (low != high + 1) {
       EXPECT_LT(i, kMaxSeqNumListLength);
@@ -173,14 +172,11 @@
   // To make things more difficult we will make sure to have a wrap here.
   packet_.isFirstPacket = false;
   packet_.markerBit = true;
-  packet_.seqNum  = 2;
+  packet_.seqNum = 2;
   packet_.sizeBytes = 0;
   packet_.frameType = kEmptyFrame;
-  EXPECT_EQ(0,
-            session_.InsertPacket(packet_,
-                                  frame_buffer_,
-                                  kNoErrors,
-                                  frame_data));
+  EXPECT_EQ(
+      0, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
   EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
 }
 
@@ -198,9 +194,8 @@
     packet_.seqNum += 1;
     FillPacket(i);
     ASSERT_EQ(packet_buffer_size(),
-              static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
-                                                        kNoErrors,
-                                                        frame_data)));
+              static_cast<size_t>(session_.InsertPacket(
+                  packet_, frame_buffer_, kNoErrors, frame_data)));
   }
 
   packet_.seqNum += 1;
@@ -223,9 +218,8 @@
   packet_.markerBit = false;
   FillPacket(3);
   EXPECT_EQ(packet_buffer_size(),
-            static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
-                                                      kWithErrors,
-                                                      frame_data)));
+            static_cast<size_t>(session_.InsertPacket(
+                packet_, frame_buffer_, kWithErrors, frame_data)));
   EXPECT_TRUE(session_.decodable());
 }
 
@@ -237,18 +231,16 @@
   frame_data.rolling_average_packets_per_frame = 11;
   frame_data.rtt_ms = 150;
   EXPECT_EQ(packet_buffer_size(),
-            static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
-                                                      kSelectiveErrors,
-                                                      frame_data)));
+            static_cast<size_t>(session_.InsertPacket(
+                packet_, frame_buffer_, kSelectiveErrors, frame_data)));
   EXPECT_FALSE(session_.decodable());
 
   packet_.seqNum -= 1;
   FillPacket(0);
   packet_.isFirstPacket = true;
   EXPECT_EQ(packet_buffer_size(),
-            static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
-                                                      kSelectiveErrors,
-                                                      frame_data)));
+            static_cast<size_t>(session_.InsertPacket(
+                packet_, frame_buffer_, kSelectiveErrors, frame_data)));
   EXPECT_TRUE(session_.decodable());
 
   packet_.isFirstPacket = false;
@@ -256,19 +248,17 @@
   for (int i = 2; i < 8; ++i) {
     packet_.seqNum += 1;
     FillPacket(i);
-  EXPECT_EQ(packet_buffer_size(),
-            static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
-                                                      kSelectiveErrors,
-                                                      frame_data)));
+    EXPECT_EQ(packet_buffer_size(),
+              static_cast<size_t>(session_.InsertPacket(
+                  packet_, frame_buffer_, kSelectiveErrors, frame_data)));
     EXPECT_TRUE(session_.decodable());
   }
 
   packet_.seqNum += 1;
   FillPacket(8);
   EXPECT_EQ(packet_buffer_size(),
-            static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
-                                                      kSelectiveErrors,
-                                                      frame_data)));
+            static_cast<size_t>(session_.InsertPacket(
+                packet_, frame_buffer_, kSelectiveErrors, frame_data)));
   EXPECT_TRUE(session_.decodable());
 }
 
@@ -285,18 +275,14 @@
   packet_.isFirstPacket = true;
   packet_.markerBit = true;
   FillPacket(1);
-  EXPECT_EQ(-3, session_.InsertPacket(packet_,
-                                      frame_buffer_,
-                                      kNoErrors,
-                                      frame_data));
+  EXPECT_EQ(
+      -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
   packet_.seqNum = 0x0000;
   packet_.isFirstPacket = false;
   packet_.markerBit = false;
   FillPacket(1);
-  EXPECT_EQ(-3, session_.InsertPacket(packet_,
-                                      frame_buffer_,
-                                      kNoErrors,
-                                      frame_data));
+  EXPECT_EQ(
+      -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
 }
 
 TEST_F(TestSessionInfo, SetMarkerBitOnce) {
@@ -311,10 +297,8 @@
   packet_.isFirstPacket = true;
   packet_.markerBit = true;
   FillPacket(1);
-  EXPECT_EQ(-3, session_.InsertPacket(packet_,
-                                      frame_buffer_,
-                                      kNoErrors,
-                                      frame_data));
+  EXPECT_EQ(
+      -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
 }
 
 TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
@@ -331,10 +315,8 @@
   packet_.isFirstPacket = true;
   packet_.markerBit = true;
   FillPacket(1);
-  EXPECT_EQ(-3, session_.InsertPacket(packet_,
-                                      frame_buffer_,
-                                      kNoErrors,
-                                      frame_data));
+  EXPECT_EQ(
+      -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
   packet_.seqNum = 0x0006;
   packet_.isFirstPacket = true;
   packet_.markerBit = true;
@@ -346,10 +328,8 @@
   packet_.isFirstPacket = false;
   packet_.markerBit = true;
   FillPacket(1);
-  EXPECT_EQ(-3, session_.InsertPacket(packet_,
-                                      frame_buffer_,
-                                      kNoErrors,
-                                      frame_data));
+  EXPECT_EQ(
+      -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
 }
 
 TEST_F(TestSessionInfo, OutOfBoundsPacketsWrap) {
@@ -379,20 +359,14 @@
   packet_.isFirstPacket = false;
   packet_.markerBit = false;
   FillPacket(1);
-  EXPECT_EQ(-3,
-            session_.InsertPacket(packet_,
-                                  frame_buffer_,
-                                  kNoErrors,
-                                  frame_data));
+  EXPECT_EQ(
+      -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
   packet_.seqNum = 0x0006;
   packet_.isFirstPacket = false;
   packet_.markerBit = false;
   FillPacket(1);
-  EXPECT_EQ(-3,
-            session_.InsertPacket(packet_,
-                                  frame_buffer_,
-                                  kNoErrors,
-                                  frame_data));
+  EXPECT_EQ(
+      -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
 }
 
 TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
@@ -417,10 +391,8 @@
   packet_.isFirstPacket = false;
   packet_.markerBit = false;
   FillPacket(1);
-  EXPECT_EQ(-3, session_.InsertPacket(packet_,
-                                      frame_buffer_,
-                                      kNoErrors,
-                                      frame_data));
+  EXPECT_EQ(
+      -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
   packet_.seqNum = 0x0010;
   packet_.isFirstPacket = false;
   packet_.markerBit = false;
@@ -440,10 +412,8 @@
   packet_.isFirstPacket = false;
   packet_.markerBit = false;
   FillPacket(1);
-  EXPECT_EQ(-3, session_.InsertPacket(packet_,
-                                      frame_buffer_,
-                                      kNoErrors,
-                                      frame_data));
+  EXPECT_EQ(
+      -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
 }
 
 TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
@@ -455,8 +425,8 @@
   packet_header_.header.markerBit = false;
   packet_header_.header.sequenceNumber = 0;
   FillPacket(0);
-  VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
-                                    packet_header_);
+  VCMPacket* packet =
+      new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
   EXPECT_EQ(packet_buffer_size(),
             static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
                                                       kNoErrors, frame_data)));
@@ -505,8 +475,8 @@
   packet_header_.header.markerBit = false;
   packet_header_.header.sequenceNumber = 1;
   FillPacket(1);
-  VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
-                                    packet_header_);
+  VCMPacket* packet =
+      new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
   EXPECT_EQ(packet_buffer_size(),
             static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
                                                       kNoErrors, frame_data)));
@@ -567,8 +537,8 @@
   packet_header_.header.markerBit = false;
   packet_header_.header.sequenceNumber = 0xfffd;
   FillPacket(0);
-  VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
-                                    packet_header_);
+  VCMPacket* packet =
+      new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
   EXPECT_EQ(packet_buffer_size(),
             static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
                                                       kNoErrors, frame_data)));
@@ -629,8 +599,8 @@
   packet_header_.header.markerBit = false;
   packet_header_.header.sequenceNumber = 0xfffd;
   FillPacket(0);
-  VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
-                                    packet_header_);
+  VCMPacket* packet =
+      new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
   EXPECT_EQ(packet_buffer_size(),
             static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
                                                       kNoErrors, frame_data)));
@@ -682,7 +652,6 @@
   EXPECT_TRUE(VerifyPartition(1, 1, 2));
 }
 
-
 TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
   // Partition 1  |Partition 2    | Partition 3
   // [ 1 ] [ 2 ]  |               | [ 5 ] | [ 6 ]
@@ -692,8 +661,8 @@
   packet_header_.header.markerBit = false;
   packet_header_.header.sequenceNumber = 1;
   FillPacket(1);
-  VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
-                                    packet_header_);
+  VCMPacket* packet =
+      new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
   EXPECT_EQ(packet_buffer_size(),
             static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
                                                       kNoErrors, frame_data)));
@@ -754,8 +723,8 @@
   packet_header_.header.markerBit = false;
   packet_header_.header.sequenceNumber = 1;
   FillPacket(1);
-  VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
-                                    packet_header_);
+  VCMPacket* packet =
+      new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
   EXPECT_EQ(packet_buffer_size(),
             static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
                                                       kNoErrors, frame_data)));
@@ -767,8 +736,7 @@
   packet_header_.header.markerBit = false;
   packet_header_.header.sequenceNumber += 1;
   FillPacket(2);
-  packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
-                         packet_header_);
+  packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
   EXPECT_EQ(packet_buffer_size(),
             static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
                                                       kNoErrors, frame_data)));
@@ -841,8 +809,8 @@
   packet_header_.header.markerBit = false;
   packet_header_.header.sequenceNumber = 0;
   FillPacket(0);
-  VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
-                                    packet_header_);
+  VCMPacket* packet =
+      new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
   EXPECT_EQ(packet_buffer_size(),
             static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
                                                       kNoErrors, frame_data)));
@@ -892,10 +860,8 @@
   packet_.sizeBytes = 0;
   packet_.seqNum = 0;
   packet_.markerBit = false;
-  EXPECT_EQ(0, session_.InsertPacket(packet_,
-                                     frame_buffer_,
-                                     kNoErrors,
-                                     frame_data));
+  EXPECT_EQ(
+      0, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
 
   EXPECT_EQ(0U, session_.MakeDecodable());
   EXPECT_EQ(0U, session_.SessionLength());
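Taken together, the tests above pin down VCMSessionInfo::InsertPacket()'s return convention: the number of bytes inserted on success, 0 for empty packets, -2 for duplicates (the duplicate check in session_info.cc above), and -3 for packets outside the session's bounds. Below is a sketch of one more test in the same style, exercising the duplicate path explicitly; it assumes, as the surrounding tests appear to, that FrameData is default-constructible and that the fixture members shown above (packet_, frame_buffer_, session_, FillPacket(), packet_buffer_size()) behave as they do there. Illustrative only, not part of this CL.

TEST_F(TestSessionInfo, DuplicatePacketRejected) {
  FrameData frame_data;  // Assumed default-constructible, as in the tests above.
  packet_.seqNum = 0x0001;
  packet_.isFirstPacket = true;
  packet_.markerBit = false;
  FillPacket(1);
  EXPECT_EQ(packet_buffer_size(),
            static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
                                                      kNoErrors, frame_data)));
  // Re-inserting the same sequence number with a non-empty payload hits the
  // duplicate check and is rejected.
  EXPECT_EQ(
      -2, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
}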
diff --git a/webrtc/modules/video_coding/video_receiver.cc b/webrtc/modules/video_coding/video_receiver.cc
index f074832..02c0da8 100644
--- a/webrtc/modules/video_coding/video_receiver.cc
+++ b/webrtc/modules/video_coding/video_receiver.cc
@@ -31,7 +31,7 @@
       _receiveCritSect(CriticalSectionWrapper::CreateCriticalSection()),
       _timing(clock_),
       _receiver(&_timing, clock_, event_factory),
-      _decodedFrameCallback(_timing, clock_),
+      _decodedFrameCallback(&_timing, clock_),
       _frameTypeCallback(NULL),
       _receiveStatsCallback(NULL),
       _decoderTimingCallback(NULL),
@@ -84,20 +84,12 @@
       int jitter_buffer_ms;
       int min_playout_delay_ms;
       int render_delay_ms;
-      _timing.GetTimings(&decode_ms,
-                         &max_decode_ms,
-                         &current_delay_ms,
-                         &target_delay_ms,
-                         &jitter_buffer_ms,
-                         &min_playout_delay_ms,
-                         &render_delay_ms);
-      _decoderTimingCallback->OnDecoderTiming(decode_ms,
-                                              max_decode_ms,
-                                              current_delay_ms,
-                                              target_delay_ms,
-                                              jitter_buffer_ms,
-                                              min_playout_delay_ms,
-                                              render_delay_ms);
+      _timing.GetTimings(&decode_ms, &max_decode_ms, &current_delay_ms,
+                         &target_delay_ms, &jitter_buffer_ms,
+                         &min_playout_delay_ms, &render_delay_ms);
+      _decoderTimingCallback->OnDecoderTiming(
+          decode_ms, max_decode_ms, current_delay_ms, target_delay_ms,
+          jitter_buffer_ms, min_playout_delay_ms, render_delay_ms);
     }
 
     // Size of render buffer.
@@ -285,7 +277,7 @@
   }
 
   VCMEncodedFrame* frame = _receiver.FrameForDecoding(
-      maxWaitTimeMs, nextRenderTimeMs, prefer_late_decoding);
+      maxWaitTimeMs, &nextRenderTimeMs, prefer_late_decoding);
 
   if (!frame)
     return VCM_FRAME_NOT_READY;
@@ -353,12 +345,8 @@
 
 // Must be called from inside the receive side critical section.
 int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) {
-  TRACE_EVENT_ASYNC_STEP1("webrtc",
-                          "Video",
-                          frame.TimeStamp(),
-                          "Decode",
-                          "type",
-                          frame.FrameType());
+  TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame.TimeStamp(), "Decode",
+                          "type", frame.FrameType());
   // Change decoder if payload type has changed
   _decoder = _codecDataBase.GetDecoder(frame, &_decodedFrameCallback);
   if (_decoder == NULL) {
@@ -419,8 +407,8 @@
   if (receiveCodec == NULL) {
     return VCM_PARAMETER_ERROR;
   }
-  if (!_codecDataBase.RegisterReceiveCodec(
-          receiveCodec, numberOfCores, requireKeyFrame)) {
+  if (!_codecDataBase.RegisterReceiveCodec(receiveCodec, numberOfCores,
+                                           requireKeyFrame)) {
     return -1;
   }
   return 0;
@@ -446,9 +434,7 @@
                                       size_t payloadLength,
                                       const WebRtcRTPHeader& rtpInfo) {
   if (rtpInfo.frameType == kVideoFrameKey) {
-    TRACE_EVENT1("webrtc",
-                 "VCM::PacketKeyFrame",
-                 "seqnum",
+    TRACE_EVENT1("webrtc", "VCM::PacketKeyFrame", "seqnum",
                  rtpInfo.header.sequenceNumber);
   }
   if (incomingPayload == NULL) {
@@ -487,7 +473,9 @@
 }
 
 // Current video delay
-int32_t VideoReceiver::Delay() const { return _timing.TargetVideoDelay(); }
+int32_t VideoReceiver::Delay() const {
+  return _timing.TargetVideoDelay();
+}
 
 uint32_t VideoReceiver::DiscardedPackets() const {
   return _receiver.DiscardedPackets();
@@ -543,8 +531,8 @@
     CriticalSectionScoped process_cs(process_crit_sect_.get());
     max_nack_list_size_ = max_nack_list_size;
   }
-  _receiver.SetNackSettings(
-      max_nack_list_size, max_packet_age_to_nack, max_incomplete_time_ms);
+  _receiver.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
+                            max_incomplete_time_ms);
 }
 
 int VideoReceiver::SetMinReceiverDelay(int desired_delay_ms) {