Remove VideoFrameType aliases for FrameType.

These aliases are no longer used in Chromium, so they can now be removed.
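
For VideoEncoder/VideoDecoder implementations outside this tree, the migration
is mechanical. A minimal sketch (illustrative only, mirroring the
webrtc/common_types.h change below):

  #include <vector>

  #include "webrtc/common_types.h"  // webrtc::FrameType, kVideoFrame{Key,Delta}
  #include "webrtc/video_frame.h"   // webrtc::EncodedImage

  // Before (aliases removed by this CL):
  //   image._frameType = webrtc::kKeyFrame;
  //   std::vector<webrtc::VideoFrameType> frame_types(1, webrtc::kDeltaFrame);
  // After:
  webrtc::EncodedImage image;
  image._frameType = webrtc::kVideoFrameKey;
  std::vector<webrtc::FrameType> frame_types(1, webrtc::kVideoFrameDelta);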

BUG=webrtc:5042
R=mflodman@webrtc.org
TBR=magjed@webrtc.org

Review URL: https://codereview.webrtc.org/1415693002 .

Cr-Commit-Position: refs/heads/master@{#10390}
diff --git a/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc b/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc
index fbe7fdf..618c119 100644
--- a/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc
+++ b/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc
@@ -474,7 +474,7 @@
 
   // Always start with a complete key frame.
   if (key_frame_required_) {
-    if (inputImage._frameType != webrtc::kKeyFrame) {
+    if (inputImage._frameType != webrtc::kVideoFrameKey) {
       ALOGE << "Decode() - key frame is required";
       return WEBRTC_VIDEO_CODEC_ERROR;
     }
diff --git a/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc b/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
index 3bbf682..a69db8b 100644
--- a/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
+++ b/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
@@ -590,7 +590,7 @@
   render_times_ms_.push_back(input_frame.render_time_ms());
   frame_rtc_times_ms_.push_back(GetCurrentTimeMs());
 
-  bool key_frame = frame_types->front() != webrtc::kDeltaFrame;
+  bool key_frame = frame_types->front() != webrtc::kVideoFrameDelta;
   bool encode_status = jni->CallBooleanMethod(*j_media_codec_video_encoder_,
                                               j_encode_method_,
                                               key_frame,
@@ -769,7 +769,8 @@
       image->_encodedHeight = height_;
       image->_timeStamp = output_timestamp_;
       image->capture_time_ms_ = output_render_time_ms_;
-      image->_frameType = (key_frame ? webrtc::kKeyFrame : webrtc::kDeltaFrame);
+      image->_frameType =
+          (key_frame ? webrtc::kVideoFrameKey : webrtc::kVideoFrameDelta);
       image->_completeFrame = true;
 
       webrtc::CodecSpecificInfo info;
diff --git a/webrtc/common_types.h b/webrtc/common_types.h
index 6b624bf..07faf6a 100644
--- a/webrtc/common_types.h
+++ b/webrtc/common_types.h
@@ -162,16 +162,8 @@
   kAudioFrameCN = 2,
   kVideoFrameKey = 3,
   kVideoFrameDelta = 4,
-  // TODO(pbos): Remove below aliases (non-kVideo prefixed) as soon as no
-  // VideoEncoder implementation in Chromium uses them.
-  kKeyFrame = kVideoFrameKey,
-  kDeltaFrame = kVideoFrameDelta,
 };
 
-// TODO(pbos): Remove VideoFrameType when VideoEncoder implementations no longer
-// depend on it.
-using VideoFrameType = FrameType;
-
 // Statistics for an RTCP channel
 struct RtcpStatistics {
   RtcpStatistics()
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
index 22ace50..d677f8b 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
@@ -198,7 +198,8 @@
   frame._encodedWidth = encode_params->width;
   frame._encodedHeight = encode_params->height;
   frame._completeFrame = true;
-  frame._frameType = is_keyframe ? webrtc::kKeyFrame : webrtc::kDeltaFrame;
+  frame._frameType =
+      is_keyframe ? webrtc::kVideoFrameKey : webrtc::kVideoFrameDelta;
   frame.capture_time_ms_ = encode_params->render_time_ms;
   frame._timeStamp = encode_params->timestamp;
 
@@ -277,7 +278,7 @@
   bool is_keyframe_required = false;
   if (frame_types) {
     for (auto frame_type : *frame_types) {
-      if (frame_type == kKeyFrame) {
+      if (frame_type == kVideoFrameKey) {
         is_keyframe_required = true;
         break;
       }
diff --git a/webrtc/modules/video_coding/codecs/i420/i420.cc b/webrtc/modules/video_coding/codecs/i420/i420.cc
index b2f82a0..cf546a0 100644
--- a/webrtc/modules/video_coding/codecs/i420/i420.cc
+++ b/webrtc/modules/video_coding/codecs/i420/i420.cc
@@ -82,7 +82,7 @@
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
 
-  _encodedImage._frameType = kKeyFrame;
+  _encodedImage._frameType = kVideoFrameKey;
   _encodedImage._timeStamp = inputImage.timestamp();
   _encodedImage._encodedHeight = inputImage.height();
   _encodedImage._encodedWidth = inputImage.width();
diff --git a/webrtc/modules/video_coding/codecs/test/stats.cc b/webrtc/modules/video_coding/codecs/test/stats.cc
index 91a2f3c..f87407d 100644
--- a/webrtc/modules/video_coding/codecs/test/stats.cc
+++ b/webrtc/modules/video_coding/codecs/test/stats.cc
@@ -32,7 +32,7 @@
       total_packets(0),
       bit_rate_in_kbps(0),
       encoded_frame_length_in_bytes(0),
-      frame_type(kDeltaFrame) {}
+      frame_type(kVideoFrameDelta) {}
 
 Stats::Stats() {}
 
@@ -83,7 +83,7 @@
     total_encoding_time_in_us += it->encode_time_in_us;
     total_decoding_time_in_us += it->decode_time_in_us;
     total_encoded_frames_lengths += it->encoded_frame_length_in_bytes;
-    if (it->frame_type == webrtc::kKeyFrame) {
+    if (it->frame_type == webrtc::kVideoFrameKey) {
       total_encoded_key_frames_lengths += it->encoded_frame_length_in_bytes;
       nbr_keyframes++;
     } else {
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
index 3bb6b7a..7844891 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
@@ -59,7 +59,7 @@
       last_frame_missing_(false),
       initialized_(false),
       encoded_frame_size_(0),
-      encoded_frame_type_(kKeyFrame),
+      encoded_frame_type_(kVideoFrameKey),
       prev_time_stamp_(0),
       num_dropped_frames_(0),
       num_spatial_resizes_(0),
@@ -199,15 +199,15 @@
     source_frame_.set_timestamp(frame_number);
 
     // Decide if we're going to force a keyframe:
-    std::vector<FrameType> frame_types(1, kDeltaFrame);
+    std::vector<FrameType> frame_types(1, kVideoFrameDelta);
     if (config_.keyframe_interval > 0 &&
         frame_number % config_.keyframe_interval == 0) {
-      frame_types[0] = kKeyFrame;
+      frame_types[0] = kVideoFrameKey;
     }
 
     // For dropped frames, we regard them as zero size encoded frames.
     encoded_frame_size_ = 0;
-    encoded_frame_type_ = kDeltaFrame;
+    encoded_frame_type_ = kVideoFrameDelta;
 
     int32_t encode_result = encoder_->Encode(source_frame_, NULL, &frame_types);
 
@@ -257,7 +257,7 @@
  // Perform packet loss if the criterion is fulfilled:
   bool exclude_this_frame = false;
   // Only keyframes can be excluded
-  if (encoded_image._frameType == kKeyFrame) {
+  if (encoded_image._frameType == kVideoFrameKey) {
     switch (config_.exclude_frame_types) {
       case kExcludeOnlyFirstKeyFrame:
         if (!first_key_frame_has_been_excluded_) {
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
index d9148d6..6eaea8b 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
@@ -272,7 +272,7 @@
     float encoded_size_kbits = processor_->EncodedFrameSize() * 8.0f / 1000.0f;
     // Update layer data.
     // Update rate mismatch relative to per-frame bandwidth for delta frames.
-    if (frame_type == kDeltaFrame) {
+    if (frame_type == kVideoFrameDelta) {
       // TODO(marpan): Should we count dropped (zero size) frames in mismatch?
       sum_frame_size_mismatch_[layer_] += fabs(encoded_size_kbits -
                                                per_frame_bandwidth_[layer_]) /
@@ -450,7 +450,7 @@
     ResetRateControlMetrics(
         rate_profile.frame_index_rate_update[update_index + 1]);
     int frame_number = 0;
-    FrameType frame_type = kDeltaFrame;
+    FrameType frame_type = kVideoFrameDelta;
     while (processor_->ProcessFrame(frame_number) &&
         frame_number < num_frames) {
       // Get the layer index for the frame |frame_number|.
diff --git a/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc b/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
index f6eee39..bfcb2e6 100644
--- a/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
+++ b/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
@@ -425,7 +425,7 @@
            f.decode_return_code,
            f.bit_rate_in_kbps,
            f.encoded_frame_length_in_bytes,
-           f.frame_type == webrtc::kDeltaFrame ? "'Delta'" : "'Other'",
+           f.frame_type == webrtc::kVideoFrameDelta ? "'Delta'" : "'Other'",
            f.packets_dropped,
            f.total_packets,
            ssim.value,
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
index 4439267..5dc4ac7 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
@@ -246,7 +246,7 @@
   bool send_key_frame = false;
   if (frame_types) {
     for (size_t i = 0; i < frame_types->size(); ++i) {
-      if (frame_types->at(i) == kKeyFrame) {
+      if (frame_types->at(i) == kVideoFrameKey) {
         send_key_frame = true;
         break;
       }
@@ -269,10 +269,10 @@
 
     std::vector<FrameType> stream_frame_types;
     if (send_key_frame) {
-      stream_frame_types.push_back(kKeyFrame);
+      stream_frame_types.push_back(kVideoFrameKey);
       streaminfos_[stream_idx].key_frame_request = false;
     } else {
-      stream_frame_types.push_back(kDeltaFrame);
+      stream_frame_types.push_back(kVideoFrameDelta);
     }
 
     int dst_width = streaminfos_[stream_idx].width;
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
index a3d9e5a..e4fc986 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
@@ -70,12 +70,12 @@
                           const RTPFragmentationHeader* fragmentation) {
     // Only store the base layer.
     if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
-      if (encoded_image._frameType == kKeyFrame) {
+      if (encoded_image._frameType == kVideoFrameKey) {
         delete [] encoded_key_frame_._buffer;
         encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
         encoded_key_frame_._size = encoded_image._size;
         encoded_key_frame_._length = encoded_image._length;
-        encoded_key_frame_._frameType = kKeyFrame;
+        encoded_key_frame_._frameType = kVideoFrameKey;
         encoded_key_frame_._completeFrame = encoded_image._completeFrame;
         memcpy(encoded_key_frame_._buffer,
                encoded_image._buffer,
@@ -389,33 +389,34 @@
   // a key frame was only requested for some of them.
   void TestKeyFrameRequestsOnAllStreams() {
     encoder_->SetRates(kMaxBitrates[2], 30);  // To get all three streams.
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, kNumberOfSimulcastStreams);
+    ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    frame_types[0] = kKeyFrame;
-    ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
+    frame_types[0] = kVideoFrameKey;
+    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    std::fill(frame_types.begin(), frame_types.end(), kDeltaFrame);
-    frame_types[1] = kKeyFrame;
-    ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
+    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
+    frame_types[1] = kVideoFrameKey;
+    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    std::fill(frame_types.begin(), frame_types.end(), kDeltaFrame);
-    frame_types[2] = kKeyFrame;
-    ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
+    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
+    frame_types[2] = kVideoFrameKey;
+    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    std::fill(frame_types.begin(), frame_types.end(), kDeltaFrame);
-    ExpectStreams(kDeltaFrame, kNumberOfSimulcastStreams);
+    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
+    ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -423,11 +424,12 @@
   void TestPaddingAllStreams() {
     // We should always encode the base layer.
     encoder_->SetRates(kMinBitrates[0] - 1, 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 1);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -435,11 +437,12 @@
   void TestPaddingTwoStreams() {
     // We have just enough to get only the first stream and padding for two.
     encoder_->SetRates(kMinBitrates[0], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 1);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -448,11 +451,12 @@
     // We are just below limit of sending second stream, so we should get
     // the first stream maxed out (at |maxBitrate|), and padding for two.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 1);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -460,11 +464,12 @@
   void TestPaddingOneStream() {
     // We have just enough to send two streams, so padding for one stream.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 2);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 2);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 2);
+    ExpectStreams(kVideoFrameDelta, 2);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -474,11 +479,12 @@
     // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2] - 1, 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 2);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 2);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 2);
+    ExpectStreams(kVideoFrameDelta, 2);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -487,11 +493,12 @@
     // We have just enough to send all streams.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 3);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 3);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 3);
+    ExpectStreams(kVideoFrameDelta, 3);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -500,31 +507,32 @@
     // We should get three media streams.
     encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] +
                        kMaxBitrates[2], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 3);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 3);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 3);
+    ExpectStreams(kVideoFrameDelta, 3);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
     // We should only get two streams and padding for one.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2] / 2, 30);
-    ExpectStreams(kDeltaFrame, 2);
+    ExpectStreams(kVideoFrameDelta, 2);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
     // We should only get the first stream and padding for two.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
     // We don't have enough bitrate for the thumbnail stream, but we should get
     // it anyway with current configuration.
     encoder_->SetRates(kTargetBitrates[0] - 1, 30);
-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
@@ -532,7 +540,7 @@
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2] / 2, 30);
     // We get a key frame because a new stream is being enabled.
-    ExpectStreams(kKeyFrame, 2);
+    ExpectStreams(kVideoFrameKey, 2);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
@@ -540,7 +548,7 @@
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kTargetBitrates[2], 30);
     // We get a key frame because a new stream is being enabled.
-    ExpectStreams(kKeyFrame, 3);
+    ExpectStreams(kVideoFrameKey, 3);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -581,11 +589,13 @@
 
     // Encode one frame and verify.
     encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    EXPECT_CALL(encoder_callback_, Encoded(
-        AllOf(Field(&EncodedImage::_frameType, kKeyFrame),
-              Field(&EncodedImage::_encodedWidth, width),
-              Field(&EncodedImage::_encodedHeight, height)), _, _))
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    EXPECT_CALL(encoder_callback_,
+                Encoded(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
+                              Field(&EncodedImage::_encodedWidth, width),
+                              Field(&EncodedImage::_encodedHeight, height)),
+                        _, _))
         .Times(1)
         .WillRepeatedly(Return(0));
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -596,7 +606,7 @@
     settings_.startBitrate = kMinBitrates[0];
     EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
     encoder_->SetRates(settings_.startBitrate, 30);
-    ExpectStreams(kKeyFrame, 1);
+    ExpectStreams(kVideoFrameKey, 1);
     // Resize |input_frame_| to the new resolution.
     half_width = (settings_.width + 1) / 2;
     input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
diff --git a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 382fc60..50a86b6 100644
--- a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -221,7 +221,7 @@
   encoder_->Encode(input_frame_, NULL, NULL);
   EXPECT_GT(WaitForEncodedFrame(), 0u);
   // First frame should be a key frame.
-  encoded_frame_._frameType = kKeyFrame;
+  encoded_frame_._frameType = kVideoFrameKey;
   encoded_frame_.ntp_time_ms_ = kTestNtpTimeMs;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame_, false, NULL));
@@ -241,12 +241,12 @@
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
             decoder_->Decode(encoded_frame_, false, NULL));
   // Setting complete back to true. Forcing a delta frame.
-  encoded_frame_._frameType = kDeltaFrame;
+  encoded_frame_._frameType = kVideoFrameDelta;
   encoded_frame_._completeFrame = true;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
             decoder_->Decode(encoded_frame_, false, NULL));
   // Now setting a key frame.
-  encoded_frame_._frameType = kKeyFrame;
+  encoded_frame_._frameType = kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame_, false, NULL));
   EXPECT_GT(I420PSNR(&input_frame_, &decoded_frame_), 36);
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
index 0c5256e..3c7cc4d 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -792,7 +792,7 @@
   if (!send_key_frame && frame_types) {
     for (size_t i = 0; i < frame_types->size() && i < send_stream_.size();
          ++i) {
-      if ((*frame_types)[i] == kKeyFrame && send_stream_[i]) {
+      if ((*frame_types)[i] == kVideoFrameKey && send_stream_[i]) {
         send_key_frame = true;
         break;
       }
@@ -970,7 +970,7 @@
     vpx_codec_iter_t iter = NULL;
     int part_idx = 0;
     encoded_images_[encoder_idx]._length = 0;
-    encoded_images_[encoder_idx]._frameType = kDeltaFrame;
+    encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
     RTPFragmentationHeader frag_info;
    // token_partitions_ is the number of bits used.
     frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_)
@@ -1001,7 +1001,7 @@
       if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
         // check if encoded frame is a key frame
         if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
-          encoded_images_[encoder_idx]._frameType = kKeyFrame;
+          encoded_images_[encoder_idx]._frameType = kVideoFrameKey;
           rps_.EncodedKeyFrame(picture_id_[stream_idx]);
         }
         PopulateCodecSpecific(&codec_specific, *pkt, stream_idx,
@@ -1172,7 +1172,7 @@
 
   // Always start with a complete key frame.
   if (key_frame_required_) {
-    if (input_image._frameType != kKeyFrame)
+    if (input_image._frameType != kVideoFrameKey)
       return WEBRTC_VIDEO_CODEC_ERROR;
     // We have a key frame - is it complete?
     if (input_image._completeFrame) {
@@ -1185,7 +1185,8 @@
   // the feedback mode is enabled (RPS).
   // Reset on a key frame refresh.
   if (!feedback_mode_) {
-    if (input_image._frameType == kKeyFrame && input_image._completeFrame) {
+    if (input_image._frameType == kVideoFrameKey &&
+        input_image._completeFrame) {
       propagation_cnt_ = -1;
     // Start count on first loss.
     } else if ((!input_image._completeFrame || missing_frames) &&
@@ -1238,7 +1239,7 @@
 #endif
 
   // Store encoded frame if key frame. (Used in Copy method.)
-  if (input_image._frameType == kKeyFrame && input_image._buffer != NULL) {
+  if (input_image._frameType == kVideoFrameKey && input_image._buffer != NULL) {
     const uint32_t bytes_to_copy = input_image._length;
     if (last_keyframe_._size < bytes_to_copy) {
       delete [] last_keyframe_._buffer;
@@ -1272,7 +1273,7 @@
     // Whenever we receive an incomplete key frame all reference buffers will
     // be corrupt. If that happens we must request new key frames until we
     // decode a complete key frame.
-    if (input_image._frameType == kKeyFrame && !input_image._completeFrame)
+    if (input_image._frameType == kVideoFrameKey && !input_image._completeFrame)
       return WEBRTC_VIDEO_CODEC_ERROR;
     // Check for reference updates and last reference buffer corruption and
     // signal successful reference propagation or frame corruption to the
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
index 836a131..59574ae 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -432,7 +432,7 @@
   if (encoded_complete_callback_ == NULL) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
-  FrameType frame_type = kDeltaFrame;
+  FrameType frame_type = kVideoFrameDelta;
   // We only support one stream at the moment.
   if (frame_types && frame_types->size() > 0) {
     frame_type = (*frame_types)[0];
@@ -456,7 +456,7 @@
   raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
 
   int flags = 0;
-  bool send_keyframe = (frame_type == kKeyFrame);
+  bool send_keyframe = (frame_type == kVideoFrameKey);
   if (send_keyframe) {
     // Key frame request from caller.
     flags = VPX_EFLAG_FORCE_KF;
@@ -560,7 +560,7 @@
 
 int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   encoded_image_._length = 0;
-  encoded_image_._frameType = kDeltaFrame;
+  encoded_image_._frameType = kVideoFrameDelta;
   RTPFragmentationHeader frag_info;
   // Note: no data partitioning in VP9, so 1 partition only. We keep this
   // fragmentation data for now, until VP9 packetizer is implemented.
@@ -582,7 +582,7 @@
   // End of frame.
   // Check if encoded frame is a key frame.
   if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
-    encoded_image_._frameType = kKeyFrame;
+    encoded_image_._frameType = kVideoFrameKey;
   }
   PopulateCodecSpecific(&codec_specific, *pkt, input_image_->timestamp());
 
@@ -688,7 +688,7 @@
   }
   // Always start with a complete key frame.
   if (key_frame_required_) {
-    if (input_image._frameType != kKeyFrame)
+    if (input_image._frameType != kVideoFrameKey)
       return WEBRTC_VIDEO_CODEC_ERROR;
     // We have a key frame - is it complete?
     if (input_image._completeFrame) {
diff --git a/webrtc/modules/video_coding/main/source/encoded_frame.cc b/webrtc/modules/video_coding/main/source/encoded_frame.cc
index 92d2cd0..d86704d 100644
--- a/webrtc/modules/video_coding/main/source/encoded_frame.cc
+++ b/webrtc/modules/video_coding/main/source/encoded_frame.cc
@@ -89,7 +89,7 @@
     _renderTimeMs = -1;
     _timeStamp = 0;
     _payloadType = 0;
-    _frameType = kDeltaFrame;
+    _frameType = kVideoFrameDelta;
     _encodedWidth = 0;
     _encodedHeight = 0;
     _completeFrame = false;
diff --git a/webrtc/modules/video_coding/main/source/media_optimization.cc b/webrtc/modules/video_coding/main/source/media_optimization.cc
index b4c1be2..327c8b4 100644
--- a/webrtc/modules/video_coding/main/source/media_optimization.cc
+++ b/webrtc/modules/video_coding/main/source/media_optimization.cc
@@ -372,7 +372,7 @@
   UpdateSentBitrate(now_ms);
   UpdateSentFramerate();
   if (encoded_length > 0) {
-    const bool delta_frame = encoded_image._frameType != kKeyFrame;
+    const bool delta_frame = encoded_image._frameType != kVideoFrameKey;
 
     frame_dropper_->Fill(encoded_length, delta_frame);
     if (max_payload_size_ > 0 && encoded_length > 0) {
diff --git a/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc b/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc
index 5031015..dadc324 100644
--- a/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc
@@ -38,7 +38,7 @@
       EncodedImage encoded_image;
       encoded_image._length = bytes_per_frame;
       encoded_image._timeStamp = next_timestamp_;
-      encoded_image._frameType = kKeyFrame;
+      encoded_image._frameType = kVideoFrameKey;
       ASSERT_EQ(VCM_OK, media_opt_.UpdateWithEncodedData(encoded_image));
     }
     next_timestamp_ += frame_time_ms_ * kSampleRate / 1000;
diff --git a/webrtc/modules/video_coding/main/source/video_sender_unittest.cc b/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
index 13e6b99..a4b40ba 100644
--- a/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
@@ -236,16 +236,16 @@
       // No intra request expected.
       EXPECT_CALL(
           encoder_,
-          Encode(_,
-                 _,
-                 Pointee(ElementsAre(kDeltaFrame, kDeltaFrame, kDeltaFrame))))
-          .Times(1).WillRepeatedly(Return(0));
+          Encode(_, _, Pointee(ElementsAre(kVideoFrameDelta, kVideoFrameDelta,
+                                           kVideoFrameDelta))))
+          .Times(1)
+          .WillRepeatedly(Return(0));
       return;
     }
     assert(stream >= 0);
     assert(stream < kNumberOfStreams);
-    std::vector<FrameType> frame_types(kNumberOfStreams, kDeltaFrame);
-    frame_types[stream] = kKeyFrame;
+    std::vector<FrameType> frame_types(kNumberOfStreams, kVideoFrameDelta);
+    frame_types[stream] = kVideoFrameKey;
     EXPECT_CALL(
         encoder_,
         Encode(_,
diff --git a/webrtc/test/configurable_frame_size_encoder.cc b/webrtc/test/configurable_frame_size_encoder.cc
index 2dbda38..3d9cf39 100644
--- a/webrtc/test/configurable_frame_size_encoder.cc
+++ b/webrtc/test/configurable_frame_size_encoder.cc
@@ -47,7 +47,7 @@
   encodedImage._completeFrame = true;
   encodedImage._encodedHeight = inputImage.height();
   encodedImage._encodedWidth = inputImage.width();
-  encodedImage._frameType = kKeyFrame;
+  encodedImage._frameType = kVideoFrameKey;
   encodedImage._timeStamp = inputImage.timestamp();
   encodedImage.capture_time_ms_ = inputImage.render_time_ms();
   RTPFragmentationHeader* fragmentation = NULL;
diff --git a/webrtc/video/send_statistics_proxy.cc b/webrtc/video/send_statistics_proxy.cc
index 32b7902..daaa558 100644
--- a/webrtc/video/send_statistics_proxy.cc
+++ b/webrtc/video/send_statistics_proxy.cc
@@ -189,7 +189,7 @@
   stats->height = encoded_image._encodedHeight;
   update_times_[ssrc].resolution_update_ms = clock_->TimeInMilliseconds();
 
-  key_frame_counter_.Add(encoded_image._frameType == kKeyFrame);
+  key_frame_counter_.Add(encoded_image._frameType == kVideoFrameKey);
 
   if (encoded_image.adapt_reason_.quality_resolution_downscales != -1) {
     bool downscaled =
diff --git a/webrtc/video/video_decoder.cc b/webrtc/video/video_decoder.cc
index e8dc5f1..4438cb0 100644
--- a/webrtc/video/video_decoder.cc
+++ b/webrtc/video/video_decoder.cc
@@ -87,7 +87,7 @@
     int64_t render_time_ms) {
   // Try decoding with the provided decoder on every keyframe or when there's no
   // fallback decoder. This is the normal case.
-  if (!fallback_decoder_ || input_image._frameType == kKeyFrame) {
+  if (!fallback_decoder_ || input_image._frameType == kVideoFrameKey) {
     int32_t ret = decoder_->Decode(input_image, missing_frames, fragmentation,
                                    codec_specific_info, render_time_ms);
     if (ret == WEBRTC_VIDEO_CODEC_OK) {
diff --git a/webrtc/video/video_decoder_unittest.cc b/webrtc/video/video_decoder_unittest.cc
index 4ad0456..be09b19 100644
--- a/webrtc/video/video_decoder_unittest.cc
+++ b/webrtc/video/video_decoder_unittest.cc
@@ -86,13 +86,13 @@
       << "Decoder used even though fallback should be active.";
 
   // Should be able to recover on a keyframe.
-  encoded_image._frameType = kKeyFrame;
+  encoded_image._frameType = kVideoFrameKey;
   fake_decoder_.decode_return_code_ = WEBRTC_VIDEO_CODEC_OK;
   fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
   EXPECT_EQ(2, fake_decoder_.decode_count_)
       << "Wrapper did not try to decode a keyframe using registered decoder.";
 
-  encoded_image._frameType = kDeltaFrame;
+  encoded_image._frameType = kVideoFrameDelta;
   fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
   EXPECT_EQ(3, fake_decoder_.decode_count_)
       << "Decoder not used on future delta frames.";
diff --git a/webrtc/video/video_encoder_unittest.cc b/webrtc/video/video_encoder_unittest.cc
index f848356..3382be8 100644
--- a/webrtc/video/video_encoder_unittest.cc
+++ b/webrtc/video/video_encoder_unittest.cc
@@ -110,7 +110,7 @@
   memset(frame_.buffer(webrtc::kVPlane), 128,
          frame_.allocated_size(webrtc::kVPlane));
 
-  std::vector<FrameType> types(1, kKeyFrame);
+  std::vector<FrameType> types(1, kVideoFrameKey);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             fallback_wrapper_.Encode(frame_, nullptr, &types));
 }
@@ -205,7 +205,7 @@
   EXPECT_EQ(&callback2, fake_encoder_.encode_complete_callback_);
 
   // Encoding a frame using the fallback should arrive at the new callback.
-  std::vector<FrameType> types(1, kKeyFrame);
+  std::vector<FrameType> types(1, kVideoFrameKey);
   frame_.set_timestamp(frame_.timestamp() + 1000);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             fallback_wrapper_.Encode(frame_, nullptr, &types));
diff --git a/webrtc/video_frame.h b/webrtc/video_frame.h
index ecc23c9..fb7d735 100644
--- a/webrtc/video_frame.h
+++ b/webrtc/video_frame.h
@@ -193,7 +193,7 @@
   // NTP time of the capture time in local timebase in milliseconds.
   int64_t ntp_time_ms_ = 0;
   int64_t capture_time_ms_ = 0;
-  FrameType _frameType = kDeltaFrame;
+  FrameType _frameType = kVideoFrameDelta;
   uint8_t* _buffer;
   size_t _length;
   size_t _size;