Update comments for benchmark tests
diff --git a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py
index 140c2ee..f8c2828 100644
--- a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py
+++ b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py
@@ -24,14 +24,6 @@
 
 class AntirectifierBenchmark(tf.test.Benchmark):
   """Benchmarks for Antirectifier using `tf.test.Benchmark`."""
-  # Required Arguments for measure_performance.
-  #   x: Input data, it could be Numpy or load from tfds.
-  #   y: Target data. If `x` is a dataset, generator instance,
-  #      `y` should not be specified.
-  #   loss: Loss function for model.
-  #   optimizer: Optimizer for model.
-  #   Other details can see in `measure_performance()` method of
-  #   benchmark_util.
 
   def __init__(self):
     super(AntirectifierBenchmark, self).__init__()
@@ -54,6 +46,15 @@
     )
     return model
 
+  # In each benchmark test, the required arguments for the
+  # `measure_performance` method are:
+  #   x: Input data. It can be a NumPy array or data loaded from tfds.
+  #   y: Target data. If `x` is a dataset or generator instance,
+  #      `y` should not be specified.
+  #   loss: Loss function for the model.
+  #   optimizer: Optimizer for the model.
+  # See the `measure_performance()` method of benchmark_util
+  # for more details.
   def benchmark_pixel_cnn_bs_128(self):
     """Measure performance with batch_size=128 and run_iters=2."""
     batch_size = 128
diff --git a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/bidirectional_lstm_benchmark_test.py b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/bidirectional_lstm_benchmark_test.py
index e7d426d..63e99e3 100644
--- a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/bidirectional_lstm_benchmark_test.py
+++ b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/bidirectional_lstm_benchmark_test.py
@@ -25,15 +25,6 @@
 class BidirectionalLSTMBenchmark(tf.test.Benchmark):
   """Benchmarks for Bidirectional LSTM using `tf.test.Benchmark`."""
 
-  # Required Arguments for measure_performance.
-  #   x: Input data, it could be Numpy or load from tfds.
-  #   y: Target data. If `x` is a dataset, generator instance,
-  #      `y` should not be specified.
-  #   loss: Loss function for model.
-  #   optimizer: Optimizer for model.
-  #   Other details can see in `measure_performance()` method of
-  #   benchmark_util.
-
   def __init__(self):
     super(BidirectionalLSTMBenchmark, self).__init__()
     self.max_feature = 20000
@@ -55,6 +46,15 @@
     model = tf.keras.Model(inputs, outputs)
     return model
 
+  # In each benchmark test, the required arguments for the
+  # `measure_performance` method are:
+  #   x: Input data. It can be a NumPy array or data loaded from tfds.
+  #   y: Target data. If `x` is a dataset or generator instance,
+  #      `y` should not be specified.
+  #   loss: Loss function for the model.
+  #   optimizer: Optimizer for the model.
+  # See the `measure_performance()` method of benchmark_util
+  # for more details.
   def benchmark_bidirect_lstm_imdb_bs_128(self):
     """Measure performance with batch_size=128 and run_iters=3."""
     batch_size = 128
diff --git a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/cifar10_cnn_benchmark_test.py b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/cifar10_cnn_benchmark_test.py
index 9806307..6bf5f8f 100644
--- a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/cifar10_cnn_benchmark_test.py
+++ b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/cifar10_cnn_benchmark_test.py
@@ -24,14 +24,6 @@
 
 class Cifar10CNNBenchmark(tf.test.Benchmark):
   """Benchmarks for CNN using `tf.test.Benchmark`."""
-  # Required Arguments for measure_performance.
-  #   x: Input data, it could be Numpy or load from tfds.
-  #   y: Target data. If `x` is a dataset, generator instance,
-  #      `y` should not be specified.
-  #   loss: Loss function for model.
-  #   optimizer: Optimizer for model.
-  #   Other details can see in `measure_performance()` method of
-  #   benchmark_util.
 
   def __init__(self):
     super(Cifar10CNNBenchmark, self).__init__()
@@ -70,6 +62,15 @@
     model.add(tf.keras.layers.Activation('softmax'))
     return model
 
+  # In each benchmark test, the required arguments for the
+  # `measure_performance` method are:
+  #   x: Input data. It can be a NumPy array or data loaded from tfds.
+  #   y: Target data. If `x` is a dataset or generator instance,
+  #      `y` should not be specified.
+  #   loss: Loss function for the model.
+  #   optimizer: Optimizer for the model.
+  # See the `measure_performance()` method of benchmark_util
+  # for more details.
   def benchmark_cnn_cifar10_bs_256(self):
     """Measure performance with batch_size=256 and run_iters=3."""
     batch_size = 256
diff --git a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_conv_benchmark_test.py b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_conv_benchmark_test.py
index d828e26..150d432 100644
--- a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_conv_benchmark_test.py
+++ b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_conv_benchmark_test.py
@@ -26,14 +26,6 @@
 
 class ConvMnistBenchmark(tf.test.Benchmark):
   """Benchmarks for Convnet using `tf.test.Benchmark`."""
-  # Required Arguments for measure_performance.
-  #   x: Input data, it could be Numpy or load from tfds.
-  #   y: Target data. If `x` is a dataset, generator instance,
-  #      `y` should not be specified.
-  #   loss: Loss function for model.
-  #   optimizer: Optimizer for model.
-  #   Other details can see in `measure_performance()` method of
-  #   benchmark_util.
 
   def __init__(self):
     super(ConvMnistBenchmark, self).__init__()
@@ -66,6 +58,15 @@
     )
     return model
 
+  # In each benchmark test, the required arguments for the
+  # `measure_performance` method are:
+  #   x: Input data. It can be a NumPy array or data loaded from tfds.
+  #   y: Target data. If `x` is a dataset or generator instance,
+  #      `y` should not be specified.
+  #   loss: Loss function for the model.
+  #   optimizer: Optimizer for the model.
+  # See the `measure_performance()` method of benchmark_util
+  # for more details.
   def benchmark_conv_mnist_bs_128(self):
     """Measure performance with batch_size=128 and run_iters=2."""
     batch_size = 128
diff --git a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_hierarchical_rnn_benchmark_test.py b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_hierarchical_rnn_benchmark_test.py
index 82cbe56..ad45971 100644
--- a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_hierarchical_rnn_benchmark_test.py
+++ b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_hierarchical_rnn_benchmark_test.py
@@ -24,14 +24,6 @@
 
 class HierarchicalRNNBenchmark(tf.test.Benchmark):
   """Benchmarks for Hierarchical RNN using `tf.test.Benchmark`."""
-  # Required Arguments for measure_performance.
-  #   x: Input data, it could be Numpy or load from tfds.
-  #   y: Target data. If `x` is a dataset, generator instance,
-  #      `y` should not be specified.
-  #   loss: Loss function for model.
-  #   optimizer: Optimizer for model.
-  #   Other details can see in `measure_performance()` method of
-  #   benchmark_util.
 
   def __init__(self):
     super(HierarchicalRNNBenchmark, self).__init__()
@@ -58,6 +50,15 @@
 
     return model
 
+  # In each benchmark test, the required arguments for the
+  # `measure_performance` method are:
+  #   x: Input data. It can be a NumPy array or data loaded from tfds.
+  #   y: Target data. If `x` is a dataset or generator instance,
+  #      `y` should not be specified.
+  #   loss: Loss function for the model.
+  #   optimizer: Optimizer for the model.
+  # See the `measure_performance()` method of benchmark_util
+  # for more details.
   def benchmark_hrnn_mnist_bs_256(self):
     """Measure performance with batch_size=256 and run_iters=4."""
     batch_size = 256
diff --git a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_irnn_benchmark_test.py b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_irnn_benchmark_test.py
index 8d6f229..b455fdb 100644
--- a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_irnn_benchmark_test.py
+++ b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_irnn_benchmark_test.py
@@ -24,14 +24,6 @@
 
 class IRNNMnistBenchmark(tf.test.Benchmark):
   """Benchmarks for IRNN using `tf.test.Benchmark`."""
-  # Required Arguments for measure_performance.
-  #   x: Input data, it could be Numpy or load from tfds.
-  #   y: Target data. If `x` is a dataset, generator instance,
-  #      `y` should not be specified.
-  #   loss: Loss function for model.
-  #   optimizer: Optimizer for model.
-  #   Other details can see in `measure_performance()` method of
-  #   benchmark_util.
 
   def __init__(self):
     super(IRNNMnistBenchmark, self).__init__()
@@ -59,6 +51,15 @@
     model.add(tf.keras.layers.Activation('softmax'))
     return model
 
+  # In each benchmark test, the required arguments for the
+  # `measure_performance` method are:
+  #   x: Input data. It can be a NumPy array or data loaded from tfds.
+  #   y: Target data. If `x` is a dataset or generator instance,
+  #      `y` should not be specified.
+  #   loss: Loss function for the model.
+  #   optimizer: Optimizer for the model.
+  # See the `measure_performance()` method of benchmark_util
+  # for more details.
   def benchmark_irnn_mnist_bs_256(self):
     """Measure performance with batch_size=256 and run_iters=4."""
     batch_size = 256
diff --git a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/reuters_mlp_benchmark_test.py b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/reuters_mlp_benchmark_test.py
index 064b5a4..03cf855 100644
--- a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/reuters_mlp_benchmark_test.py
+++ b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/reuters_mlp_benchmark_test.py
@@ -26,14 +26,6 @@
 
 class MLPReutersBenchmark(tf.test.Benchmark):
   """Benchmarks for MLP using `tf.test.Benchmark`."""
-  # Required Arguments for measure_performance.
-  #   x: Input data, it could be Numpy or load from tfds.
-  #   y: Target data. If `x` is a dataset, generator instance,
-  #      `y` should not be specified.
-  #   loss: Loss function for model.
-  #   optimizer: Optimizer for model.
-  #   Other details can see in `measure_performance()` method of
-  #   benchmark_util.
 
   def __init__(self):
     super(MLPReutersBenchmark, self).__init__()
@@ -59,6 +51,15 @@
     model.add(tf.keras.layers.Activation('softmax'))
     return model
 
+  # In each benchmark test, the required arguments for the
+  # `measure_performance` method are:
+  #   x: Input data. It can be a NumPy array or data loaded from tfds.
+  #   y: Target data. If `x` is a dataset or generator instance,
+  #      `y` should not be specified.
+  #   loss: Loss function for the model.
+  #   optimizer: Optimizer for the model.
+  # See the `measure_performance()` method of benchmark_util
+  # for more details.
   def benchmark_mlp_reuters_bs_128(self):
     """Measure performance with batch_size=128 and run_iters=2."""
     batch_size = 128
diff --git a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py
index 2051530..26bd92c 100644
--- a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py
+++ b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py
@@ -24,19 +24,9 @@
 
 class TextWithTransformerBenchmark(tf.test.Benchmark):
   """Benchmarks for Text classification with Transformer
-
   using `tf.test.Benchmark`.
   """
 
-  # Required Arguments for measure_performance.
-  #   x: Input data, it could be Numpy or load from tfds.
-  #   y: Target data. If `x` is a dataset, generator instance,
-  #      `y` should not be specified.
-  #   loss: Loss function for model.
-  #   optimizer: Optimizer for model.
-  #   Other details can see in `measure_performance()` method of
-  #   benchmark_util.
-
   def __init__(self):
     super(TextWithTransformerBenchmark, self).__init__()
     self.max_feature = 20000
@@ -66,6 +56,15 @@
     model = tf.keras.Model(inputs=inputs, outputs=outputs)
     return model
 
+  # In each benchmark test, the required arguments for the
+  # `measure_performance` method are:
+  #   x: Input data. It can be a NumPy array or data loaded from tfds.
+  #   y: Target data. If `x` is a dataset or generator instance,
+  #      `y` should not be specified.
+  #   loss: Loss function for the model.
+  #   optimizer: Optimizer for the model.
+  # See the `measure_performance()` method of benchmark_util
+  # for more details.
   def benchmark_text_classification_bs_128(self):
     """Measure performance with batch_size=128 and run_iters=3."""
     batch_size = 128
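
Note (not part of the patch): the benchmark methods above wire the documented arguments into `benchmark_util.measure_performance` and report the result. The following is a minimal sketch of that pattern; the dataset, the toy model, the keyword arguments beyond `x`, `y`, `loss`, and `optimizer`, and the unpacking of the return value are assumptions for illustration only, not confirmed by this diff.

# Illustrative only: a hypothetical benchmark showing how the arguments
# documented in the comments are typically passed to `measure_performance`.
import tensorflow as tf

from tensorflow.python.keras.benchmarks import benchmark_util


class ExampleMnistBenchmark(tf.test.Benchmark):
  """Hypothetical benchmark for a small MNIST classifier."""

  def _build_model(self):
    # Toy model; the real benchmark files build the model under test here.
    return tf.keras.Sequential([
        tf.keras.layers.Dense(10, activation='softmax', input_shape=(784,))
    ])

  def benchmark_example_bs_128(self):
    """Measure performance with batch_size=128 and run_iters=2."""
    batch_size = 128
    run_iters = 2
    (x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
    x_train = x_train.reshape(-1, 784).astype('float32') / 255.

    # `x`, `y`, `loss`, and `optimizer` are the required arguments described
    # in the comments; the remaining keywords and the unpacked return values
    # are assumed to follow the usual benchmark_util pattern.
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=x_train,
        y=y_train,
        batch_size=batch_size,
        run_iters=run_iters,
        optimizer='rmsprop',
        loss='sparse_categorical_crossentropy')

    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)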