Improve comments and style in the TF-TRT shape optimization profile code
diff --git a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
index 10805da..e9c587c 100644
--- a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
+++ b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
@@ -1360,7 +1360,7 @@
}
if (!use_implicit_batch_ && profiles) {
profiles->ConfigureBuilder(trt_builder_.get(), builder_config.get(),
- network());
+ network());
}
VLOG(1) << "Building TensorRT engine";
engine->reset(
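
For context, a minimal sketch of the build step this hunk sits in, using the plain TensorRT C++ API (illustrative only; it assumes `builder`, `network`, and `config` are already created and that the config has had its optimization profiles added, e.g. by ConfigureBuilder):

    // Sketch: build an engine from a network and a builder config. The
    // resulting engine carries every optimization profile that was attached
    // to `config` before this call.
    nvinfer1::ICudaEngine* BuildEngineSketch(
        nvinfer1::IBuilder* builder, nvinfer1::INetworkDefinition* network,
        nvinfer1::IBuilderConfig* config) {
      return builder->buildEngineWithConfig(*network, *config);
    }
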
diff --git a/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.cc b/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.cc
index 6d159b8..1646f30 100644
--- a/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.cc
+++ b/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.cc
@@ -106,10 +106,9 @@
nvinfer1::ICudaEngine* engine,
std::vector<TrtUniquePtrType<nvinfer1::IExecutionContext>>& exec_context) {
int i = 0;
- // The following loops runs once if we have static shapes, to create a single
- // execution context without profiles.
- // In dynamic mode we create one context for each profile and set the
- // corresponding optimization profile.
+ // The following loop runs once if we have static shapes, to create a single
+ // execution context without profiles. In dynamic mode we create one context
+ // for each profile and set the corresponding optimization profile.
do {
VLOG(1) << "Creating execution context " << i;
nvinfer1::IExecutionContext* ctx = engine->createExecutionContext();
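
To illustrate the pattern the new comment describes, here is a standalone sketch (not the file's actual implementation) of creating one execution context per optimization profile with the TensorRT API; error handling is omitted and `TrtUniquePtrType` is the smart-pointer alias already used above:

    // Sketch: one IExecutionContext per optimization profile. With static
    // shapes the engine reports a single profile, so the loop runs once and
    // no profile needs to be selected explicitly.
    std::vector<TrtUniquePtrType<nvinfer1::IExecutionContext>>
    CreateContextsSketch(nvinfer1::ICudaEngine* engine) {
      std::vector<TrtUniquePtrType<nvinfer1::IExecutionContext>> contexts;
      for (int i = 0; i < engine->getNbOptimizationProfiles(); ++i) {
        nvinfer1::IExecutionContext* ctx = engine->createExecutionContext();
        if (i > 0) {
          // Profile 0 is selected implicitly for the first context; later
          // contexts must be bound to their own profile before use.
          ctx->setOptimizationProfile(i);
        }
        contexts.emplace_back(ctx);
      }
      return contexts;
    }
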
diff --git a/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h b/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h
index a4b9857..b445c4b 100644
--- a/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h
+++ b/tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h
@@ -37,7 +37,7 @@
namespace tensorflow {
namespace tensorrt {
-// Stores optimization profile parameters (min/opt/max of each input shape)
+// Stores optimization profile parameters (min/opt/max of each input shape).
//
// A TensorRT optimization profile describes the possible min/max values of
// each dynamic input shape along with an optimum value. These values are used
@@ -112,10 +112,9 @@
// optimization.
//
// This class stores the list of input shapes that were seen during the
-// build/profile_generation_mode phase, and using them it creates a set
-// of OptimizationProfileConfigs. These configs will be added to
-// IBuilderConfig before the engine is created.
-//
+// build/profile_generation_mode phase, and using them it creates a set of
+// OptimizationProfileConfigs. These configs will be added to IBuilderConfig
+// before the engine is created.
class TrtShapeOptimizationProfile {
public:
TrtShapeOptimizationProfile(){};
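
For readers unfamiliar with the underlying API, an optimization profile ultimately maps onto a TensorRT IOptimizationProfile, which records min/opt/max dimensions per dynamic input and is attached to the IBuilderConfig before the engine is built. A sketch with a made-up input name and made-up shapes (illustrative only):

    // Sketch: register one profile for a dynamic input "input_0" whose batch
    // dimension may vary between 1 and 8, with 4 as the optimum.
    void AddProfileSketch(nvinfer1::IBuilder* builder,
                          nvinfer1::IBuilderConfig* config) {
      nvinfer1::IOptimizationProfile* profile =
          builder->createOptimizationProfile();
      profile->setDimensions("input_0", nvinfer1::OptProfileSelector::kMIN,
                             nvinfer1::Dims4(1, 3, 224, 224));
      profile->setDimensions("input_0", nvinfer1::OptProfileSelector::kOPT,
                             nvinfer1::Dims4(4, 3, 224, 224));
      profile->setDimensions("input_0", nvinfer1::OptProfileSelector::kMAX,
                             nvinfer1::Dims4(8, 3, 224, 224));
      // The returned index identifies the profile when execution contexts are
      // bound to profiles at runtime.
      config->addOptimizationProfile(profile);
    }
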